1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Task-based RCU implementations.
4 *
5 * Copyright (C) 2020 Paul E. McKenney
6 */
7
8#ifdef CONFIG_TASKS_RCU_GENERIC
9#include "rcu_segcblist.h"
10
11////////////////////////////////////////////////////////////////////////
12//
13// Generic data structures.
14
15struct rcu_tasks;
16typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
17typedef void (*pregp_func_t)(struct list_head *hop);
18typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
19typedef void (*postscan_func_t)(struct list_head *hop);
20typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
21typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
22
23/**
24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
25 * @cblist: Callback list.
26 * @lock: Lock protecting per-CPU callback list.
27 * @rtp_jiffies: Jiffies counter value for statistics.
28 * @lazy_timer: Timer to unlazify callbacks.
29 * @urgent_gp: Number of additional non-lazy grace periods.
30 * @rtp_n_lock_retries: Rough lock-contention statistic.
31 * @rtp_work: Work queue for invoking callbacks.
32 * @rtp_irq_work: IRQ work queue for deferred wakeups.
33 * @barrier_q_head: RCU callback for barrier operation.
34 * @rtp_blkd_tasks: List of tasks blocked as readers.
35 * @rtp_exit_list: List of tasks in the latter portion of do_exit().
36 * @cpu: CPU number corresponding to this entry.
37 * @index: Index of this CPU in rtpcp_array of the rcu_tasks structure.
38 * @rtpp: Pointer to the rcu_tasks structure.
39 */
40struct rcu_tasks_percpu {
41 struct rcu_segcblist cblist;
42 raw_spinlock_t __private lock;
43 unsigned long rtp_jiffies;
44 unsigned long rtp_n_lock_retries;
45 struct timer_list lazy_timer;
46 unsigned int urgent_gp;
47 struct work_struct rtp_work;
48 struct irq_work rtp_irq_work;
49 struct rcu_head barrier_q_head;
50 struct list_head rtp_blkd_tasks;
51 struct list_head rtp_exit_list;
52 int cpu;
53 int index;
54 struct rcu_tasks *rtpp;
55};
56
57/**
58 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
59 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
60 * @cbs_gbl_lock: Lock protecting callback list.
61 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
62 * @gp_func: This flavor's grace-period-wait function.
63 * @gp_state: Grace period's most recent state transition (debugging).
64 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
65 * @init_fract: Initial backoff sleep interval.
66 * @gp_jiffies: Time of last @gp_state transition.
67 * @gp_start: Most recent grace-period start in jiffies.
68 * @tasks_gp_seq: Number of grace periods completed since boot in upper bits.
69 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
70 * @n_ipis_fails: Number of IPI-send failures.
71 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
72 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
73 * @pregp_func: This flavor's pre-grace-period function (optional).
74 * @pertask_func: This flavor's per-task scan function (optional).
75 * @postscan_func: This flavor's post-task scan function (optional).
76 * @holdouts_func: This flavor's holdout-list scan function (optional).
77 * @postgp_func: This flavor's post-grace-period function (optional).
78 * @call_func: This flavor's call_rcu()-equivalent function.
79 * @wait_state: Task state for synchronous grace-period waits (default TASK_UNINTERRUPTIBLE).
80 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
81 * @rtpcp_array: Array of pointers to rcu_tasks_percpu structure of CPUs in cpu_possible_mask.
82 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
83 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
84 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
85 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
86 * @barrier_q_mutex: Serialize barrier operations.
87 * @barrier_q_count: Number of queues being waited on.
88 * @barrier_q_completion: Barrier wait/wakeup mechanism.
89 * @barrier_q_seq: Sequence number for barrier operations.
90 * @barrier_q_start: Most recent barrier start in jiffies.
91 * @name: This flavor's textual name.
92 * @kname: This flavor's kthread name.
93 */
94struct rcu_tasks {
95 struct rcuwait cbs_wait;
96 raw_spinlock_t cbs_gbl_lock;
97 struct mutex tasks_gp_mutex;
98 int gp_state;
99 int gp_sleep;
100 int init_fract;
101 unsigned long gp_jiffies;
102 unsigned long gp_start;
103 unsigned long tasks_gp_seq;
104 unsigned long n_ipis;
105 unsigned long n_ipis_fails;
106 struct task_struct *kthread_ptr;
107 unsigned long lazy_jiffies;
108 rcu_tasks_gp_func_t gp_func;
109 pregp_func_t pregp_func;
110 pertask_func_t pertask_func;
111 postscan_func_t postscan_func;
112 holdouts_func_t holdouts_func;
113 postgp_func_t postgp_func;
114 call_rcu_func_t call_func;
115 unsigned int wait_state;
116 struct rcu_tasks_percpu __percpu *rtpcpu;
117 struct rcu_tasks_percpu **rtpcp_array;
118 int percpu_enqueue_shift;
119 int percpu_enqueue_lim;
120 int percpu_dequeue_lim;
121 unsigned long percpu_dequeue_gpseq;
122 struct mutex barrier_q_mutex;
123 atomic_t barrier_q_count;
124 struct completion barrier_q_completion;
125 unsigned long barrier_q_seq;
126 unsigned long barrier_q_start;
127 char *name;
128 char *kname;
129};
130
131static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
132
133#define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
134static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \
135 .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \
136 .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \
137}; \
138static struct rcu_tasks rt_name = \
139{ \
140 .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \
141 .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \
142 .tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \
143 .gp_func = gp, \
144 .call_func = call, \
145 .wait_state = TASK_UNINTERRUPTIBLE, \
146 .rtpcpu = &rt_name ## __percpu, \
147 .lazy_jiffies = DIV_ROUND_UP(HZ, 4), \
148 .name = n, \
149 .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \
150 .percpu_enqueue_lim = 1, \
151 .percpu_dequeue_lim = 1, \
152 .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
153 .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \
154 .kname = #rt_name, \
155}
156
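/*
 * Usage sketch: a flavor passes its grace-period wait function and its
 * call_rcu()-style function to this macro, which instantiates both the
 * per-CPU state and the flavor-wide rcu_tasks structure. For example,
 * the Tasks RCU flavor later in this file does:
 *
 *	void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
 *	DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
 *
 * which produces a static per-CPU rcu_tasks__percpu structure plus a
 * static rcu_tasks structure whose ->gp_func and ->call_func point at
 * rcu_tasks_wait_gp() and call_rcu_tasks(), respectively.
 */
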
157#ifdef CONFIG_TASKS_RCU
158
159/* Report a delay in the scan of exiting tasks in rcu_tasks_postscan(). */
160static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
161static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
162#endif
163
164/* Avoid IPIing CPUs early in the grace period. */
165#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
166static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
167module_param(rcu_task_ipi_delay, int, 0644);
168
169/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
170#define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
171#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
172static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
173module_param(rcu_task_stall_timeout, int, 0644);
174#define RCU_TASK_STALL_INFO (HZ * 10)
175static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
176module_param(rcu_task_stall_info, int, 0644);
177static int rcu_task_stall_info_mult __read_mostly = 3;
178module_param(rcu_task_stall_info_mult, int, 0444);
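
/*
 * With the above defaults, rcu_tasks_wait_gp() prints its first
 * informational message roughly ten seconds (rcu_task_stall_info jiffies)
 * into an overly long grace period and then backs off between messages by
 * a factor of rcu_task_stall_info_mult (10 s, 30 s, 90 s, ...), while the
 * full stall warning waits for rcu_task_stall_timeout, ten minutes by
 * default.
 */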
179
180static int rcu_task_enqueue_lim __read_mostly = -1;
181module_param(rcu_task_enqueue_lim, int, 0444);
182
183static bool rcu_task_cb_adjust;
184static int rcu_task_contend_lim __read_mostly = 100;
185module_param(rcu_task_contend_lim, int, 0444);
186static int rcu_task_collapse_lim __read_mostly = 10;
187module_param(rcu_task_collapse_lim, int, 0444);
188static int rcu_task_lazy_lim __read_mostly = 32;
189module_param(rcu_task_lazy_lim, int, 0444);
190
191static int rcu_task_cpu_ids;
192
193/* RCU tasks grace-period state for debugging. */
194#define RTGS_INIT 0
195#define RTGS_WAIT_WAIT_CBS 1
196#define RTGS_WAIT_GP 2
197#define RTGS_PRE_WAIT_GP 3
198#define RTGS_SCAN_TASKLIST 4
199#define RTGS_POST_SCAN_TASKLIST 5
200#define RTGS_WAIT_SCAN_HOLDOUTS 6
201#define RTGS_SCAN_HOLDOUTS 7
202#define RTGS_POST_GP 8
203#define RTGS_WAIT_READERS 9
204#define RTGS_INVOKE_CBS 10
205#define RTGS_WAIT_CBS 11
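/*
 * The array below must track the RTGS_* values above, given that
 * tasks_gp_state_getname() indexes it by ->gp_state.
 */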
206#ifndef CONFIG_TINY_RCU
207static const char * const rcu_tasks_gp_state_names[] = {
208 "RTGS_INIT",
209 "RTGS_WAIT_WAIT_CBS",
210 "RTGS_WAIT_GP",
211 "RTGS_PRE_WAIT_GP",
212 "RTGS_SCAN_TASKLIST",
213 "RTGS_POST_SCAN_TASKLIST",
214 "RTGS_WAIT_SCAN_HOLDOUTS",
215 "RTGS_SCAN_HOLDOUTS",
216 "RTGS_POST_GP",
217 "RTGS_WAIT_READERS",
218 "RTGS_INVOKE_CBS",
219 "RTGS_WAIT_CBS",
220};
221#endif /* #ifndef CONFIG_TINY_RCU */
222
223////////////////////////////////////////////////////////////////////////
224//
225// Generic code.
226
227static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
228
229/* Record grace-period phase and time. */
230static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
231{
232 rtp->gp_state = newstate;
233 rtp->gp_jiffies = jiffies;
234}
235
236#ifndef CONFIG_TINY_RCU
237/* Return state name. */
238static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
239{
240 int i = data_race(rtp->gp_state); // Let KCSAN detect update races
241 int j = READ_ONCE(i); // Prevent the compiler from reading twice
242
243 if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
244 return "???";
245 return rcu_tasks_gp_state_names[j];
246}
247#endif /* #ifndef CONFIG_TINY_RCU */
248
249// Initialize per-CPU callback lists for the specified flavor of
250// Tasks RCU. Do not enqueue callbacks before this function is invoked.
251static void cblist_init_generic(struct rcu_tasks *rtp)
252{
253 int cpu;
254 int lim;
255 int shift;
256 int maxcpu;
257 int index = 0;
258
259 if (rcu_task_enqueue_lim < 0) {
260 rcu_task_enqueue_lim = 1;
261 rcu_task_cb_adjust = true;
262 } else if (rcu_task_enqueue_lim == 0) {
263 rcu_task_enqueue_lim = 1;
264 }
265 lim = rcu_task_enqueue_lim;
266
267 rtp->rtpcp_array = kcalloc(num_possible_cpus(), sizeof(struct rcu_tasks_percpu *), GFP_KERNEL);
268 BUG_ON(!rtp->rtpcp_array);
269
270 for_each_possible_cpu(cpu) {
271 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
272
273 WARN_ON_ONCE(!rtpcp);
274 if (cpu)
275 raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
276 if (rcu_segcblist_empty(&rtpcp->cblist))
277 rcu_segcblist_init(&rtpcp->cblist);
278 INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
279 rtpcp->cpu = cpu;
280 rtpcp->rtpp = rtp;
281 rtpcp->index = index;
282 rtp->rtpcp_array[index] = rtpcp;
283 index++;
284 if (!rtpcp->rtp_blkd_tasks.next)
285 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
286 if (!rtpcp->rtp_exit_list.next)
287 INIT_LIST_HEAD(&rtpcp->rtp_exit_list);
288 rtpcp->barrier_q_head.next = &rtpcp->barrier_q_head;
289 maxcpu = cpu;
290 }
291
292 rcu_task_cpu_ids = maxcpu + 1;
293 if (lim > rcu_task_cpu_ids)
294 lim = rcu_task_cpu_ids;
295 shift = ilog2(rcu_task_cpu_ids / lim);
296 if (((rcu_task_cpu_ids - 1) >> shift) >= lim)
297 shift++;
298 WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
299 WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
300 smp_store_release(&rtp->percpu_enqueue_lim, lim);
301
302 pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d rcu_task_cpu_ids=%d.\n",
303 rtp->name, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim),
304 rcu_task_cb_adjust, rcu_task_cpu_ids);
305}
306
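/*
 * Worked example of the shift computation above, using hypothetical
 * values: with rcu_task_cpu_ids == 8 and lim == 2, ilog2(8 / 2) == 2 and
 * ((8 - 1) >> 2) == 1, which is less than 2, so shift stays at 2.
 * Assuming all eight CPUs are possible, callbacks queued from CPUs 0-3
 * then land on CPU 0's queue and those from CPUs 4-7 on CPU 1's queue,
 * via the "smp_processor_id() >> percpu_enqueue_shift" mapping in
 * call_rcu_tasks_generic().
 */
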
307// Compute wakeup time for lazy callback timer.
308static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
309{
310 return jiffies + rtp->lazy_jiffies;
311}
312
313// Timer handler that unlazifies lazy callbacks.
314static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
315{
316 unsigned long flags;
317 bool needwake = false;
318 struct rcu_tasks *rtp;
319 struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer);
320
321 rtp = rtpcp->rtpp;
322 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
323 if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
324 if (!rtpcp->urgent_gp)
325 rtpcp->urgent_gp = 1;
326 needwake = true;
327 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
328 }
329 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
330 if (needwake)
331 rcuwait_wake_up(&rtp->cbs_wait);
332}
333
334// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
335static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
336{
337 struct rcu_tasks *rtp;
338 struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
339
340 rtp = rtpcp->rtpp;
341 rcuwait_wake_up(&rtp->cbs_wait);
342}
343
344// Enqueue a callback for the specified flavor of Tasks RCU.
345static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
346 struct rcu_tasks *rtp)
347{
348 int chosen_cpu;
349 unsigned long flags;
350 bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
351 int ideal_cpu;
352 unsigned long j;
353 bool needadjust = false;
354 bool needwake;
355 struct rcu_tasks_percpu *rtpcp;
356
357 rhp->next = NULL;
358 rhp->func = func;
359 local_irq_save(flags);
360 rcu_read_lock();
361 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
362 chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
363 WARN_ON_ONCE(chosen_cpu >= rcu_task_cpu_ids);
364 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
365 if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
366 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
367 j = jiffies;
368 if (rtpcp->rtp_jiffies != j) {
369 rtpcp->rtp_jiffies = j;
370 rtpcp->rtp_n_lock_retries = 0;
371 }
372 if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
373 READ_ONCE(rtp->percpu_enqueue_lim) != rcu_task_cpu_ids)
374 needadjust = true; // Defer adjustment to avoid deadlock.
375 }
376 // Queuing callbacks before initialization not yet supported.
377 if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
378 rcu_segcblist_init(&rtpcp->cblist);
379 needwake = (func == wakeme_after_rcu) ||
380 (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
381 if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
382 if (rtp->lazy_jiffies)
383 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
384 else
385 needwake = rcu_segcblist_empty(&rtpcp->cblist);
386 }
387 if (needwake)
388 rtpcp->urgent_gp = 3;
389 rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
390 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
391 if (unlikely(needadjust)) {
392 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
393 if (rtp->percpu_enqueue_lim != rcu_task_cpu_ids) {
394 WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
395 WRITE_ONCE(rtp->percpu_dequeue_lim, rcu_task_cpu_ids);
396 smp_store_release(&rtp->percpu_enqueue_lim, rcu_task_cpu_ids);
397 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
398 }
399 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
400 }
401 rcu_read_unlock();
402 /* We can't create the thread unless interrupts are enabled. */
403	/* The kthread cannot be created until interrupts are enabled, so it might not yet exist. */
404 irq_work_queue(&rtpcp->rtp_irq_work);
405}
406
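/*
 * Laziness summary for call_rcu_tasks_generic() above, assuming the
 * grace-period kthread is already running: with the default lazy_jiffies
 * of DIV_ROUND_UP(HZ, 4) (roughly 250 ms), a newly queued callback that
 * is neither wakeme_after_rcu nor queued onto a list already holding
 * rcu_task_lazy_lim callbacks does not wake the kthread immediately.
 * Instead, the per-CPU lazy_timer (see call_rcu_tasks_generic_timer()
 * above) marks the grace period urgent and does the wakeup once that
 * interval expires.
 */
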
407// RCU callback function for rcu_barrier_tasks_generic().
408static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
409{
410 struct rcu_tasks *rtp;
411 struct rcu_tasks_percpu *rtpcp;
412
413 rhp->next = rhp; // Mark the callback as having been invoked.
414 rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
415 rtp = rtpcp->rtpp;
416 if (atomic_dec_and_test(&rtp->barrier_q_count))
417 complete(&rtp->barrier_q_completion);
418}
419
420// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
421// Operates in a manner similar to rcu_barrier().
422static void __maybe_unused rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
423{
424 int cpu;
425 unsigned long flags;
426 struct rcu_tasks_percpu *rtpcp;
427 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
428
429 mutex_lock(&rtp->barrier_q_mutex);
430 if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
431 smp_mb();
432 mutex_unlock(&rtp->barrier_q_mutex);
433 return;
434 }
435 rtp->barrier_q_start = jiffies;
436 rcu_seq_start(&rtp->barrier_q_seq);
437 init_completion(&rtp->barrier_q_completion);
438 atomic_set(&rtp->barrier_q_count, 2);
439 for_each_possible_cpu(cpu) {
440 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
441 break;
442 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
443 rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
444 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
445 if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
446 atomic_inc(&rtp->barrier_q_count);
447 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
448 }
449 if (atomic_sub_and_test(2, &rtp->barrier_q_count))
450 complete(&rtp->barrier_q_completion);
451 wait_for_completion(&rtp->barrier_q_completion);
452 rcu_seq_end(&rtp->barrier_q_seq);
453 mutex_unlock(&rtp->barrier_q_mutex);
454}
455
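/*
 * Note on the counting in rcu_barrier_tasks_generic() above:
 * barrier_q_count starts at 2 as a bias that keeps the completion from
 * firing while callbacks are still being entrained. Each successfully
 * entrained rcu_barrier_tasks_generic_cb() adds one when queued and
 * removes it when invoked, and the final atomic_sub_and_test(2, ...)
 * drops the bias, completing immediately if nothing was entrained.
 */
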
456// Advance callbacks and indicate whether either a grace period or
457// callback invocation is needed.
458static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
459{
460 int cpu;
461 int dequeue_limit;
462 unsigned long flags;
463 bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
464 long n;
465 long ncbs = 0;
466 long ncbsnz = 0;
467 int needgpcb = 0;
468
469 dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
470 for (cpu = 0; cpu < dequeue_limit; cpu++) {
471 if (!cpu_possible(cpu))
472 continue;
473 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
474
475 /* Advance and accelerate any new callbacks. */
476 if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
477 continue;
478 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
479 // Should we shrink down to a single callback queue?
480 n = rcu_segcblist_n_cbs(&rtpcp->cblist);
481 if (n) {
482 ncbs += n;
483 if (cpu > 0)
484 ncbsnz += n;
485 }
486 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
487 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
488 if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
489 if (rtp->lazy_jiffies)
490 rtpcp->urgent_gp--;
491 needgpcb |= 0x3;
492 } else if (rcu_segcblist_empty(&rtpcp->cblist)) {
493 rtpcp->urgent_gp = 0;
494 }
495 if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
496 needgpcb |= 0x1;
497 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
498 }
499
500 // Shrink down to a single callback queue if appropriate.
501 // This is done in two stages: (1) If there are no more than
502 // rcu_task_collapse_lim callbacks on CPU 0 and none on any other
503 // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period,
504 // if there has not been an increase in callbacks, limit dequeuing
505 // to CPU 0. Note the matching RCU read-side critical section in
506 // call_rcu_tasks_generic().
507 if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
508 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
509 if (rtp->percpu_enqueue_lim > 1) {
510 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(rcu_task_cpu_ids));
511 smp_store_release(&rtp->percpu_enqueue_lim, 1);
512 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
513 gpdone = false;
514 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
515 }
516 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
517 }
518 if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
519 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
520 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
521 WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
522 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
523 }
524 if (rtp->percpu_dequeue_lim == 1) {
525 for (cpu = rtp->percpu_dequeue_lim; cpu < rcu_task_cpu_ids; cpu++) {
526 if (!cpu_possible(cpu))
527 continue;
528 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
529
530 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
531 }
532 }
533 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
534 }
535
536 return needgpcb;
537}
538
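/*
 * The value returned above is a small bitmask, as consumed by
 * rcu_tasks_one_gp() below: bit 0x1 indicates callbacks ready for
 * immediate invocation, and bit 0x2 indicates pending callbacks in need
 * of another grace period.
 */
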
539// Advance callbacks and invoke any that are ready.
540static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
541{
542 int cpuwq;
543 unsigned long flags;
544 int len;
545 int index;
546 struct rcu_head *rhp;
547 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
548 struct rcu_tasks_percpu *rtpcp_next;
549
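	/*
	 * Fan out the callback-invocation flood: treating rtpcp_array[] as
	 * a binary heap, the entry at ->index i queues work for its
	 * children at indices 2 * i + 1 and 2 * i + 2, so the
	 * rcu_tasks_invoke_cbs_wq() work items reach all in-use CPUs in
	 * logarithmically many generations rather than all being queued
	 * from a single CPU.
	 */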
550 index = rtpcp->index * 2 + 1;
551 if (index < num_possible_cpus()) {
552 rtpcp_next = rtp->rtpcp_array[index];
553 if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
554 cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
555 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
556 index++;
557 if (index < num_possible_cpus()) {
558 rtpcp_next = rtp->rtpcp_array[index];
559 if (rtpcp_next->cpu < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
560 cpuwq = rcu_cpu_beenfullyonline(rtpcp_next->cpu) ? rtpcp_next->cpu : WORK_CPU_UNBOUND;
561 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
562 }
563 }
564 }
565 }
566
567 if (rcu_segcblist_empty(&rtpcp->cblist))
568 return;
569 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
570 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
571 rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
572 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
573 len = rcl.len;
574 for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
575 debug_rcu_head_callback(rhp);
576 local_bh_disable();
577 rhp->func(rhp);
578 local_bh_enable();
579 cond_resched();
580 }
581 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
582 rcu_segcblist_add_len(&rtpcp->cblist, -len);
583 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
584 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
585}
586
587// Workqueue flood to advance callbacks and invoke any that are ready.
588static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
589{
590 struct rcu_tasks *rtp;
591 struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
592
593 rtp = rtpcp->rtpp;
594 rcu_tasks_invoke_cbs(rtp, rtpcp);
595}
596
597// Wait for one grace period.
598static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
599{
600 int needgpcb;
601
602 mutex_lock(&rtp->tasks_gp_mutex);
603
604	// In mid-boot, assume a grace period is needed; otherwise wait for callbacks to arrive.
605 if (unlikely(midboot)) {
606 needgpcb = 0x2;
607 } else {
608 mutex_unlock(&rtp->tasks_gp_mutex);
609 set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
610 rcuwait_wait_event(&rtp->cbs_wait,
611 (needgpcb = rcu_tasks_need_gpcb(rtp)),
612 TASK_IDLE);
613 mutex_lock(&rtp->tasks_gp_mutex);
614 }
615
616 if (needgpcb & 0x2) {
617 // Wait for one grace period.
618 set_tasks_gp_state(rtp, RTGS_WAIT_GP);
619 rtp->gp_start = jiffies;
620 rcu_seq_start(&rtp->tasks_gp_seq);
621 rtp->gp_func(rtp);
622 rcu_seq_end(&rtp->tasks_gp_seq);
623 }
624
625 // Invoke callbacks.
626 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
627 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
628 mutex_unlock(&rtp->tasks_gp_mutex);
629}
630
631// RCU-tasks kthread that detects grace periods and invokes callbacks.
632static int __noreturn rcu_tasks_kthread(void *arg)
633{
634 int cpu;
635 struct rcu_tasks *rtp = arg;
636
637 for_each_possible_cpu(cpu) {
638 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
639
640 timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
641 rtpcp->urgent_gp = 1;
642 }
643
644 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
645 housekeeping_affine(current, HK_TYPE_RCU);
646 smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!
647
648 /*
649 * Each pass through the following loop makes one check for
650 * newly arrived callbacks, and, if there are some, waits for
651 * one RCU-tasks grace period and then invokes the callbacks.
652 * This loop is terminated by the system going down. ;-)
653 */
654 for (;;) {
655 // Wait for one grace period and invoke any callbacks
656 // that are ready.
657 rcu_tasks_one_gp(rtp, false);
658
659 // Paranoid sleep to keep this from entering a tight loop.
660 schedule_timeout_idle(rtp->gp_sleep);
661 }
662}
663
664// Wait for a grace period for the specified flavor of Tasks RCU.
665static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
666{
667 /* Complain if the scheduler has not started. */
668 if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
669 "synchronize_%s() called too soon", rtp->name))
670 return;
671
672 // If the grace-period kthread is running, use it.
673 if (READ_ONCE(rtp->kthread_ptr)) {
674 wait_rcu_gp_state(rtp->wait_state, rtp->call_func);
675 return;
676 }
677 rcu_tasks_one_gp(rtp, true);
678}
679
680/* Spawn RCU-tasks grace-period kthread. */
681static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
682{
683 struct task_struct *t;
684
685 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
686 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
687 return;
688 smp_mb(); /* Ensure others see full kthread. */
689}
690
691#ifndef CONFIG_TINY_RCU
692
693/*
694 * Print any non-default Tasks RCU settings.
695 */
696static void __init rcu_tasks_bootup_oddness(void)
697{
698#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
699 int rtsimc;
700
701 if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
702 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
703 rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
704 if (rtsimc != rcu_task_stall_info_mult) {
705 pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
706 rcu_task_stall_info_mult = rtsimc;
707 }
708#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
709#ifdef CONFIG_TASKS_RCU
710 pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
711#endif /* #ifdef CONFIG_TASKS_RCU */
712#ifdef CONFIG_TASKS_RUDE_RCU
713 pr_info("\tRude variant of Tasks RCU enabled.\n");
714#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
715#ifdef CONFIG_TASKS_TRACE_RCU
716 pr_info("\tTracing variant of Tasks RCU enabled.\n");
717#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
718}
719
720
721/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
722static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
723{
724 int cpu;
725 bool havecbs = false;
726 bool haveurgent = false;
727 bool haveurgentcbs = false;
728
729 for_each_possible_cpu(cpu) {
730 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
731
732 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
733 havecbs = true;
734 if (data_race(rtpcp->urgent_gp))
735 haveurgent = true;
736 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
737 haveurgentcbs = true;
738 if (havecbs && haveurgent && haveurgentcbs)
739 break;
740 }
741 pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
742 rtp->kname,
743 tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
744 jiffies - data_race(rtp->gp_jiffies),
745 data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
746 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
747 ".k"[!!data_race(rtp->kthread_ptr)],
748 ".C"[havecbs],
749 ".u"[haveurgent],
750 ".U"[haveurgentcbs],
751 rtp->lazy_jiffies,
752 s);
753}
754
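/*
 * In the message above, 'k' indicates a spawned kthread, 'C' queued
 * callbacks, 'u' a nonzero urgent_gp counter, and 'U' a queue with both
 * callbacks and a nonzero urgent_gp counter; a '.' in any of these
 * positions means that the corresponding condition does not hold.
 */
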
755/* Dump out more rcutorture-relevant state common to all RCU-tasks flavors. */
756static void rcu_tasks_torture_stats_print_generic(struct rcu_tasks *rtp, char *tt,
757 char *tf, char *tst)
758{
759 cpumask_var_t cm;
760 int cpu;
761 bool gotcb = false;
762 unsigned long j = jiffies;
763
764 pr_alert("%s%s Tasks%s RCU g%ld gp_start %lu gp_jiffies %lu gp_state %d (%s).\n",
765 tt, tf, tst, data_race(rtp->tasks_gp_seq),
766 j - data_race(rtp->gp_start), j - data_race(rtp->gp_jiffies),
767 data_race(rtp->gp_state), tasks_gp_state_getname(rtp));
768 pr_alert("\tEnqueue shift %d limit %d Dequeue limit %d gpseq %lu.\n",
769 data_race(rtp->percpu_enqueue_shift),
770 data_race(rtp->percpu_enqueue_lim),
771 data_race(rtp->percpu_dequeue_lim),
772 data_race(rtp->percpu_dequeue_gpseq));
773 (void)zalloc_cpumask_var(&cm, GFP_KERNEL);
774 pr_alert("\tCallback counts:");
775 for_each_possible_cpu(cpu) {
776 long n;
777 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
778
779 if (cpumask_available(cm) && !rcu_barrier_cb_is_done(&rtpcp->barrier_q_head))
780 cpumask_set_cpu(cpu, cm);
781 n = rcu_segcblist_n_cbs(&rtpcp->cblist);
782 if (!n)
783 continue;
784 pr_cont(" %d:%ld", cpu, n);
785 gotcb = true;
786 }
787 if (gotcb)
788 pr_cont(".\n");
789 else
790 pr_cont(" (none).\n");
791 pr_alert("\tBarrier seq %lu start %lu count %d holdout CPUs ",
792 data_race(rtp->barrier_q_seq), j - data_race(rtp->barrier_q_start),
793 atomic_read(&rtp->barrier_q_count));
794 if (cpumask_available(cm) && !cpumask_empty(cm))
795 pr_cont(" %*pbl.\n", cpumask_pr_args(cm));
796 else
797 pr_cont("(none).\n");
798 free_cpumask_var(cm);
799}
800
801#endif // #ifndef CONFIG_TINY_RCU
802
803static void exit_tasks_rcu_finish_trace(struct task_struct *t);
804
805#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
806
807////////////////////////////////////////////////////////////////////////
808//
809// Shared code between task-list-scanning variants of Tasks RCU.
810
811/* Wait for one RCU-tasks grace period. */
812static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
813{
814 struct task_struct *g;
815 int fract;
816 LIST_HEAD(holdouts);
817 unsigned long j;
818 unsigned long lastinfo;
819 unsigned long lastreport;
820 bool reported = false;
821 int rtsi;
822 struct task_struct *t;
823
824 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
825 rtp->pregp_func(&holdouts);
826
827 /*
828 * There were callbacks, so we need to wait for an RCU-tasks
829 * grace period. Start off by scanning the task list for tasks
830 * that are not already voluntarily blocked. Mark these tasks
831 * and make a list of them in holdouts.
832 */
833 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
834 if (rtp->pertask_func) {
835 rcu_read_lock();
836 for_each_process_thread(g, t)
837 rtp->pertask_func(t, &holdouts);
838 rcu_read_unlock();
839 }
840
841 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
842 rtp->postscan_func(&holdouts);
843
844 /*
845 * Each pass through the following loop scans the list of holdout
846 * tasks, removing any that are no longer holdouts. When the list
847 * is empty, we are done.
848 */
849 lastreport = jiffies;
850 lastinfo = lastreport;
851 rtsi = READ_ONCE(rcu_task_stall_info);
852
853 // Start off with initial wait and slowly back off to 1 HZ wait.
854 fract = rtp->init_fract;
855
856 while (!list_empty(&holdouts)) {
857 ktime_t exp;
858 bool firstreport;
859 bool needreport;
860 int rtst;
861
862 // Slowly back off waiting for holdouts
863 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
864 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
865 schedule_timeout_idle(fract);
866 } else {
867 exp = jiffies_to_nsecs(fract);
868 __set_current_state(TASK_IDLE);
869 schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
870 }
871
872 if (fract < HZ)
873 fract++;
874
875 rtst = READ_ONCE(rcu_task_stall_timeout);
876 needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
877 if (needreport) {
878 lastreport = jiffies;
879 reported = true;
880 }
881 firstreport = true;
882 WARN_ON(signal_pending(current));
883 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
884 rtp->holdouts_func(&holdouts, needreport, &firstreport);
885
886 // Print pre-stall informational messages if needed.
887 j = jiffies;
888 if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
889 lastinfo = j;
890 rtsi = rtsi * rcu_task_stall_info_mult;
891 pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
892 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
893 }
894 }
895
896 set_tasks_gp_state(rtp, RTGS_POST_GP);
897 rtp->postgp_func(rtp);
898}
899
900#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
901
902#ifdef CONFIG_TASKS_RCU
903
904////////////////////////////////////////////////////////////////////////
905//
906// Simple variant of RCU whose quiescent states are voluntary context
907// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
908// As such, grace periods can take one good long time. There are no
909// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
910// because this implementation is intended to get the system into a safe
911// state for some of the manipulations involved in tracing and the like.
912// Finally, this implementation does not support high call_rcu_tasks()
913// rates from multiple CPUs. If this is required, per-CPU callback lists
914// will be needed.
915//
916// The implementation uses rcu_tasks_wait_gp(), which relies on function
917// pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread()
918// function sets these function pointers up so that rcu_tasks_wait_gp()
919// invokes these functions in this order:
920//
921// rcu_tasks_pregp_step():
922// Invokes synchronize_rcu() in order to wait for all in-flight
923// t->on_rq and t->nvcsw transitions to complete. This works because
924// all such transitions are carried out with interrupts disabled.
925// rcu_tasks_pertask(), invoked on every non-idle task:
926// For every runnable non-idle task other than the current one, use
927// get_task_struct() to pin down that task, snapshot that task's
928// number of voluntary context switches, and add that task to the
929// holdout list.
930// rcu_tasks_postscan():
931// Gather per-CPU lists of tasks in do_exit() to ensure that all
932// tasks that were in the process of exiting (and which thus might
933// not know to synchronize with this RCU Tasks grace period) have
934// completed exiting. The synchronize_rcu() in rcu_tasks_postgp()
935// will take care of any tasks stuck in the non-preemptible region
936// of do_exit() following its call to exit_tasks_rcu_finish().
937// check_all_holdout_tasks(), repeatedly until holdout list is empty:
938// Scans the holdout list, attempting to identify a quiescent state
939// for each task on the list. If there is a quiescent state, the
940// corresponding task is removed from the holdout list.
941// rcu_tasks_postgp():
942// Invokes synchronize_rcu() in order to ensure that all prior
943// t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
944// to have happened before the end of this RCU Tasks grace period.
945// Again, this works because all such transitions are carried out
946// with interrupts disabled.
947//
948// For each exiting task, the exit_tasks_rcu_start() and
949// exit_tasks_rcu_finish() functions add and remove, respectively, the
950// current task to a per-CPU list of tasks that rcu_tasks_postscan() must
951// wait on. This is necessary because rcu_tasks_postscan() must wait on
952// tasks that have already been removed from the global list of tasks.
953//
954// Pre-grace-period update-side code is ordered before the grace
955// via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side code
956// is ordered before the grace period via synchronize_rcu() call in
957// rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
958// disabling.
959
960/* Pre-grace-period preparation. */
961static void rcu_tasks_pregp_step(struct list_head *hop)
962{
963 /*
964 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
965 * to complete. Invoking synchronize_rcu() suffices because all
966 * these transitions occur with interrupts disabled. Without this
967 * synchronize_rcu(), a read-side critical section that started
968 * before the grace period might be incorrectly seen as having
969 * started after the grace period.
970 *
971 * This synchronize_rcu() also dispenses with the need for a
972 * memory barrier on the first store to t->rcu_tasks_holdout,
973 * as it forces the store to happen after the beginning of the
974 * grace period.
975 */
976 synchronize_rcu();
977}
978
979/* Check for quiescent states since the pregp's synchronize_rcu() */
980static bool rcu_tasks_is_holdout(struct task_struct *t)
981{
982 int cpu;
983
984 /* Has the task been seen voluntarily sleeping? */
985 if (!READ_ONCE(t->on_rq))
986 return false;
987
988 /*
989 * t->on_rq && !t->se.sched_delayed *could* be considered sleeping but
990 * since it is a spurious state (it will transition into the
991 * traditional blocked state or get woken up without outside
992 * dependencies), not considering it such should only affect timing.
993 *
994 * Be conservative for now and not include it.
995 */
996
997 /*
998 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
999 * quiescent states. But CPU boot code performed by the idle task
1000 * isn't a quiescent state.
1001 */
1002 if (is_idle_task(t))
1003 return false;
1004
1005 cpu = task_cpu(t);
1006
1007 /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
1008 if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
1009 return false;
1010
1011 return true;
1012}
1013
1014/* Per-task initial processing. */
1015static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
1016{
1017 if (t != current && rcu_tasks_is_holdout(t)) {
1018 get_task_struct(t);
1019 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
1020 WRITE_ONCE(t->rcu_tasks_holdout, true);
1021 list_add(&t->rcu_tasks_holdout_list, hop);
1022 }
1023}
1024
1025void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
1026DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
1027
1028/* Processing between scanning the tasklist and draining the holdout list. */
1029static void rcu_tasks_postscan(struct list_head *hop)
1030{
1031 int cpu;
1032 int rtsi = READ_ONCE(rcu_task_stall_info);
1033
1034 if (!IS_ENABLED(CONFIG_TINY_RCU)) {
1035 tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
1036 add_timer(&tasks_rcu_exit_srcu_stall_timer);
1037 }
1038
1039 /*
1040 * Exiting tasks may escape the tasklist scan. Those are vulnerable
1041 * until their final schedule() with TASK_DEAD state. To cope with
1042	 * this, cover the fragile part of the exit path with two intersecting
1043	 * read-side critical sections:
1044 *
1045 * 1) A task_struct list addition before calling exit_notify(),
1046	 *    which may remove the task from the tasklist, with the removal
1047	 *    from this new list coming after the final preempt_disable() call in do_exit().
1048 *
1049 * 2) An _RCU_ read side starting with the final preempt_disable()
1050 * call in do_exit() and ending with the final call to schedule()
1051 * with TASK_DEAD state.
1052 *
1053 * This handles the part 1). And postgp will handle part 2) with a
1054 * call to synchronize_rcu().
1055 */
1056
1057 for_each_possible_cpu(cpu) {
1058 unsigned long j = jiffies + 1;
1059 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, cpu);
1060 struct task_struct *t;
1061 struct task_struct *t1;
1062 struct list_head tmp;
1063
1064 raw_spin_lock_irq_rcu_node(rtpcp);
1065 list_for_each_entry_safe(t, t1, &rtpcp->rtp_exit_list, rcu_tasks_exit_list) {
1066 if (list_empty(&t->rcu_tasks_holdout_list))
1067 rcu_tasks_pertask(t, hop);
1068
1069 // RT kernels need frequent pauses, otherwise
1070 // pause at least once per pair of jiffies.
1071 if (!IS_ENABLED(CONFIG_PREEMPT_RT) && time_before(jiffies, j))
1072 continue;
1073
1074 // Keep our place in the list while pausing.
1075 // Nothing else traverses this list, so adding a
1076 // bare list_head is OK.
1077 list_add(&tmp, &t->rcu_tasks_exit_list);
1078 raw_spin_unlock_irq_rcu_node(rtpcp);
1079 cond_resched(); // For CONFIG_PREEMPT=n kernels
1080 raw_spin_lock_irq_rcu_node(rtpcp);
1081 t1 = list_entry(tmp.next, struct task_struct, rcu_tasks_exit_list);
1082 list_del(&tmp);
1083 j = jiffies + 1;
1084 }
1085 raw_spin_unlock_irq_rcu_node(rtpcp);
1086 }
1087
1088 if (!IS_ENABLED(CONFIG_TINY_RCU))
1089 del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
1090}
1091
1092/* See if tasks are still holding out, complain if so. */
1093static void check_holdout_task(struct task_struct *t,
1094 bool needreport, bool *firstreport)
1095{
1096 int cpu;
1097
1098 if (!READ_ONCE(t->rcu_tasks_holdout) ||
1099 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
1100 !rcu_tasks_is_holdout(t) ||
1101 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
1102 !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) {
1103 WRITE_ONCE(t->rcu_tasks_holdout, false);
1104 list_del_init(&t->rcu_tasks_holdout_list);
1105 put_task_struct(t);
1106 return;
1107 }
1108 rcu_request_urgent_qs_task(t);
1109 if (!needreport)
1110 return;
1111 if (*firstreport) {
1112 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
1113 *firstreport = false;
1114 }
1115 cpu = task_cpu(t);
1116 pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
1117 t, ".I"[is_idle_task(t)],
1118 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
1119 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
1120 data_race(t->rcu_tasks_idle_cpu), cpu);
1121 sched_show_task(t);
1122}
1123
1124/* Scan the holdout lists for tasks no longer holding out. */
1125static void check_all_holdout_tasks(struct list_head *hop,
1126 bool needreport, bool *firstreport)
1127{
1128 struct task_struct *t, *t1;
1129
1130 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
1131 check_holdout_task(t, needreport, firstreport);
1132 cond_resched();
1133 }
1134}
1135
1136/* Finish off the Tasks-RCU grace period. */
1137static void rcu_tasks_postgp(struct rcu_tasks *rtp)
1138{
1139 /*
1140	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
1141 * memory barriers prior to them in the schedule() path, memory
1142 * reordering on other CPUs could cause their RCU-tasks read-side
1143 * critical sections to extend past the end of the grace period.
1144 * However, because these ->nvcsw updates are carried out with
1145 * interrupts disabled, we can use synchronize_rcu() to force the
1146 * needed ordering on all such CPUs.
1147 *
1148 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
1149 * accesses to be within the grace period, avoiding the need for
1150 * memory barriers for ->rcu_tasks_holdout accesses.
1151 *
1152 * In addition, this synchronize_rcu() waits for exiting tasks
1153 * to complete their final preempt_disable() region of execution,
1154	 * thereby making the whole region from before tasklist removal
1155	 * until the final schedule() with TASK_DEAD state behave as an
1156	 * RCU Tasks read-side critical section.
1157 */
1158 synchronize_rcu();
1159}
1160
1161static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
1162{
1163#ifndef CONFIG_TINY_RCU
1164 int rtsi;
1165
1166 rtsi = READ_ONCE(rcu_task_stall_info);
1167 pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
1168 __func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
1169 tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
1170 pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
1171 tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
1172 add_timer(&tasks_rcu_exit_srcu_stall_timer);
1173#endif // #ifndef CONFIG_TINY_RCU
1174}
1175
1176/**
1177 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
1178 * @rhp: structure to be used for queueing the RCU updates.
1179 * @func: actual callback function to be invoked after the grace period
1180 *
1181 * The callback function will be invoked some time after a full grace
1182 * period elapses, in other words after all currently executing RCU
1183 * read-side critical sections have completed. call_rcu_tasks() assumes
1184 * that the read-side critical sections end at a voluntary context
1185 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
1186 * or transition to usermode execution. As such, there are no read-side
1187 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1188 * this primitive is intended to determine that all tasks have passed
1189 * through a safe state, not so much for data-structure synchronization.
1190 *
1191 * See the description of call_rcu() for more detailed information on
1192 * memory ordering guarantees.
1193 */
1194void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
1195{
1196 call_rcu_tasks_generic(rhp, func, &rcu_tasks);
1197}
1198EXPORT_SYMBOL_GPL(call_rcu_tasks);
1199
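/*
 * Hypothetical usage sketch (illustrative names, not part of this file):
 * defer freeing a dynamically allocated trampoline until every task has
 * passed through a voluntary context switch, idle, or usermode, so that
 * no task can still be executing the trampoline text:
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		char text[PAGE_SIZE];	// Trampoline instructions.
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_tramp, rh));
 *	}
 *
 *	static void my_tramp_retire(struct my_tramp *tp)
 *	{
 *		call_rcu_tasks(&tp->rh, my_tramp_free_cb);
 *	}
 */
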
1200/**
1201 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
1202 *
1203 * Control will return to the caller some time after a full rcu-tasks
1204 * grace period has elapsed, in other words after all currently
1205 * executing rcu-tasks read-side critical sections have completed. These
1206 * read-side critical sections are delimited by calls to schedule(),
1207 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
1208 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
1209 *
1210 * This is a very specialized primitive, intended only for a few uses in
1211 * tracing and other situations requiring manipulation of function
1212 * preambles and profiling hooks. The synchronize_rcu_tasks() function
1213 * is not (yet) intended for heavy use from multiple CPUs.
1214 *
1215 * See the description of synchronize_rcu() for more detailed information
1216 * on memory ordering guarantees.
1217 */
1218void synchronize_rcu_tasks(void)
1219{
1220 synchronize_rcu_tasks_generic(&rcu_tasks);
1221}
1222EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
1223
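/*
 * Hypothetical usage sketch (illustrative names, not part of this file),
 * for callers in process context that can afford to sleep:
 *
 *	remove_tracing_hook(hook);	// Unhook the old trampoline.
 *	synchronize_rcu_tasks();	// Every task is now past the hook.
 *	free_hook_text(hook);		// Safe to release the text.
 */
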
1224/**
1225 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
1226 *
1227 * Although the current implementation is guaranteed to wait, it is not
1228 * obligated to, for example, if there are no pending callbacks.
1229 */
1230void rcu_barrier_tasks(void)
1231{
1232 rcu_barrier_tasks_generic(&rcu_tasks);
1233}
1234EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
1235
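/*
 * Hypothetical usage sketch (illustrative names, not part of this file):
 * a module that queued callbacks with call_rcu_tasks() should flush them
 * before its callback functions can disappear:
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		unregister_my_hooks();
 *		rcu_barrier_tasks();	// Wait for in-flight callbacks.
 *	}
 */
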
1236static int rcu_tasks_lazy_ms = -1;
1237module_param(rcu_tasks_lazy_ms, int, 0444);
1238
1239static int __init rcu_spawn_tasks_kthread(void)
1240{
1241 rcu_tasks.gp_sleep = HZ / 10;
1242 rcu_tasks.init_fract = HZ / 10;
1243 if (rcu_tasks_lazy_ms >= 0)
1244 rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
1245 rcu_tasks.pregp_func = rcu_tasks_pregp_step;
1246 rcu_tasks.pertask_func = rcu_tasks_pertask;
1247 rcu_tasks.postscan_func = rcu_tasks_postscan;
1248 rcu_tasks.holdouts_func = check_all_holdout_tasks;
1249 rcu_tasks.postgp_func = rcu_tasks_postgp;
1250 rcu_tasks.wait_state = TASK_IDLE;
1251 rcu_spawn_tasks_kthread_generic(&rcu_tasks);
1252 return 0;
1253}
1254
1255#if !defined(CONFIG_TINY_RCU)
1256void show_rcu_tasks_classic_gp_kthread(void)
1257{
1258 show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
1259}
1260EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
1261
1262void rcu_tasks_torture_stats_print(char *tt, char *tf)
1263{
1264 rcu_tasks_torture_stats_print_generic(&rcu_tasks, tt, tf, "");
1265}
1266EXPORT_SYMBOL_GPL(rcu_tasks_torture_stats_print);
1267#endif // !defined(CONFIG_TINY_RCU)
1268
1269struct task_struct *get_rcu_tasks_gp_kthread(void)
1270{
1271 return rcu_tasks.kthread_ptr;
1272}
1273EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);
1274
1275void rcu_tasks_get_gp_data(int *flags, unsigned long *gp_seq)
1276{
1277 *flags = 0;
1278 *gp_seq = rcu_seq_current(&rcu_tasks.tasks_gp_seq);
1279}
1280EXPORT_SYMBOL_GPL(rcu_tasks_get_gp_data);
1281
1282/*
1283 * Protect against tasklist scan blind spot while the task is exiting and
1284 * may be removed from the tasklist. Do this by adding the task to yet
1285 * another list.
1286 *
1287 * Note that the task will remove itself from this list, so there is no
1288 * need for get_task_struct(), except in the case where rcu_tasks_pertask()
1289 * adds it to the holdout list, in which case rcu_tasks_pertask() supplies
1290 * the needed get_task_struct().
1291 */
1292void exit_tasks_rcu_start(void)
1293{
1294 unsigned long flags;
1295 struct rcu_tasks_percpu *rtpcp;
1296 struct task_struct *t = current;
1297
1298 WARN_ON_ONCE(!list_empty(&t->rcu_tasks_exit_list));
1299 preempt_disable();
1300 rtpcp = this_cpu_ptr(rcu_tasks.rtpcpu);
1301 t->rcu_tasks_exit_cpu = smp_processor_id();
1302 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1303 WARN_ON_ONCE(!rtpcp->rtp_exit_list.next);
1304 list_add(&t->rcu_tasks_exit_list, &rtpcp->rtp_exit_list);
1305 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1306 preempt_enable();
1307}
1308
1309/*
1310 * Remove the task from the "yet another list" because do_exit() is now
1311 * non-preemptible, allowing synchronize_rcu() to wait beyond this point.
1312 */
1313void exit_tasks_rcu_finish(void)
1314{
1315 unsigned long flags;
1316 struct rcu_tasks_percpu *rtpcp;
1317 struct task_struct *t = current;
1318
1319 WARN_ON_ONCE(list_empty(&t->rcu_tasks_exit_list));
1320 rtpcp = per_cpu_ptr(rcu_tasks.rtpcpu, t->rcu_tasks_exit_cpu);
1321 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1322 list_del_init(&t->rcu_tasks_exit_list);
1323 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1324
1325 exit_tasks_rcu_finish_trace(t);
1326}
1327
1328#else /* #ifdef CONFIG_TASKS_RCU */
1329void exit_tasks_rcu_start(void) { }
1330void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
1331#endif /* #else #ifdef CONFIG_TASKS_RCU */
1332
1333#ifdef CONFIG_TASKS_RUDE_RCU
1334
1335////////////////////////////////////////////////////////////////////////
1336//
1337// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's
1338// trick of passing an empty function to schedule_on_each_cpu().
1339// This approach provides batching of concurrent calls to the synchronous
1340// synchronize_rcu_tasks_rude() API. This invokes schedule_on_each_cpu()
1341// in order to send IPIs far and wide and induces otherwise unnecessary
1342// context switches on all online CPUs, whether idle or not.
1343//
1344// Callback handling is provided by the rcu_tasks_kthread() function.
1345//
1346// Ordering is provided by the scheduler's context-switch code.
1347
1348// Empty function to allow workqueues to force a context switch.
1349static void rcu_tasks_be_rude(struct work_struct *work)
1350{
1351}
1352
1353// Wait for one rude RCU-tasks grace period.
1354static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1355{
1356 rtp->n_ipis += cpumask_weight(cpu_online_mask);
1357 schedule_on_each_cpu(rcu_tasks_be_rude);
1358}
1359
1360static void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
1361DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1362 "RCU Tasks Rude");
1363
1364/*
1365 * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
1366 * @rhp: structure to be used for queueing the RCU updates.
1367 * @func: actual callback function to be invoked after the grace period
1368 *
1369 * The callback function will be invoked some time after a full grace
1370 * period elapses, in other words after all currently executing RCU
1371 * read-side critical sections have completed. call_rcu_tasks_rude()
1372 * assumes that the read-side critical sections end at context switch,
1373 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
1374 * usermode execution is schedulable). As such, there are no read-side
1375 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1376 * this primitive is intended to determine that all tasks have passed
1377 * through a safe state, not so much for data-structure synchronization.
1378 *
1379 * See the description of call_rcu() for more detailed information on
1380 * memory ordering guarantees.
1381 *
1382 * This is no longer exported, and is instead reserved for use by
1383 * synchronize_rcu_tasks_rude().
1384 */
1385static void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1386{
1387 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1388}
1389
1390/**
1391 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1392 *
1393 * Control will return to the caller some time after a rude rcu-tasks
1394 * grace period has elapsed, in other words after all currently
1395 * executing rcu-tasks read-side critical sections have completed. These
1396 * read-side critical sections are delimited by calls to schedule(),
1397 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1398 * context), and (in theory, anyway) cond_resched().
1399 *
1400 * This is a very specialized primitive, intended only for a few uses in
1401 * tracing and other situations requiring manipulation of function preambles
1402 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
1403 * (yet) intended for heavy use from multiple CPUs.
1404 *
1405 * See the description of synchronize_rcu() for more detailed information
1406 * on memory ordering guarantees.
1407 */
1408void synchronize_rcu_tasks_rude(void)
1409{
1410 if (!IS_ENABLED(CONFIG_ARCH_WANTS_NO_INSTR) || IS_ENABLED(CONFIG_FORCE_TASKS_RUDE_RCU))
1411 synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1412}
1413EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
1414
1415static int __init rcu_spawn_tasks_rude_kthread(void)
1416{
1417 rcu_tasks_rude.gp_sleep = HZ / 10;
1418 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1419 return 0;
1420}
1421
1422#if !defined(CONFIG_TINY_RCU)
1423void show_rcu_tasks_rude_gp_kthread(void)
1424{
1425 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1426}
1427EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1428
1429void rcu_tasks_rude_torture_stats_print(char *tt, char *tf)
1430{
1431 rcu_tasks_torture_stats_print_generic(&rcu_tasks_rude, tt, tf, "");
1432}
1433EXPORT_SYMBOL_GPL(rcu_tasks_rude_torture_stats_print);
1434#endif // !defined(CONFIG_TINY_RCU)
1435
1436struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
1437{
1438 return rcu_tasks_rude.kthread_ptr;
1439}
1440EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);
1441
1442void rcu_tasks_rude_get_gp_data(int *flags, unsigned long *gp_seq)
1443{
1444 *flags = 0;
1445 *gp_seq = rcu_seq_current(&rcu_tasks_rude.tasks_gp_seq);
1446}
1447EXPORT_SYMBOL_GPL(rcu_tasks_rude_get_gp_data);
1448
1449#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1450
1451////////////////////////////////////////////////////////////////////////
1452//
1453// Tracing variant of Tasks RCU. This variant is designed to be used
1454// to protect tracing hooks, including those of BPF. This variant
1455// therefore:
1456//
1457// 1. Has explicit read-side markers to allow finite grace periods
1458// in the face of in-kernel loops for PREEMPT=n builds.
1459//
1460// 2. Protects code in the idle loop, exception entry/exit, and
1461// CPU-hotplug code paths, similar to the capabilities of SRCU.
1462//
1463// 3. Avoids expensive read-side instructions, having overhead similar
1464// to that of Preemptible RCU.
1465//
1466// There are of course downsides. For example, the grace-period code
1467// can send IPIs to CPUs, even when those CPUs are in the idle loop or
1468// in nohz_full userspace. If needed, these downsides can be at least
1469// partially remedied.
1470//
1471// Perhaps most important, this variant of RCU does not affect the vanilla
1472// flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
1473// readers can operate from idle, offline, and exception entry/exit in no
1474// way allows rcu_preempt and rcu_sched readers to also do so.
1475//
1476// The implementation uses rcu_tasks_wait_gp(), which relies on function
1477// pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread()
1478// function sets these function pointers up so that rcu_tasks_wait_gp()
1479// invokes these functions in this order:
1480//
1481// rcu_tasks_trace_pregp_step():
1482// Disables CPU hotplug, adds all currently executing tasks to the
1483// holdout list, then checks the state of all tasks that blocked
1484// or were preempted within their current RCU Tasks Trace read-side
1485// critical section, adding them to the holdout list if appropriate.
1486// Finally, this function re-enables CPU hotplug.
1487// The ->pertask_func() pointer is NULL, so there is no per-task processing.
1488// rcu_tasks_trace_postscan():
1489// Invokes synchronize_rcu() to wait for late-stage exiting tasks
1490// to finish exiting.
1491// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1492// Scans the holdout list, attempting to identify a quiescent state
1493// for each task on the list. If there is a quiescent state, the
1494// corresponding task is removed from the holdout list. Once this
1495// list is empty, the grace period has completed.
1496// rcu_tasks_trace_postgp():
1497// Provides the needed full memory barrier and does debug checks.
1498//
1499// The exit_tasks_rcu_finish_trace() function synchronizes with exiting tasks.
1500//
1501// Pre-grace-period update-side code is ordered before the grace period
1502// via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period
1503// read-side code is ordered before the grace period by atomic operations
1504// on the .b.need_qs flag of each task involved in this process, or by scheduler
1505// context-switch ordering (for locked-down non-running readers).
1506
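// The sketch below is purely illustrative and is not part of this file's
// machinery.  Assuming a hypothetical RCU-protected pointer gp, a
// hypothetical reader-side helper do_something_with(), and old/new pointers
// to a kfree()-able structure, a reader and an updater might pair up as
// follows:
//
//	// Reader, for example in a tracing or BPF hook:
//	rcu_read_lock_trace();
//	p = rcu_dereference_check(gp, rcu_read_lock_trace_held());
//	if (p)
//		do_something_with(p);
//	rcu_read_unlock_trace();
//
//	// Updater, unpublishing the old structure and then freeing it:
//	old = rcu_replace_pointer(gp, new, 1);	// "1" stands in for a real
//						// lockdep condition.
//	synchronize_rcu_tasks_trace();		// Wait for pre-existing readers.
//	kfree(old);
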
1507// The lockdep state must be outside of #ifdef to be useful.
1508#ifdef CONFIG_DEBUG_LOCK_ALLOC
1509static struct lock_class_key rcu_lock_trace_key;
1510struct lockdep_map rcu_trace_lock_map =
1511 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1512EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1513#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1514
1515#ifdef CONFIG_TASKS_TRACE_RCU
1516
1517// Record outstanding IPIs to each CPU. No point in sending two...
1518static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1519
1520// The number of detections of task quiescent state relying on
1521// heavyweight readers executing explicit memory barriers.
1522static unsigned long n_heavy_reader_attempts;
1523static unsigned long n_heavy_reader_updates;
1524static unsigned long n_heavy_reader_ofl_updates;
1525static unsigned long n_trc_holdouts;
1526
1527void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1528DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1529 "RCU Tasks Trace");
1530
1531/* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1532static u8 rcu_ld_need_qs(struct task_struct *t)
1533{
1534 smp_mb(); // Enforce full grace-period ordering.
1535 return smp_load_acquire(&t->trc_reader_special.b.need_qs);
1536}
1537
1538/* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1539static void rcu_st_need_qs(struct task_struct *t, u8 v)
1540{
1541 smp_store_release(&t->trc_reader_special.b.need_qs, v);
1542 smp_mb(); // Enforce full grace-period ordering.
1543}
1544
1545/*
1546 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1547 * the four-byte operand-size restriction of some platforms.
1548 *
1549 * Returns the old value, which is often ignored.
1550 */
1551u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
1552{
1553 return cmpxchg(&t->trc_reader_special.b.need_qs, old, new);
1554}
1555EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
1556
1557/*
1558 * If we are the last reader, signal the grace-period kthread.
1559 * Also remove from the per-CPU list of blocked tasks.
1560 */
1561void rcu_read_unlock_trace_special(struct task_struct *t)
1562{
1563 unsigned long flags;
1564 struct rcu_tasks_percpu *rtpcp;
1565 union rcu_special trs;
1566
1567 // Open-coded full-word version of rcu_ld_need_qs().
1568 smp_mb(); // Enforce full grace-period ordering.
1569 trs = smp_load_acquire(&t->trc_reader_special);
1570
1571 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
1572 smp_mb(); // Pairs with update-side barriers.
1573 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1574 if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
1575 u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
1576 TRC_NEED_QS_CHECKED);
1577
1578 WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
1579 }
1580 if (trs.b.blocked) {
1581 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
1582 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1583 list_del_init(&t->trc_blkd_node);
1584 WRITE_ONCE(t->trc_reader_special.b.blocked, false);
1585 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1586 }
1587 WRITE_ONCE(t->trc_reader_nesting, 0);
1588}
1589EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1590
1591/* Add a newly blocked reader task to its CPU's list. */
1592void rcu_tasks_trace_qs_blkd(struct task_struct *t)
1593{
1594 unsigned long flags;
1595 struct rcu_tasks_percpu *rtpcp;
1596
1597 local_irq_save(flags);
1598 rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
1599 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
1600 t->trc_blkd_cpu = smp_processor_id();
1601 if (!rtpcp->rtp_blkd_tasks.next)
1602 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
1603 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1604 WRITE_ONCE(t->trc_reader_special.b.blocked, true);
1605 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1606}
1607EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
1608
1609/* Add a task to the holdout list, if it is not already on the list. */
1610static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1611{
1612 if (list_empty(&t->trc_holdout_list)) {
1613 get_task_struct(t);
1614 list_add(&t->trc_holdout_list, bhp);
1615 n_trc_holdouts++;
1616 }
1617}
1618
1619/* Remove a task from the holdout list, if it is in fact present. */
1620static void trc_del_holdout(struct task_struct *t)
1621{
1622 if (!list_empty(&t->trc_holdout_list)) {
1623 list_del_init(&t->trc_holdout_list);
1624 put_task_struct(t);
1625 n_trc_holdouts--;
1626 }
1627}
1628
1629/* IPI handler to check task state. */
1630static void trc_read_check_handler(void *t_in)
1631{
1632 int nesting;
1633 struct task_struct *t = current;
1634 struct task_struct *texp = t_in;
1635
1636 // If the task is no longer running on this CPU, leave.
1637 if (unlikely(texp != t))
1638 goto reset_ipi; // Already on holdout list, so will check later.
1639
1640 // If the task is not in a read-side critical section, and
1641 // if this is the last reader, awaken the grace-period kthread.
1642 nesting = READ_ONCE(t->trc_reader_nesting);
1643 if (likely(!nesting)) {
1644 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1645 goto reset_ipi;
1646 }
1647 // If we are racing with an rcu_read_unlock_trace(), try again later.
1648 if (unlikely(nesting < 0))
1649 goto reset_ipi;
1650
1651 // Get here if the task is in a read-side critical section.
1652 // Set its state so that it will update state for the grace-period
1653 // kthread upon exit from that critical section.
1654 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
1655
1656reset_ipi:
1657	// Allow future IPIs to be sent on this CPU and for this task.
1658 // Also order this IPI handler against any later manipulations of
1659 // the intended task.
1660 smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1661 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1662}
1663
1664/* Callback function for scheduler to check locked-down task. */
1665static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
1666{
1667 struct list_head *bhp = bhp_in;
1668 int cpu = task_cpu(t);
1669 int nesting;
1670 bool ofl = cpu_is_offline(cpu);
1671
1672 if (task_curr(t) && !ofl) {
1673 // If no chance of heavyweight readers, do it the hard way.
1674 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
1675 return -EINVAL;
1676
1677 // If heavyweight readers are enabled on the remote task,
1678		// we can inspect its state despite it currently running.
1679 // However, we cannot safely change its state.
1680 n_heavy_reader_attempts++;
1681 // Check for "running" idle tasks on offline CPUs.
1682 if (!rcu_watching_zero_in_eqs(cpu, &t->trc_reader_nesting))
1683 return -EINVAL; // No quiescent state, do it the hard way.
1684 n_heavy_reader_updates++;
1685 nesting = 0;
1686 } else {
1687 // The task is not running, so C-language access is safe.
1688 nesting = t->trc_reader_nesting;
1689 WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
1690 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
1691 n_heavy_reader_ofl_updates++;
1692 }
1693
1694 // If not exiting a read-side critical section, mark as checked
1695 // so that the grace-period kthread will remove it from the
1696 // holdout list.
1697 if (!nesting) {
1698 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1699 return 0; // In QS, so done.
1700 }
1701 if (nesting < 0)
1702 return -EINVAL; // Reader transitioning, try again later.
1703
1704 // The task is in a read-side critical section, so set up its
1705 // state so that it will update state upon exit from that critical
1706 // section.
1707 if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
1708 trc_add_holdout(t, bhp);
1709 return 0;
1710}
1711
1712/* Attempt to extract the state for the specified task. */
1713static void trc_wait_for_one_reader(struct task_struct *t,
1714 struct list_head *bhp)
1715{
1716 int cpu;
1717
1718 // If a previous IPI is still in flight, let it complete.
1719 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1720 return;
1721
1722 // The current task had better be in a quiescent state.
1723 if (t == current) {
1724 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1725 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1726 return;
1727 }
1728
1729 // Attempt to nail down the task for inspection.
1730 get_task_struct(t);
1731 if (!task_call_func(t, trc_inspect_reader, bhp)) {
1732 put_task_struct(t);
1733 return;
1734 }
1735 put_task_struct(t);
1736
1737 // If this task is not yet on the holdout list, then we are in
1738 // an RCU read-side critical section. Otherwise, the invocation of
1739 // trc_add_holdout() that added it to the list did the necessary
1740 // get_task_struct(). Either way, the task cannot be freed out
1741 // from under this code.
1742
1743	// If currently running, send an IPI; either way, add to list.
1744 trc_add_holdout(t, bhp);
1745 if (task_curr(t) &&
1746 time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1747 // The task is currently running, so try IPIing it.
1748 cpu = task_cpu(t);
1749
1750 // If there is already an IPI outstanding, let it happen.
1751 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1752 return;
1753
1754 per_cpu(trc_ipi_to_cpu, cpu) = true;
1755 t->trc_ipi_to_cpu = cpu;
1756 rcu_tasks_trace.n_ipis++;
1757 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1758 // Just in case there is some other reason for
1759 // failure than the target CPU being offline.
1760 WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
1761 __func__, cpu);
1762 rcu_tasks_trace.n_ipis_fails++;
1763 per_cpu(trc_ipi_to_cpu, cpu) = false;
1764 t->trc_ipi_to_cpu = -1;
1765 }
1766 }
1767}
1768
1769/*
1770 * Initialize for first-round processing for the specified task.
1771 * Return false if task is NULL or already taken care of, true otherwise.
1772 */
1773static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
1774{
1775 // During early boot when there is only the one boot CPU, there
1776 // is no idle task for the other CPUs. Also, the grace-period
1777 // kthread is always in a quiescent state. In addition, just return
1778 // if this task is already on the list.
1779 if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
1780 return false;
1781
1782 rcu_st_need_qs(t, 0);
1783 t->trc_ipi_to_cpu = -1;
1784 return true;
1785}
1786
1787/* Do first-round processing for the specified task. */
1788static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
1789{
1790 if (rcu_tasks_trace_pertask_prep(t, true))
1791 trc_wait_for_one_reader(t, hop);
1792}
1793
1794/* Initialize for a new RCU-tasks-trace grace period. */
1795static void rcu_tasks_trace_pregp_step(struct list_head *hop)
1796{
1797 LIST_HEAD(blkd_tasks);
1798 int cpu;
1799 unsigned long flags;
1800 struct rcu_tasks_percpu *rtpcp;
1801 struct task_struct *t;
1802
1803 // There shouldn't be any old IPIs, but...
1804 for_each_possible_cpu(cpu)
1805 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1806
1807 // Disable CPU hotplug across the CPU scan for the benefit of
1808 // any IPIs that might be needed. This also waits for all readers
1809 // in CPU-hotplug code paths.
1810 cpus_read_lock();
1811
1812 // These rcu_tasks_trace_pertask_prep() calls are serialized to
1813 // allow safe access to the hop list.
1814 for_each_online_cpu(cpu) {
1815 rcu_read_lock();
1816 // Note that cpu_curr_snapshot() picks up the target
1817 // CPU's current task while its runqueue is locked with
1818 // an smp_mb__after_spinlock(). This ensures that either
1819 // the grace-period kthread will see that task's read-side
1820 // critical section or the task will see the updater's pre-GP
1821 // accesses. The trailing smp_mb() in cpu_curr_snapshot()
1822		// does not currently play a role other than to simplify
1823 // that function's ordering semantics. If these simplified
1824 // ordering semantics continue to be redundant, that smp_mb()
1825 // might be removed.
1826 t = cpu_curr_snapshot(cpu);
1827 if (rcu_tasks_trace_pertask_prep(t, true))
1828 trc_add_holdout(t, hop);
1829 rcu_read_unlock();
1830 cond_resched_tasks_rcu_qs();
1831 }
1832
1833 // Only after all running tasks have been accounted for is it
1834 // safe to take care of the tasks that have blocked within their
1835 // current RCU tasks trace read-side critical section.
1836 for_each_possible_cpu(cpu) {
1837 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
1838 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1839 list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
1840 while (!list_empty(&blkd_tasks)) {
1841 rcu_read_lock();
1842 t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
1843 list_del_init(&t->trc_blkd_node);
1844 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1845 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1846 rcu_tasks_trace_pertask(t, hop);
1847 rcu_read_unlock();
1848 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1849 }
1850 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1851 cond_resched_tasks_rcu_qs();
1852 }
1853
1854 // Re-enable CPU hotplug now that the holdout list is populated.
1855 cpus_read_unlock();
1856}
1857
1858/*
1859 * Do intermediate processing between task and holdout scans.
1860 */
1861static void rcu_tasks_trace_postscan(struct list_head *hop)
1862{
1863 // Wait for late-stage exiting tasks to finish exiting.
1864 // These might have passed the call to exit_tasks_rcu_finish().
1865
1866 // If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
1867 synchronize_rcu();
1868 // Any tasks that exit after this point will set
1869 // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
1870}
1871
1872/* Communicate task state back to the RCU Tasks Trace stall-warning code. */
1873struct trc_stall_chk_rdr {
1874 int nesting;
1875 int ipi_to_cpu;
1876 u8 needqs;
1877};
1878
1879static int trc_check_slow_task(struct task_struct *t, void *arg)
1880{
1881 struct trc_stall_chk_rdr *trc_rdrp = arg;
1882
1883 if (task_curr(t) && cpu_online(task_cpu(t)))
1884 return false; // It is running, so decline to inspect it.
1885 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1886 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
1887 trc_rdrp->needqs = rcu_ld_need_qs(t);
1888 return true;
1889}
1890
1891/* Show the state of a task stalling the current RCU tasks trace GP. */
1892static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1893{
1894 int cpu;
1895 struct trc_stall_chk_rdr trc_rdr;
1896 bool is_idle_tsk = is_idle_task(t);
1897
1898 if (*firstreport) {
1899 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1900 *firstreport = false;
1901 }
1902 cpu = task_cpu(t);
1903 if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
1904 pr_alert("P%d: %c%c\n",
1905 t->pid,
1906 ".I"[t->trc_ipi_to_cpu >= 0],
1907 ".i"[is_idle_tsk]);
1908 else
1909 pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
1910 t->pid,
1911 ".I"[trc_rdr.ipi_to_cpu >= 0],
1912 ".i"[is_idle_tsk],
1913 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1914 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
1915 trc_rdr.nesting,
1916 " !CN"[trc_rdr.needqs & 0x3],
1917 " ?"[trc_rdr.needqs > 0x3],
1918 cpu, cpu_online(cpu) ? "" : "(offline)");
1919 sched_show_task(t);
1920}
1921
1922/* List stalled IPIs for RCU tasks trace. */
1923static void show_stalled_ipi_trace(void)
1924{
1925 int cpu;
1926
1927 for_each_possible_cpu(cpu)
1928 if (per_cpu(trc_ipi_to_cpu, cpu))
1929 pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1930}
1931
1932/* Do one scan of the holdout list. */
1933static void check_all_holdout_tasks_trace(struct list_head *hop,
1934 bool needreport, bool *firstreport)
1935{
1936 struct task_struct *g, *t;
1937
1938 // Disable CPU hotplug across the holdout list scan for IPIs.
1939 cpus_read_lock();
1940
1941 list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1942 // If safe and needed, try to check the current task.
1943 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1944 !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
1945 trc_wait_for_one_reader(t, hop);
1946
1947 // If check succeeded, remove this task from the list.
1948 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1949 rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
1950 trc_del_holdout(t);
1951 else if (needreport)
1952 show_stalled_task_trace(t, firstreport);
1953 cond_resched_tasks_rcu_qs();
1954 }
1955
1956 // Re-enable CPU hotplug now that the holdout list scan has completed.
1957 cpus_read_unlock();
1958
1959 if (needreport) {
1960 if (*firstreport)
1961 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1962 show_stalled_ipi_trace();
1963 }
1964}
1965
1966static void rcu_tasks_trace_empty_fn(void *unused)
1967{
1968}
1969
1970/* Wait for grace period to complete and provide ordering. */
1971static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1972{
1973 int cpu;
1974
1975 // Wait for any lingering IPI handlers to complete. Note that
1976 // if a CPU has gone offline or transitioned to userspace in the
1977 // meantime, all IPI handlers should have been drained beforehand.
1978 // Yes, this assumes that CPUs process IPIs in order. If that ever
1979 // changes, there will need to be a recheck and/or timed wait.
1980 for_each_online_cpu(cpu)
1981 if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1982 smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1983
1984 smp_mb(); // Caller's code must be ordered after wakeup.
1985 // Pairs with pretty much every ordering primitive.
1986}
1987
1988/* Report any needed quiescent state for this exiting task. */
1989static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1990{
1991 union rcu_special trs = READ_ONCE(t->trc_reader_special);
1992
1993 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1994 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1995 if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
1996 rcu_read_unlock_trace_special(t);
1997 else
1998 WRITE_ONCE(t->trc_reader_nesting, 0);
1999}
2000
2001/**
2002 * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
2003 * @rhp: structure to be used for queueing the RCU updates.
2004 * @func: actual callback function to be invoked after the grace period
2005 *
2006 * The callback function will be invoked some time after a trace rcu-tasks
2007 * grace period elapses, in other words after all currently executing
2008 * trace rcu-tasks read-side critical sections have completed. These
2009 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
2010 * and rcu_read_unlock_trace().
2011 *
2012 * See the description of call_rcu() for more detailed information on
2013 * memory ordering guarantees.
2014 */
2015void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
2016{
2017 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
2018}
2019EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
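
// Illustrative only: a typical (hypothetical) asynchronous user of
// call_rcu_tasks_trace() embeds an rcu_head in the structure being
// protected and frees that structure from the callback:
//
//	struct foo {				// Hypothetical structure.
//		struct rcu_head rh;
//		/* ... payload ... */
//	};
//
//	static void foo_free_cb(struct rcu_head *rhp)
//	{
//		kfree(container_of(rhp, struct foo, rh));
//	}
//
//	// After making old_foo unreachable to new readers:
//	call_rcu_tasks_trace(&old_foo->rh, foo_free_cb);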
2020
2021/**
2022 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
2023 *
2024 * Control will return to the caller some time after a trace rcu-tasks
2025 * grace period has elapsed, in other words after all currently executing
2026 * trace rcu-tasks read-side critical sections have completed. These read-side
2027 * critical sections are delimited by calls to rcu_read_lock_trace()
2028 * and rcu_read_unlock_trace().
2029 *
2030 * This is a very specialized primitive, intended only for a few uses in
2031 * tracing and other situations requiring manipulation of function preambles
2032 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
2033 * (yet) intended for heavy use from multiple CPUs.
2034 *
2035 * See the description of synchronize_rcu() for more detailed information
2036 * on memory ordering guarantees.
2037 */
2038void synchronize_rcu_tasks_trace(void)
2039{
2040 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
2041 synchronize_rcu_tasks_generic(&rcu_tasks_trace);
2042}
2043EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
2044
2045/**
2046 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
2047 *
2048 * Although the current implementation is guaranteed to wait, it is not
2049 * obligated to do so if, for example, there are no pending callbacks.
2050 */
2051void rcu_barrier_tasks_trace(void)
2052{
2053 rcu_barrier_tasks_generic(&rcu_tasks_trace);
2054}
2055EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
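
// Illustrative only: a hypothetical module that queued callbacks via
// call_rcu_tasks_trace() must wait for all of them to be invoked before
// the text containing its callback functions can be freed:
//
//	static void my_module_exit(void)	// Hypothetical exit hook.
//	{
//		/* First ensure no further call_rcu_tasks_trace() calls. */
//		rcu_barrier_tasks_trace();	// Wait for in-flight callbacks.
//		/* Now safe to free remaining resources and unload. */
//	}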
2056
2057int rcu_tasks_trace_lazy_ms = -1;
2058module_param(rcu_tasks_trace_lazy_ms, int, 0444);
2059
2060static int __init rcu_spawn_tasks_trace_kthread(void)
2061{
2062 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
2063 rcu_tasks_trace.gp_sleep = HZ / 10;
2064 rcu_tasks_trace.init_fract = HZ / 10;
2065 } else {
2066 rcu_tasks_trace.gp_sleep = HZ / 200;
2067 if (rcu_tasks_trace.gp_sleep <= 0)
2068 rcu_tasks_trace.gp_sleep = 1;
2069 rcu_tasks_trace.init_fract = HZ / 200;
2070 if (rcu_tasks_trace.init_fract <= 0)
2071 rcu_tasks_trace.init_fract = 1;
2072 }
2073 if (rcu_tasks_trace_lazy_ms >= 0)
2074 rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
2075 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
2076 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
2077 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
2078 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
2079 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
2080 return 0;
2081}
2082
2083#if !defined(CONFIG_TINY_RCU)
2084void show_rcu_tasks_trace_gp_kthread(void)
2085{
2086 char buf[64];
2087
2088 snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu",
2089 data_race(n_trc_holdouts),
2090 data_race(n_heavy_reader_ofl_updates),
2091 data_race(n_heavy_reader_updates),
2092 data_race(n_heavy_reader_attempts));
2093 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
2094}
2095EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
2096
2097void rcu_tasks_trace_torture_stats_print(char *tt, char *tf)
2098{
2099 rcu_tasks_torture_stats_print_generic(&rcu_tasks_trace, tt, tf, "");
2100}
2101EXPORT_SYMBOL_GPL(rcu_tasks_trace_torture_stats_print);
2102#endif // !defined(CONFIG_TINY_RCU)
2103
2104struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
2105{
2106 return rcu_tasks_trace.kthread_ptr;
2107}
2108EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);
2109
2110void rcu_tasks_trace_get_gp_data(int *flags, unsigned long *gp_seq)
2111{
2112 *flags = 0;
2113 *gp_seq = rcu_seq_current(&rcu_tasks_trace.tasks_gp_seq);
2114}
2115EXPORT_SYMBOL_GPL(rcu_tasks_trace_get_gp_data);
2116
2117#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
2118static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
2119#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
2120
2121#ifndef CONFIG_TINY_RCU
2122void show_rcu_tasks_gp_kthreads(void)
2123{
2124 show_rcu_tasks_classic_gp_kthread();
2125 show_rcu_tasks_rude_gp_kthread();
2126 show_rcu_tasks_trace_gp_kthread();
2127}
2128#endif /* #ifndef CONFIG_TINY_RCU */
2129
2130#ifdef CONFIG_PROVE_RCU
2131struct rcu_tasks_test_desc {
2132 struct rcu_head rh;
2133 const char *name;
2134 bool notrun;
2135 unsigned long runstart;
2136};
2137
2138static struct rcu_tasks_test_desc tests[] = {
2139 {
2140 .name = "call_rcu_tasks()",
2141 /* If not defined, the test is skipped. */
2142 .notrun = IS_ENABLED(CONFIG_TASKS_RCU),
2143 },
2144 {
2145 .name = "call_rcu_tasks_trace()",
2146 /* If not defined, the test is skipped. */
2147 .notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
2148 }
2149};
2150
2151#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
2152static void test_rcu_tasks_callback(struct rcu_head *rhp)
2153{
2154 struct rcu_tasks_test_desc *rttd =
2155 container_of(rhp, struct rcu_tasks_test_desc, rh);
2156
2157 pr_info("Callback from %s invoked.\n", rttd->name);
2158
2159 rttd->notrun = false;
2160}
2161#endif // #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
2162
2163static void rcu_tasks_initiate_self_tests(void)
2164{
2165#ifdef CONFIG_TASKS_RCU
2166 pr_info("Running RCU Tasks wait API self tests\n");
2167 tests[0].runstart = jiffies;
2168 synchronize_rcu_tasks();
2169 call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
2170#endif
2171
2172#ifdef CONFIG_TASKS_RUDE_RCU
2173 pr_info("Running RCU Tasks Rude wait API self tests\n");
2174 synchronize_rcu_tasks_rude();
2175#endif
2176
2177#ifdef CONFIG_TASKS_TRACE_RCU
2178 pr_info("Running RCU Tasks Trace wait API self tests\n");
2179 tests[1].runstart = jiffies;
2180 synchronize_rcu_tasks_trace();
2181 call_rcu_tasks_trace(&tests[1].rh, test_rcu_tasks_callback);
2182#endif
2183}
2184
2185/*
2186 * Return: 0 - test passed
2187 * 1 - test failed, but have not timed out yet
2188 * -1 - test failed and timed out
2189 */
2190static int rcu_tasks_verify_self_tests(void)
2191{
2192 int ret = 0;
2193 int i;
2194 unsigned long bst = rcu_task_stall_timeout;
2195
2196 if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
2197 bst = RCU_TASK_BOOT_STALL_TIMEOUT;
2198 for (i = 0; i < ARRAY_SIZE(tests); i++) {
2199 while (tests[i].notrun) { // still hanging.
2200 if (time_after(jiffies, tests[i].runstart + bst)) {
2201 pr_err("%s has failed boot-time tests.\n", tests[i].name);
2202 ret = -1;
2203 break;
2204 }
2205 ret = 1;
2206 break;
2207 }
2208 }
2209 WARN_ON(ret < 0);
2210
2211 return ret;
2212}
2213
2214/*
2215 * Repeat the rcu_tasks_verify_self_tests() call once every second until the
2216 * test passes or has timed out.
2217 */
2218static struct delayed_work rcu_tasks_verify_work;
2219static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
2220{
2221 int ret = rcu_tasks_verify_self_tests();
2222
2223 if (ret <= 0)
2224 return;
2225
2226 /* Test fails but not timed out yet, reschedule another check */
2227 schedule_delayed_work(&rcu_tasks_verify_work, HZ);
2228}
2229
2230static int rcu_tasks_verify_schedule_work(void)
2231{
2232 INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
2233 rcu_tasks_verify_work_fn(NULL);
2234 return 0;
2235}
2236late_initcall(rcu_tasks_verify_schedule_work);
2237#else /* #ifdef CONFIG_PROVE_RCU */
2238static void rcu_tasks_initiate_self_tests(void) { }
2239#endif /* #else #ifdef CONFIG_PROVE_RCU */
2240
2241void __init tasks_cblist_init_generic(void)
2242{
2243 lockdep_assert_irqs_disabled();
2244 WARN_ON(num_online_cpus() > 1);
2245
2246#ifdef CONFIG_TASKS_RCU
2247 cblist_init_generic(&rcu_tasks);
2248#endif
2249
2250#ifdef CONFIG_TASKS_RUDE_RCU
2251 cblist_init_generic(&rcu_tasks_rude);
2252#endif
2253
2254#ifdef CONFIG_TASKS_TRACE_RCU
2255 cblist_init_generic(&rcu_tasks_trace);
2256#endif
2257}
2258
2259void __init rcu_init_tasks_generic(void)
2260{
2261#ifdef CONFIG_TASKS_RCU
2262 rcu_spawn_tasks_kthread();
2263#endif
2264
2265#ifdef CONFIG_TASKS_RUDE_RCU
2266 rcu_spawn_tasks_rude_kthread();
2267#endif
2268
2269#ifdef CONFIG_TASKS_TRACE_RCU
2270 rcu_spawn_tasks_trace_kthread();
2271#endif
2272
2273 // Run the self-tests.
2274 rcu_tasks_initiate_self_tests();
2275}
2276
2277#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
2278static inline void rcu_tasks_bootup_oddness(void) {}
2279#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Task-based RCU implementations.
4 *
5 * Copyright (C) 2020 Paul E. McKenney
6 */
7
8#ifdef CONFIG_TASKS_RCU_GENERIC
9#include "rcu_segcblist.h"
10
11////////////////////////////////////////////////////////////////////////
12//
13// Generic data structures.
14
15struct rcu_tasks;
16typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
17typedef void (*pregp_func_t)(struct list_head *hop);
18typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
19typedef void (*postscan_func_t)(struct list_head *hop);
20typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
21typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
22
23/**
24 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
25 * @cblist: Callback list.
26 * @lock: Lock protecting per-CPU callback list.
27 * @rtp_jiffies: Jiffies counter value for statistics.
28 * @lazy_timer: Timer to unlazify callbacks.
29 * @urgent_gp: Number of additional non-lazy grace periods.
30 * @rtp_n_lock_retries: Rough lock-contention statistic.
31 * @rtp_work: Work queue for invoking callbacks.
32 * @rtp_irq_work: IRQ work queue for deferred wakeups.
33 * @barrier_q_head: RCU callback for barrier operation.
34 * @rtp_blkd_tasks: List of tasks blocked as readers.
35 * @cpu: CPU number corresponding to this entry.
36 * @rtpp: Pointer to the rcu_tasks structure.
37 */
38struct rcu_tasks_percpu {
39 struct rcu_segcblist cblist;
40 raw_spinlock_t __private lock;
41 unsigned long rtp_jiffies;
42 unsigned long rtp_n_lock_retries;
43 struct timer_list lazy_timer;
44 unsigned int urgent_gp;
45 struct work_struct rtp_work;
46 struct irq_work rtp_irq_work;
47 struct rcu_head barrier_q_head;
48 struct list_head rtp_blkd_tasks;
49 int cpu;
50 struct rcu_tasks *rtpp;
51};
52
53/**
54 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
55 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
56 * @cbs_gbl_lock: Lock protecting callback list.
57 * @tasks_gp_mutex: Mutex protecting grace period, needed during mid-boot dead zone.
58 * @gp_func: This flavor's grace-period-wait function.
59 * @gp_state: Grace period's most recent state transition (debugging).
60 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
61 * @init_fract: Initial backoff sleep interval.
62 * @gp_jiffies: Time of last @gp_state transition.
63 * @gp_start: Most recent grace-period start in jiffies.
64 * @tasks_gp_seq: Number of grace periods completed since boot.
65 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
66 * @n_ipis_fails: Number of IPI-send failures.
67 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
68 * @lazy_jiffies: Number of jiffies to allow callbacks to be lazy.
69 * @pregp_func: This flavor's pre-grace-period function (optional).
70 * @pertask_func: This flavor's per-task scan function (optional).
71 * @postscan_func: This flavor's post-task scan function (optional).
72 * @holdouts_func: This flavor's holdout-list scan function (optional).
73 * @postgp_func: This flavor's post-grace-period function (optional).
74 * @call_func: This flavor's call_rcu()-equivalent function.
75 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
76 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
77 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
78 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
79 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
80 * @barrier_q_mutex: Serialize barrier operations.
81 * @barrier_q_count: Number of queues being waited on.
82 * @barrier_q_completion: Barrier wait/wakeup mechanism.
83 * @barrier_q_seq: Sequence number for barrier operations.
84 * @name: This flavor's textual name.
85 * @kname: This flavor's kthread name.
86 */
87struct rcu_tasks {
88 struct rcuwait cbs_wait;
89 raw_spinlock_t cbs_gbl_lock;
90 struct mutex tasks_gp_mutex;
91 int gp_state;
92 int gp_sleep;
93 int init_fract;
94 unsigned long gp_jiffies;
95 unsigned long gp_start;
96 unsigned long tasks_gp_seq;
97 unsigned long n_ipis;
98 unsigned long n_ipis_fails;
99 struct task_struct *kthread_ptr;
100 unsigned long lazy_jiffies;
101 rcu_tasks_gp_func_t gp_func;
102 pregp_func_t pregp_func;
103 pertask_func_t pertask_func;
104 postscan_func_t postscan_func;
105 holdouts_func_t holdouts_func;
106 postgp_func_t postgp_func;
107 call_rcu_func_t call_func;
108 struct rcu_tasks_percpu __percpu *rtpcpu;
109 int percpu_enqueue_shift;
110 int percpu_enqueue_lim;
111 int percpu_dequeue_lim;
112 unsigned long percpu_dequeue_gpseq;
113 struct mutex barrier_q_mutex;
114 atomic_t barrier_q_count;
115 struct completion barrier_q_completion;
116 unsigned long barrier_q_seq;
117 char *name;
118 char *kname;
119};
120
121static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
122
123#define DEFINE_RCU_TASKS(rt_name, gp, call, n) \
124static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \
125 .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \
126 .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \
127}; \
128static struct rcu_tasks rt_name = \
129{ \
130 .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \
131 .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \
132 .tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \
133 .gp_func = gp, \
134 .call_func = call, \
135 .rtpcpu = &rt_name ## __percpu, \
136 .lazy_jiffies = DIV_ROUND_UP(HZ, 4), \
137 .name = n, \
138 .percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS), \
139 .percpu_enqueue_lim = 1, \
140 .percpu_dequeue_lim = 1, \
141 .barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
142 .barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT, \
143 .kname = #rt_name, \
144}
145
146#ifdef CONFIG_TASKS_RCU
147/* Track exiting tasks in order to allow them to be waited for. */
148DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
149
150/* Report delay in synchronize_srcu() completion in rcu_tasks_postscan(). */
151static void tasks_rcu_exit_srcu_stall(struct timer_list *unused);
152static DEFINE_TIMER(tasks_rcu_exit_srcu_stall_timer, tasks_rcu_exit_srcu_stall);
153#endif
154
155/* Avoid IPIing CPUs early in the grace period. */
156#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
157static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
158module_param(rcu_task_ipi_delay, int, 0644);
159
160/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
161#define RCU_TASK_BOOT_STALL_TIMEOUT (HZ * 30)
162#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
163static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
164module_param(rcu_task_stall_timeout, int, 0644);
165#define RCU_TASK_STALL_INFO (HZ * 10)
166static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
167module_param(rcu_task_stall_info, int, 0644);
168static int rcu_task_stall_info_mult __read_mostly = 3;
169module_param(rcu_task_stall_info_mult, int, 0444);
170
171static int rcu_task_enqueue_lim __read_mostly = -1;
172module_param(rcu_task_enqueue_lim, int, 0444);
173
174static bool rcu_task_cb_adjust;
175static int rcu_task_contend_lim __read_mostly = 100;
176module_param(rcu_task_contend_lim, int, 0444);
177static int rcu_task_collapse_lim __read_mostly = 10;
178module_param(rcu_task_collapse_lim, int, 0444);
179static int rcu_task_lazy_lim __read_mostly = 32;
180module_param(rcu_task_lazy_lim, int, 0444);
181
182/* RCU tasks grace-period state for debugging. */
183#define RTGS_INIT 0
184#define RTGS_WAIT_WAIT_CBS 1
185#define RTGS_WAIT_GP 2
186#define RTGS_PRE_WAIT_GP 3
187#define RTGS_SCAN_TASKLIST 4
188#define RTGS_POST_SCAN_TASKLIST 5
189#define RTGS_WAIT_SCAN_HOLDOUTS 6
190#define RTGS_SCAN_HOLDOUTS 7
191#define RTGS_POST_GP 8
192#define RTGS_WAIT_READERS 9
193#define RTGS_INVOKE_CBS 10
194#define RTGS_WAIT_CBS 11
195#ifndef CONFIG_TINY_RCU
196static const char * const rcu_tasks_gp_state_names[] = {
197 "RTGS_INIT",
198 "RTGS_WAIT_WAIT_CBS",
199 "RTGS_WAIT_GP",
200 "RTGS_PRE_WAIT_GP",
201 "RTGS_SCAN_TASKLIST",
202 "RTGS_POST_SCAN_TASKLIST",
203 "RTGS_WAIT_SCAN_HOLDOUTS",
204 "RTGS_SCAN_HOLDOUTS",
205 "RTGS_POST_GP",
206 "RTGS_WAIT_READERS",
207 "RTGS_INVOKE_CBS",
208 "RTGS_WAIT_CBS",
209};
210#endif /* #ifndef CONFIG_TINY_RCU */
211
212////////////////////////////////////////////////////////////////////////
213//
214// Generic code.
215
216static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);
217
218/* Record grace-period phase and time. */
219static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
220{
221 rtp->gp_state = newstate;
222 rtp->gp_jiffies = jiffies;
223}
224
225#ifndef CONFIG_TINY_RCU
226/* Return state name. */
227static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
228{
229 int i = data_race(rtp->gp_state); // Let KCSAN detect update races
230 int j = READ_ONCE(i); // Prevent the compiler from reading twice
231
232 if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
233 return "???";
234 return rcu_tasks_gp_state_names[j];
235}
236#endif /* #ifndef CONFIG_TINY_RCU */
237
238// Initialize per-CPU callback lists for the specified flavor of
239// Tasks RCU. Do not enqueue callbacks before this function is invoked.
240static void cblist_init_generic(struct rcu_tasks *rtp)
241{
242 int cpu;
243 unsigned long flags;
244 int lim;
245 int shift;
246
247 if (rcu_task_enqueue_lim < 0) {
248 rcu_task_enqueue_lim = 1;
249 rcu_task_cb_adjust = true;
250 } else if (rcu_task_enqueue_lim == 0) {
251 rcu_task_enqueue_lim = 1;
252 }
253 lim = rcu_task_enqueue_lim;
254
255 if (lim > nr_cpu_ids)
256 lim = nr_cpu_ids;
257 shift = ilog2(nr_cpu_ids / lim);
258 if (((nr_cpu_ids - 1) >> shift) >= lim)
259 shift++;
260 WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
261 WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
262 smp_store_release(&rtp->percpu_enqueue_lim, lim);
263 for_each_possible_cpu(cpu) {
264 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
265
266 WARN_ON_ONCE(!rtpcp);
267 if (cpu)
268 raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
269 local_irq_save(flags); // serialize initialization
270 if (rcu_segcblist_empty(&rtpcp->cblist))
271 rcu_segcblist_init(&rtpcp->cblist);
272 local_irq_restore(flags);
273 INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
274 rtpcp->cpu = cpu;
275 rtpcp->rtpp = rtp;
276 if (!rtpcp->rtp_blkd_tasks.next)
277 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
278 }
279
280 pr_info("%s: Setting shift to %d and lim to %d rcu_task_cb_adjust=%d.\n", rtp->name,
281 data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim), rcu_task_cb_adjust);
282}
283
284// Compute wakeup time for lazy callback timer.
285static unsigned long rcu_tasks_lazy_time(struct rcu_tasks *rtp)
286{
287 return jiffies + rtp->lazy_jiffies;
288}
289
290// Timer handler that unlazifies lazy callbacks.
291static void call_rcu_tasks_generic_timer(struct timer_list *tlp)
292{
293 unsigned long flags;
294 bool needwake = false;
295 struct rcu_tasks *rtp;
296 struct rcu_tasks_percpu *rtpcp = from_timer(rtpcp, tlp, lazy_timer);
297
298 rtp = rtpcp->rtpp;
299 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
300 if (!rcu_segcblist_empty(&rtpcp->cblist) && rtp->lazy_jiffies) {
301 if (!rtpcp->urgent_gp)
302 rtpcp->urgent_gp = 1;
303 needwake = true;
304 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
305 }
306 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
307 if (needwake)
308 rcuwait_wake_up(&rtp->cbs_wait);
309}
310
311// IRQ-work handler that does deferred wakeup for call_rcu_tasks_generic().
312static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
313{
314 struct rcu_tasks *rtp;
315 struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);
316
317 rtp = rtpcp->rtpp;
318 rcuwait_wake_up(&rtp->cbs_wait);
319}
320
321// Enqueue a callback for the specified flavor of Tasks RCU.
322static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
323 struct rcu_tasks *rtp)
324{
325 int chosen_cpu;
326 unsigned long flags;
327 bool havekthread = smp_load_acquire(&rtp->kthread_ptr);
328 int ideal_cpu;
329 unsigned long j;
330 bool needadjust = false;
331 bool needwake;
332 struct rcu_tasks_percpu *rtpcp;
333
334 rhp->next = NULL;
335 rhp->func = func;
336 local_irq_save(flags);
337 rcu_read_lock();
338 ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
339 chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
340 rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
341 if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
342 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
343 j = jiffies;
344 if (rtpcp->rtp_jiffies != j) {
345 rtpcp->rtp_jiffies = j;
346 rtpcp->rtp_n_lock_retries = 0;
347 }
348 if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
349 READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
350 needadjust = true; // Defer adjustment to avoid deadlock.
351 }
352 // Queuing callbacks before initialization not yet supported.
353 if (WARN_ON_ONCE(!rcu_segcblist_is_enabled(&rtpcp->cblist)))
354 rcu_segcblist_init(&rtpcp->cblist);
355 needwake = (func == wakeme_after_rcu) ||
356 (rcu_segcblist_n_cbs(&rtpcp->cblist) == rcu_task_lazy_lim);
357 if (havekthread && !needwake && !timer_pending(&rtpcp->lazy_timer)) {
358 if (rtp->lazy_jiffies)
359 mod_timer(&rtpcp->lazy_timer, rcu_tasks_lazy_time(rtp));
360 else
361 needwake = rcu_segcblist_empty(&rtpcp->cblist);
362 }
363 if (needwake)
364 rtpcp->urgent_gp = 3;
365 rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
366 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
367 if (unlikely(needadjust)) {
368 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
369 if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
370 WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
371 WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
372 smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
373 pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
374 }
375 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
376 }
377 rcu_read_unlock();
378 /* We can't create the thread unless interrupts are enabled. */
379 if (needwake && READ_ONCE(rtp->kthread_ptr))
380 irq_work_queue(&rtpcp->rtp_irq_work);
381}
382
383// RCU callback function for rcu_barrier_tasks_generic().
384static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
385{
386 struct rcu_tasks *rtp;
387 struct rcu_tasks_percpu *rtpcp;
388
389 rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
390 rtp = rtpcp->rtpp;
391 if (atomic_dec_and_test(&rtp->barrier_q_count))
392 complete(&rtp->barrier_q_completion);
393}
394
395// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
396// Operates in a manner similar to rcu_barrier().
397static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
398{
399 int cpu;
400 unsigned long flags;
401 struct rcu_tasks_percpu *rtpcp;
402 unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);
403
404 mutex_lock(&rtp->barrier_q_mutex);
405 if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
406 smp_mb();
407 mutex_unlock(&rtp->barrier_q_mutex);
408 return;
409 }
410 rcu_seq_start(&rtp->barrier_q_seq);
411 init_completion(&rtp->barrier_q_completion);
412 atomic_set(&rtp->barrier_q_count, 2);
413 for_each_possible_cpu(cpu) {
414 if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
415 break;
416 rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
417 rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
418 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
419 if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
420 atomic_inc(&rtp->barrier_q_count);
421 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
422 }
423 if (atomic_sub_and_test(2, &rtp->barrier_q_count))
424 complete(&rtp->barrier_q_completion);
425 wait_for_completion(&rtp->barrier_q_completion);
426 rcu_seq_end(&rtp->barrier_q_seq);
427 mutex_unlock(&rtp->barrier_q_mutex);
428}
429
430// Advance callbacks and indicate whether either a grace period or
431// callback invocation is needed.
432static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
433{
434 int cpu;
435 int dequeue_limit;
436 unsigned long flags;
437 bool gpdone = poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq);
438 long n;
439 long ncbs = 0;
440 long ncbsnz = 0;
441 int needgpcb = 0;
442
443 dequeue_limit = smp_load_acquire(&rtp->percpu_dequeue_lim);
444 for (cpu = 0; cpu < dequeue_limit; cpu++) {
445 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
446
447 /* Advance and accelerate any new callbacks. */
448 if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
449 continue;
450 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
451 // Should we shrink down to a single callback queue?
452 n = rcu_segcblist_n_cbs(&rtpcp->cblist);
453 if (n) {
454 ncbs += n;
455 if (cpu > 0)
456 ncbsnz += n;
457 }
458 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
459 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
460 if (rtpcp->urgent_gp > 0 && rcu_segcblist_pend_cbs(&rtpcp->cblist)) {
461 if (rtp->lazy_jiffies)
462 rtpcp->urgent_gp--;
463 needgpcb |= 0x3;
464 } else if (rcu_segcblist_empty(&rtpcp->cblist)) {
465 rtpcp->urgent_gp = 0;
466 }
467 if (rcu_segcblist_ready_cbs(&rtpcp->cblist))
468 needgpcb |= 0x1;
469 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
470 }
471
472 // Shrink down to a single callback queue if appropriate.
473 // This is done in two stages: (1) If there are no more than
474 // rcu_task_collapse_lim callbacks on CPU 0 and none on any other
475 // CPU, limit enqueueing to CPU 0. (2) After an RCU grace period,
476 // if there has not been an increase in callbacks, limit dequeuing
477 // to CPU 0. Note the matching RCU read-side critical section in
478 // call_rcu_tasks_generic().
479 if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
480 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
481 if (rtp->percpu_enqueue_lim > 1) {
482 WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
483 smp_store_release(&rtp->percpu_enqueue_lim, 1);
484 rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
485 gpdone = false;
486 pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
487 }
488 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
489 }
490 if (rcu_task_cb_adjust && !ncbsnz && gpdone) {
491 raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
492 if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
493 WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
494 pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
495 }
496 if (rtp->percpu_dequeue_lim == 1) {
497 for (cpu = rtp->percpu_dequeue_lim; cpu < nr_cpu_ids; cpu++) {
498 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
499
500 WARN_ON_ONCE(rcu_segcblist_n_cbs(&rtpcp->cblist));
501 }
502 }
503 raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
504 }
505
506 return needgpcb;
507}
508
509// Advance callbacks and invoke any that are ready.
510static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
511{
512 int cpu;
513 int cpunext;
514 int cpuwq;
515 unsigned long flags;
516 int len;
517 struct rcu_head *rhp;
518 struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
519 struct rcu_tasks_percpu *rtpcp_next;
520
521 cpu = rtpcp->cpu;
522 cpunext = cpu * 2 + 1;
523 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
524 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
525 cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
526 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
527 cpunext++;
528 if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
529 rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
530 cpuwq = rcu_cpu_beenfullyonline(cpunext) ? cpunext : WORK_CPU_UNBOUND;
531 queue_work_on(cpuwq, system_wq, &rtpcp_next->rtp_work);
532 }
533 }
534
535 if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
536 return;
537 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
538 rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
539 rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
540 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
541 len = rcl.len;
542 for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
543 debug_rcu_head_callback(rhp);
544 local_bh_disable();
545 rhp->func(rhp);
546 local_bh_enable();
547 cond_resched();
548 }
549 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
550 rcu_segcblist_add_len(&rtpcp->cblist, -len);
551 (void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
552 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
553}
554
555// Workqueue flood to advance callbacks and invoke any that are ready.
556static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
557{
558 struct rcu_tasks *rtp;
559 struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);
560
561 rtp = rtpcp->rtpp;
562 rcu_tasks_invoke_cbs(rtp, rtpcp);
563}
564
565// Wait for one grace period.
566static void rcu_tasks_one_gp(struct rcu_tasks *rtp, bool midboot)
567{
568 int needgpcb;
569
570 mutex_lock(&rtp->tasks_gp_mutex);
571
572 // If there were none, wait a bit and start over.
573 if (unlikely(midboot)) {
574 needgpcb = 0x2;
575 } else {
576 mutex_unlock(&rtp->tasks_gp_mutex);
577 set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
578 rcuwait_wait_event(&rtp->cbs_wait,
579 (needgpcb = rcu_tasks_need_gpcb(rtp)),
580 TASK_IDLE);
581 mutex_lock(&rtp->tasks_gp_mutex);
582 }
583
584 if (needgpcb & 0x2) {
585 // Wait for one grace period.
586 set_tasks_gp_state(rtp, RTGS_WAIT_GP);
587 rtp->gp_start = jiffies;
588 rcu_seq_start(&rtp->tasks_gp_seq);
589 rtp->gp_func(rtp);
590 rcu_seq_end(&rtp->tasks_gp_seq);
591 }
592
593 // Invoke callbacks.
594 set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
595 rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));
596 mutex_unlock(&rtp->tasks_gp_mutex);
597}
598
599// RCU-tasks kthread that detects grace periods and invokes callbacks.
600static int __noreturn rcu_tasks_kthread(void *arg)
601{
602 int cpu;
603 struct rcu_tasks *rtp = arg;
604
605 for_each_possible_cpu(cpu) {
606 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
607
608 timer_setup(&rtpcp->lazy_timer, call_rcu_tasks_generic_timer, 0);
609 rtpcp->urgent_gp = 1;
610 }
611
612 /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
613 housekeeping_affine(current, HK_TYPE_RCU);
614 smp_store_release(&rtp->kthread_ptr, current); // Let GPs start!
615
616 /*
617 * Each pass through the following loop makes one check for
618 * newly arrived callbacks, and, if there are some, waits for
619 * one RCU-tasks grace period and then invokes the callbacks.
620 * This loop is terminated by the system going down. ;-)
621 */
622 for (;;) {
623 // Wait for one grace period and invoke any callbacks
624 // that are ready.
625 rcu_tasks_one_gp(rtp, false);
626
627 // Paranoid sleep to keep this from entering a tight loop.
628 schedule_timeout_idle(rtp->gp_sleep);
629 }
630}
631
632// Wait for a grace period for the specified flavor of Tasks RCU.
633static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
634{
635 /* Complain if the scheduler has not started. */
636 if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
637 "synchronize_%s() called too soon", rtp->name))
638 return;
639
640 // If the grace-period kthread is running, use it.
641 if (READ_ONCE(rtp->kthread_ptr)) {
642 wait_rcu_gp(rtp->call_func);
643 return;
644 }
645 rcu_tasks_one_gp(rtp, true);
646}
647
648/* Spawn RCU-tasks grace-period kthread. */
649static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
650{
651 struct task_struct *t;
652
653 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
654 if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
655 return;
656 smp_mb(); /* Ensure others see full kthread. */
657}
658
659#ifndef CONFIG_TINY_RCU
660
661/*
662 * Print any non-default Tasks RCU settings.
663 */
664static void __init rcu_tasks_bootup_oddness(void)
665{
666#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
667 int rtsimc;
668
669 if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
670 pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
671 rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
672 if (rtsimc != rcu_task_stall_info_mult) {
673 pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
674 rcu_task_stall_info_mult = rtsimc;
675 }
676#endif /* #ifdef CONFIG_TASKS_RCU */
677#ifdef CONFIG_TASKS_RCU
678 pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
679#endif /* #ifdef CONFIG_TASKS_RCU */
680#ifdef CONFIG_TASKS_RUDE_RCU
681 pr_info("\tRude variant of Tasks RCU enabled.\n");
682#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
683#ifdef CONFIG_TASKS_TRACE_RCU
684 pr_info("\tTracing variant of Tasks RCU enabled.\n");
685#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
686}
687
688#endif /* #ifndef CONFIG_TINY_RCU */
689
690#ifndef CONFIG_TINY_RCU
691/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
692static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
693{
694 int cpu;
695 bool havecbs = false;
696 bool haveurgent = false;
697 bool haveurgentcbs = false;
698
699 for_each_possible_cpu(cpu) {
700 struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
701
702 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)))
703 havecbs = true;
704 if (data_race(rtpcp->urgent_gp))
705 haveurgent = true;
706 if (!data_race(rcu_segcblist_empty(&rtpcp->cblist)) && data_race(rtpcp->urgent_gp))
707 haveurgentcbs = true;
708 if (havecbs && haveurgent && haveurgentcbs)
709 break;
710 }
711 pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c%c%c l:%lu %s\n",
712 rtp->kname,
713 tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
714 jiffies - data_race(rtp->gp_jiffies),
715 data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
716 data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
717 ".k"[!!data_race(rtp->kthread_ptr)],
718 ".C"[havecbs],
719 ".u"[haveurgent],
720 ".U"[haveurgentcbs],
721 rtp->lazy_jiffies,
722 s);
723}
724#endif // #ifndef CONFIG_TINY_RCU
725
726static void exit_tasks_rcu_finish_trace(struct task_struct *t);
727
728#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
729
730////////////////////////////////////////////////////////////////////////
731//
732// Shared code between task-list-scanning variants of Tasks RCU.
733
734/* Wait for one RCU-tasks grace period. */
735static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
736{
737 struct task_struct *g;
738 int fract;
739 LIST_HEAD(holdouts);
740 unsigned long j;
741 unsigned long lastinfo;
742 unsigned long lastreport;
743 bool reported = false;
744 int rtsi;
745 struct task_struct *t;
746
747 set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
748 rtp->pregp_func(&holdouts);
749
750 /*
751 * There were callbacks, so we need to wait for an RCU-tasks
752 * grace period. Start off by scanning the task list for tasks
753 * that are not already voluntarily blocked. Mark these tasks
754 * and make a list of them in holdouts.
755 */
756 set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
757 if (rtp->pertask_func) {
758 rcu_read_lock();
759 for_each_process_thread(g, t)
760 rtp->pertask_func(t, &holdouts);
761 rcu_read_unlock();
762 }
763
764 set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
765 rtp->postscan_func(&holdouts);
766
767 /*
768 * Each pass through the following loop scans the list of holdout
769 * tasks, removing any that are no longer holdouts. When the list
770 * is empty, we are done.
771 */
772 lastreport = jiffies;
773 lastinfo = lastreport;
774 rtsi = READ_ONCE(rcu_task_stall_info);
775
776 // Start off with initial wait and slowly back off to 1 HZ wait.
777 fract = rtp->init_fract;
778
779 while (!list_empty(&holdouts)) {
780 ktime_t exp;
781 bool firstreport;
782 bool needreport;
783 int rtst;
784
785 // Slowly back off waiting for holdouts
786 set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
787 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
788 schedule_timeout_idle(fract);
789 } else {
790 exp = jiffies_to_nsecs(fract);
791 __set_current_state(TASK_IDLE);
792 schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
793 }
794
795 if (fract < HZ)
796 fract++;
797
798 rtst = READ_ONCE(rcu_task_stall_timeout);
799 needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
800 if (needreport) {
801 lastreport = jiffies;
802 reported = true;
803 }
804 firstreport = true;
805 WARN_ON(signal_pending(current));
806 set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
807 rtp->holdouts_func(&holdouts, needreport, &firstreport);
808
809 // Print pre-stall informational messages if needed.
810 j = jiffies;
811 if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
812 lastinfo = j;
813 rtsi = rtsi * rcu_task_stall_info_mult;
814 pr_info("%s: %s grace period number %lu (since boot) is %lu jiffies old.\n",
815 __func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
816 }
817 }
818
819 set_tasks_gp_state(rtp, RTGS_POST_GP);
820 rtp->postgp_func(rtp);
821}
822
823#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
824
825#ifdef CONFIG_TASKS_RCU
826
827////////////////////////////////////////////////////////////////////////
828//
829// Simple variant of RCU whose quiescent states are voluntary context
830// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
831// As such, grace periods can take one good long time. There are no
832// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
833// because this implementation is intended to get the system into a safe
834// state for some of the manipulations involved in tracing and the like.
835// Finally, this implementation does not support high call_rcu_tasks()
836// rates from multiple CPUs. If this is required, per-CPU callback lists
837// will be needed.
838//
839// The implementation uses rcu_tasks_wait_gp(), which relies on function
840// pointers in the rcu_tasks structure. The rcu_spawn_tasks_kthread()
841// function sets these function pointers up so that rcu_tasks_wait_gp()
842// invokes these functions in this order:
843//
844// rcu_tasks_pregp_step():
845// Invokes synchronize_rcu() in order to wait for all in-flight
846// t->on_rq and t->nvcsw transitions to complete. This works because
847// all such transitions are carried out with interrupts disabled.
848// rcu_tasks_pertask(), invoked on every non-idle task:
849// For every runnable non-idle task other than the current one, use
850// get_task_struct() to pin down that task, snapshot that task's
851// number of voluntary context switches, and add that task to the
852// holdout list.
853// rcu_tasks_postscan():
854// Invoke synchronize_srcu() to ensure that all tasks that were
855// in the process of exiting (and which thus might not know to
856// synchronize with this RCU Tasks grace period) have completed
857// exiting.
858// check_all_holdout_tasks(), repeatedly until holdout list is empty:
859// Scans the holdout list, attempting to identify a quiescent state
860// for each task on the list. If there is a quiescent state, the
861// corresponding task is removed from the holdout list.
862// rcu_tasks_postgp():
863// Invokes synchronize_rcu() in order to ensure that all prior
864// t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
865// to have happened before the end of this RCU Tasks grace period.
866// Again, this works because all such transitions are carried out
867// with interrupts disabled.
868//
869// For each exiting task, the exit_tasks_rcu_start() and
870// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
871// read-side critical sections waited for by rcu_tasks_postscan().
872//
// Pre-grace-period update-side code is ordered before the grace
// period via the raw_spin_lock.*rcu_node(). Pre-grace-period read-side
// code is ordered before the grace period via the synchronize_rcu()
// call in rcu_tasks_pregp_step() and by the scheduler's locks and
// interrupt disabling.
878
879/* Pre-grace-period preparation. */
880static void rcu_tasks_pregp_step(struct list_head *hop)
881{
882 /*
883 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
884 * to complete. Invoking synchronize_rcu() suffices because all
885 * these transitions occur with interrupts disabled. Without this
886 * synchronize_rcu(), a read-side critical section that started
887 * before the grace period might be incorrectly seen as having
888 * started after the grace period.
889 *
890 * This synchronize_rcu() also dispenses with the need for a
891 * memory barrier on the first store to t->rcu_tasks_holdout,
892 * as it forces the store to happen after the beginning of the
893 * grace period.
894 */
895 synchronize_rcu();
896}
897
898/* Check for quiescent states since the pregp's synchronize_rcu() */
899static bool rcu_tasks_is_holdout(struct task_struct *t)
900{
901 int cpu;
902
903 /* Has the task been seen voluntarily sleeping? */
904 if (!READ_ONCE(t->on_rq))
905 return false;
906
907 /*
908 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
909 * quiescent states. But CPU boot code performed by the idle task
910 * isn't a quiescent state.
911 */
912 if (is_idle_task(t))
913 return false;
914
915 cpu = task_cpu(t);
916
917 /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
918 if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
919 return false;
920
921 return true;
922}
923
924/* Per-task initial processing. */
925static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
926{
927 if (t != current && rcu_tasks_is_holdout(t)) {
928 get_task_struct(t);
929 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
930 WRITE_ONCE(t->rcu_tasks_holdout, true);
931 list_add(&t->rcu_tasks_holdout_list, hop);
932 }
933}
934
/* Processing between scanning the tasklist and draining the holdout list. */
936static void rcu_tasks_postscan(struct list_head *hop)
937{
938 int rtsi = READ_ONCE(rcu_task_stall_info);
939
940 if (!IS_ENABLED(CONFIG_TINY_RCU)) {
941 tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
942 add_timer(&tasks_rcu_exit_srcu_stall_timer);
943 }
944
945 /*
946 * Exiting tasks may escape the tasklist scan. Those are vulnerable
	 * until their final schedule() with TASK_DEAD state. To cope with
	 * this, divide the fragile part of the exit path into two
	 * overlapping read-side critical sections:
950 *
951 * 1) An _SRCU_ read side starting before calling exit_notify(),
952 * which may remove the task from the tasklist, and ending after
953 * the final preempt_disable() call in do_exit().
954 *
955 * 2) An _RCU_ read side starting with the final preempt_disable()
956 * call in do_exit() and ending with the final call to schedule()
957 * with TASK_DEAD state.
958 *
	 * This function handles part 1), and rcu_tasks_postgp() handles
	 * part 2) with its call to synchronize_rcu().
961 */
962 synchronize_srcu(&tasks_rcu_exit_srcu);
963
964 if (!IS_ENABLED(CONFIG_TINY_RCU))
965 del_timer_sync(&tasks_rcu_exit_srcu_stall_timer);
966}
967
968/* See if tasks are still holding out, complain if so. */
969static void check_holdout_task(struct task_struct *t,
970 bool needreport, bool *firstreport)
971{
972 int cpu;
973
974 if (!READ_ONCE(t->rcu_tasks_holdout) ||
975 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
976 !rcu_tasks_is_holdout(t) ||
977 (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
978 !is_idle_task(t) && READ_ONCE(t->rcu_tasks_idle_cpu) >= 0)) {
979 WRITE_ONCE(t->rcu_tasks_holdout, false);
980 list_del_init(&t->rcu_tasks_holdout_list);
981 put_task_struct(t);
982 return;
983 }
984 rcu_request_urgent_qs_task(t);
985 if (!needreport)
986 return;
987 if (*firstreport) {
988 pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
989 *firstreport = false;
990 }
991 cpu = task_cpu(t);
992 pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
993 t, ".I"[is_idle_task(t)],
994 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
995 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
996 data_race(t->rcu_tasks_idle_cpu), cpu);
997 sched_show_task(t);
998}
999
1000/* Scan the holdout lists for tasks no longer holding out. */
1001static void check_all_holdout_tasks(struct list_head *hop,
1002 bool needreport, bool *firstreport)
1003{
1004 struct task_struct *t, *t1;
1005
1006 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
1007 check_holdout_task(t, needreport, firstreport);
1008 cond_resched();
1009 }
1010}
1011
1012/* Finish off the Tasks-RCU grace period. */
1013static void rcu_tasks_postgp(struct rcu_tasks *rtp)
1014{
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
1017 * memory barriers prior to them in the schedule() path, memory
1018 * reordering on other CPUs could cause their RCU-tasks read-side
1019 * critical sections to extend past the end of the grace period.
1020 * However, because these ->nvcsw updates are carried out with
1021 * interrupts disabled, we can use synchronize_rcu() to force the
1022 * needed ordering on all such CPUs.
1023 *
1024 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
1025 * accesses to be within the grace period, avoiding the need for
1026 * memory barriers for ->rcu_tasks_holdout accesses.
1027 *
1028 * In addition, this synchronize_rcu() waits for exiting tasks
1029 * to complete their final preempt_disable() region of execution,
1030 * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu),
	 * thereby ensuring that the whole region from tasklist removal
	 * until the final schedule() with TASK_DEAD state is treated as
	 * an RCU Tasks read-side critical section.
1034 */
1035 synchronize_rcu();
1036}
1037
1038void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
1039DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
1040
1041static void tasks_rcu_exit_srcu_stall(struct timer_list *unused)
1042{
1043#ifndef CONFIG_TINY_RCU
1044 int rtsi;
1045
1046 rtsi = READ_ONCE(rcu_task_stall_info);
1047 pr_info("%s: %s grace period number %lu (since boot) gp_state: %s is %lu jiffies old.\n",
1048 __func__, rcu_tasks.kname, rcu_tasks.tasks_gp_seq,
1049 tasks_gp_state_getname(&rcu_tasks), jiffies - rcu_tasks.gp_jiffies);
1050 pr_info("Please check any exiting tasks stuck between calls to exit_tasks_rcu_start() and exit_tasks_rcu_finish()\n");
1051 tasks_rcu_exit_srcu_stall_timer.expires = jiffies + rtsi;
1052 add_timer(&tasks_rcu_exit_srcu_stall_timer);
1053#endif // #ifndef CONFIG_TINY_RCU
1054}
1055
1056/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
1058 * @rhp: structure to be used for queueing the RCU updates.
1059 * @func: actual callback function to be invoked after the grace period
1060 *
1061 * The callback function will be invoked some time after a full grace
1062 * period elapses, in other words after all currently executing RCU
1063 * read-side critical sections have completed. call_rcu_tasks() assumes
1064 * that the read-side critical sections end at a voluntary context
1065 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into idle,
1066 * or transition to usermode execution. As such, there are no read-side
1067 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1068 * this primitive is intended to determine that all tasks have passed
1069 * through a safe state, not so much for data-structure synchronization.
1070 *
1071 * See the description of call_rcu() for more detailed information on
1072 * memory ordering guarantees.
1073 */
1074void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
1075{
1076 call_rcu_tasks_generic(rhp, func, &rcu_tasks);
1077}
1078EXPORT_SYMBOL_GPL(call_rcu_tasks);
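
/*
 * Illustrative sketch (not part of this file): asynchronously freeing an
 * object once every task has passed through a voluntary context switch,
 * idle, or usermode execution. "struct my_tramp", my_tramp_free_cb(), and
 * the unhooking step are hypothetical names used only for this example.
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		void *text;
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);
 *
 *		kfree(tp->text);
 *		kfree(tp);
 *	}
 *
 *	// ... after unhooking tp->text from all call sites:
 *	call_rcu_tasks(&tp->rh, my_tramp_free_cb);
 */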
1079
1080/**
1081 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
1082 *
1083 * Control will return to the caller some time after a full rcu-tasks
1084 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed. These
1086 * read-side critical sections are delimited by calls to schedule(),
1087 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
1088 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
1089 *
1090 * This is a very specialized primitive, intended only for a few uses in
1091 * tracing and other situations requiring manipulation of function
1092 * preambles and profiling hooks. The synchronize_rcu_tasks() function
1093 * is not (yet) intended for heavy use from multiple CPUs.
1094 *
1095 * See the description of synchronize_rcu() for more detailed information
1096 * on memory ordering guarantees.
1097 */
1098void synchronize_rcu_tasks(void)
1099{
1100 synchronize_rcu_tasks_generic(&rcu_tasks);
1101}
1102EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
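
/*
 * Illustrative sketch (not part of this file): synchronous teardown of a
 * trampoline-like object. unhook_my_tramp() and tp are hypothetical.
 *
 *	unhook_my_tramp(tp);		// No task can newly enter tp->text.
 *	synchronize_rcu_tasks();	// Wait for tasks already inside it.
 *	kfree(tp->text);		// Now safe to free.
 */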
1103
1104/**
1105 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
1106 *
1107 * Although the current implementation is guaranteed to wait, it is not
1108 * obligated to, for example, if there are no pending callbacks.
1109 */
1110void rcu_barrier_tasks(void)
1111{
1112 rcu_barrier_tasks_generic(&rcu_tasks);
1113}
1114EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
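
/*
 * Illustrative sketch (not part of this file): a module that uses
 * call_rcu_tasks() should flush its outstanding callbacks before its code
 * and data disappear. my_module_exit() and unregister_my_hooks() are
 * hypothetical.
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		unregister_my_hooks();	// Last call_rcu_tasks() has been posted.
 *		rcu_barrier_tasks();	// Wait for those callbacks to finish.
 *	}
 */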
1115
1116static int rcu_tasks_lazy_ms = -1;
1117module_param(rcu_tasks_lazy_ms, int, 0444);
1118
1119static int __init rcu_spawn_tasks_kthread(void)
1120{
1121 cblist_init_generic(&rcu_tasks);
1122 rcu_tasks.gp_sleep = HZ / 10;
1123 rcu_tasks.init_fract = HZ / 10;
1124 if (rcu_tasks_lazy_ms >= 0)
1125 rcu_tasks.lazy_jiffies = msecs_to_jiffies(rcu_tasks_lazy_ms);
1126 rcu_tasks.pregp_func = rcu_tasks_pregp_step;
1127 rcu_tasks.pertask_func = rcu_tasks_pertask;
1128 rcu_tasks.postscan_func = rcu_tasks_postscan;
1129 rcu_tasks.holdouts_func = check_all_holdout_tasks;
1130 rcu_tasks.postgp_func = rcu_tasks_postgp;
1131 rcu_spawn_tasks_kthread_generic(&rcu_tasks);
1132 return 0;
1133}
1134
1135#if !defined(CONFIG_TINY_RCU)
1136void show_rcu_tasks_classic_gp_kthread(void)
1137{
1138 show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
1139}
1140EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
1141#endif // !defined(CONFIG_TINY_RCU)
1142
1143struct task_struct *get_rcu_tasks_gp_kthread(void)
1144{
1145 return rcu_tasks.kthread_ptr;
1146}
1147EXPORT_SYMBOL_GPL(get_rcu_tasks_gp_kthread);
1148
1149/*
 * Help protect against the tasklist-scan blind spot while the
1151 * task is exiting and may be removed from the tasklist. See
1152 * corresponding synchronize_srcu() for further details.
1153 */
1154void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
1155{
1156 current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
1157}
1158
1159/*
 * Help protect against the tasklist-scan blind spot while the
1161 * task is exiting and may be removed from the tasklist. See
1162 * corresponding synchronize_srcu() for further details.
1163 */
1164void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
1165{
1166 struct task_struct *t = current;
1167
1168 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
1169}
1170
1171/*
 * Help protect against the tasklist-scan blind spot while the
1173 * task is exiting and may be removed from the tasklist. See
1174 * corresponding synchronize_srcu() for further details.
1175 */
1176void exit_tasks_rcu_finish(void)
1177{
1178 exit_tasks_rcu_stop();
1179 exit_tasks_rcu_finish_trace(current);
1180}
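
/*
 * Pairing sketch for the three functions above, restating the description
 * in rcu_tasks_postscan(): the exit path invokes exit_tasks_rcu_start()
 * before the point at which the task might be removed from the tasklist,
 * and exit_tasks_rcu_stop()/exit_tasks_rcu_finish() after the final
 * preempt_disable() in do_exit(), after which that preempt-disabled region
 * (waited for by rcu_tasks_postgp()'s synchronize_rcu()) covers the task
 * through its last schedule() with TASK_DEAD state.
 */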
1181
1182#else /* #ifdef CONFIG_TASKS_RCU */
1183void exit_tasks_rcu_start(void) { }
1184void exit_tasks_rcu_stop(void) { }
1185void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
1186#endif /* #else #ifdef CONFIG_TASKS_RCU */
1187
1188#ifdef CONFIG_TASKS_RUDE_RCU
1189
1190////////////////////////////////////////////////////////////////////////
1191//
1192// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
1193// passing an empty function to schedule_on_each_cpu(). This approach
1194// provides an asynchronous call_rcu_tasks_rude() API and batching of
1195// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
1196// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
1197// and induces otherwise unnecessary context switches on all online CPUs,
1198// whether idle or not.
1199//
1200// Callback handling is provided by the rcu_tasks_kthread() function.
1201//
1202// Ordering is provided by the scheduler's context-switch code.
1203
1204// Empty function to allow workqueues to force a context switch.
1205static void rcu_tasks_be_rude(struct work_struct *work)
1206{
1207}
1208
1209// Wait for one rude RCU-tasks grace period.
1210static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
1211{
1212 rtp->n_ipis += cpumask_weight(cpu_online_mask);
1213 schedule_on_each_cpu(rcu_tasks_be_rude);
1214}
1215
1216void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
1217DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
1218 "RCU Tasks Rude");
1219
1220/**
 * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
1222 * @rhp: structure to be used for queueing the RCU updates.
1223 * @func: actual callback function to be invoked after the grace period
1224 *
1225 * The callback function will be invoked some time after a full grace
1226 * period elapses, in other words after all currently executing RCU
1227 * read-side critical sections have completed. call_rcu_tasks_rude()
1228 * assumes that the read-side critical sections end at context switch,
1229 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
1230 * usermode execution is schedulable). As such, there are no read-side
1231 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
1232 * this primitive is intended to determine that all tasks have passed
1233 * through a safe state, not so much for data-structure synchronization.
1234 *
1235 * See the description of call_rcu() for more detailed information on
1236 * memory ordering guarantees.
1237 */
1238void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
1239{
1240 call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
1241}
1242EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
1243
1244/**
1245 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
1246 *
1247 * Control will return to the caller some time after a rude rcu-tasks
1248 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed. These
1250 * read-side critical sections are delimited by calls to schedule(),
1251 * cond_resched_tasks_rcu_qs(), userspace execution (which is a schedulable
1252 * context), and (in theory, anyway) cond_resched().
1253 *
1254 * This is a very specialized primitive, intended only for a few uses in
1255 * tracing and other situations requiring manipulation of function preambles
1256 * and profiling hooks. The synchronize_rcu_tasks_rude() function is not
1257 * (yet) intended for heavy use from multiple CPUs.
1258 *
1259 * See the description of synchronize_rcu() for more detailed information
1260 * on memory ordering guarantees.
1261 */
1262void synchronize_rcu_tasks_rude(void)
1263{
1264 synchronize_rcu_tasks_generic(&rcu_tasks_rude);
1265}
1266EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
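
/*
 * Illustrative sketch (not part of this file); unhook_my_code() and p are
 * hypothetical. Usage mirrors synchronize_rcu_tasks(), except that the
 * read-side sections waited for here end at any context switch, voluntary
 * or not (see the kernel-doc above):
 *
 *	unhook_my_code(p);
 *	synchronize_rcu_tasks_rude();
 *	kfree(p);
 */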
1267
1268/**
1269 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
1270 *
1271 * Although the current implementation is guaranteed to wait, it is not
1272 * obligated to, for example, if there are no pending callbacks.
1273 */
1274void rcu_barrier_tasks_rude(void)
1275{
1276 rcu_barrier_tasks_generic(&rcu_tasks_rude);
1277}
1278EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
1279
1280int rcu_tasks_rude_lazy_ms = -1;
1281module_param(rcu_tasks_rude_lazy_ms, int, 0444);
1282
1283static int __init rcu_spawn_tasks_rude_kthread(void)
1284{
1285 cblist_init_generic(&rcu_tasks_rude);
1286 rcu_tasks_rude.gp_sleep = HZ / 10;
1287 if (rcu_tasks_rude_lazy_ms >= 0)
1288 rcu_tasks_rude.lazy_jiffies = msecs_to_jiffies(rcu_tasks_rude_lazy_ms);
1289 rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
1290 return 0;
1291}
1292
1293#if !defined(CONFIG_TINY_RCU)
1294void show_rcu_tasks_rude_gp_kthread(void)
1295{
1296 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
1297}
1298EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
1299#endif // !defined(CONFIG_TINY_RCU)
1300
1301struct task_struct *get_rcu_tasks_rude_gp_kthread(void)
1302{
1303 return rcu_tasks_rude.kthread_ptr;
1304}
1305EXPORT_SYMBOL_GPL(get_rcu_tasks_rude_gp_kthread);
1306
1307#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
1308
1309////////////////////////////////////////////////////////////////////////
1310//
1311// Tracing variant of Tasks RCU. This variant is designed to be used
1312// to protect tracing hooks, including those of BPF. This variant
1313// therefore:
1314//
1315// 1. Has explicit read-side markers to allow finite grace periods
1316// in the face of in-kernel loops for PREEMPT=n builds.
1317//
1318// 2. Protects code in the idle loop, exception entry/exit, and
1319// CPU-hotplug code paths, similar to the capabilities of SRCU.
1320//
1321// 3. Avoids expensive read-side instructions, having overhead similar
1322// to that of Preemptible RCU.
1323//
1324// There are of course downsides. For example, the grace-period code
1325// can send IPIs to CPUs, even when those CPUs are in the idle loop or
1326// in nohz_full userspace. If needed, these downsides can be at least
1327// partially remedied.
1328//
1329// Perhaps most important, this variant of RCU does not affect the vanilla
1330// flavors, rcu_preempt and rcu_sched. The fact that RCU Tasks Trace
1331// readers can operate from idle, offline, and exception entry/exit in no
1332// way allows rcu_preempt and rcu_sched readers to also do so.
1333//
1334// The implementation uses rcu_tasks_wait_gp(), which relies on function
1335// pointers in the rcu_tasks structure. The rcu_spawn_tasks_trace_kthread()
1336// function sets these function pointers up so that rcu_tasks_wait_gp()
1337// invokes these functions in this order:
1338//
1339// rcu_tasks_trace_pregp_step():
1340// Disables CPU hotplug, adds all currently executing tasks to the
1341// holdout list, then checks the state of all tasks that blocked
1342// or were preempted within their current RCU Tasks Trace read-side
1343// critical section, adding them to the holdout list if appropriate.
1344// Finally, this function re-enables CPU hotplug.
1345// The ->pertask_func() pointer is NULL, so there is no per-task processing.
1346// rcu_tasks_trace_postscan():
1347// Invokes synchronize_rcu() to wait for late-stage exiting tasks
1348// to finish exiting.
1349// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
1350// Scans the holdout list, attempting to identify a quiescent state
1351// for each task on the list. If there is a quiescent state, the
1352// corresponding task is removed from the holdout list. Once this
1353// list is empty, the grace period has completed.
1354// rcu_tasks_trace_postgp():
1355// Provides the needed full memory barrier and does debug checks.
1356//
1357// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
1358//
1359// Pre-grace-period update-side code is ordered before the grace period
1360// via the ->cbs_lock and barriers in rcu_tasks_kthread(). Pre-grace-period
1361// read-side code is ordered before the grace period by atomic operations
1362// on .b.need_qs flag of each task involved in this process, or by scheduler
1363// context-switch ordering (for locked-down non-running readers).
1364
1365// The lockdep state must be outside of #ifdef to be useful.
1366#ifdef CONFIG_DEBUG_LOCK_ALLOC
1367static struct lock_class_key rcu_lock_trace_key;
1368struct lockdep_map rcu_trace_lock_map =
1369 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
1370EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
1371#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
1372
1373#ifdef CONFIG_TASKS_TRACE_RCU
1374
1375// Record outstanding IPIs to each CPU. No point in sending two...
1376static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
1377
1378// The number of detections of task quiescent state relying on
1379// heavyweight readers executing explicit memory barriers.
1380static unsigned long n_heavy_reader_attempts;
1381static unsigned long n_heavy_reader_updates;
1382static unsigned long n_heavy_reader_ofl_updates;
1383static unsigned long n_trc_holdouts;
1384
1385void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
1386DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
1387 "RCU Tasks Trace");
1388
1389/* Load from ->trc_reader_special.b.need_qs with proper ordering. */
1390static u8 rcu_ld_need_qs(struct task_struct *t)
1391{
1392 smp_mb(); // Enforce full grace-period ordering.
1393 return smp_load_acquire(&t->trc_reader_special.b.need_qs);
1394}
1395
1396/* Store to ->trc_reader_special.b.need_qs with proper ordering. */
1397static void rcu_st_need_qs(struct task_struct *t, u8 v)
1398{
1399 smp_store_release(&t->trc_reader_special.b.need_qs, v);
1400 smp_mb(); // Enforce full grace-period ordering.
1401}
1402
1403/*
1404 * Do a cmpxchg() on ->trc_reader_special.b.need_qs, allowing for
1405 * the four-byte operand-size restriction of some platforms.
1406 * Returns the old value, which is often ignored.
1407 */
1408u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new)
1409{
1410 union rcu_special ret;
1411 union rcu_special trs_old = READ_ONCE(t->trc_reader_special);
1412 union rcu_special trs_new = trs_old;
1413
1414 if (trs_old.b.need_qs != old)
1415 return trs_old.b.need_qs;
1416 trs_new.b.need_qs = new;
1417 ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s);
1418 return ret.b.need_qs;
1419}
1420EXPORT_SYMBOL_GPL(rcu_trc_cmpxchg_need_qs);
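
/*
 * As used in this file, ->trc_reader_special.b.need_qs takes three values:
 * zero means that the current grace period has not yet examined the task,
 * TRC_NEED_QS_CHECKED alone means the task has been examined and owes no
 * further quiescent state, and TRC_NEED_QS | TRC_NEED_QS_CHECKED means the
 * task was found within a read-side critical section and must report a
 * quiescent state on exit from that section (see
 * rcu_read_unlock_trace_special()).
 */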
1421
1422/*
1423 * If we are the last reader, signal the grace-period kthread.
1424 * Also remove from the per-CPU list of blocked tasks.
1425 */
1426void rcu_read_unlock_trace_special(struct task_struct *t)
1427{
1428 unsigned long flags;
1429 struct rcu_tasks_percpu *rtpcp;
1430 union rcu_special trs;
1431
1432 // Open-coded full-word version of rcu_ld_need_qs().
1433 smp_mb(); // Enforce full grace-period ordering.
1434 trs = smp_load_acquire(&t->trc_reader_special);
1435
1436 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb)
1437 smp_mb(); // Pairs with update-side barriers.
1438 // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
1439 if (trs.b.need_qs == (TRC_NEED_QS_CHECKED | TRC_NEED_QS)) {
1440 u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS,
1441 TRC_NEED_QS_CHECKED);
1442
1443 WARN_ONCE(result != trs.b.need_qs, "%s: result = %d", __func__, result);
1444 }
1445 if (trs.b.blocked) {
1446 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu);
1447 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1448 list_del_init(&t->trc_blkd_node);
1449 WRITE_ONCE(t->trc_reader_special.b.blocked, false);
1450 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1451 }
1452 WRITE_ONCE(t->trc_reader_nesting, 0);
1453}
1454EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
1455
1456/* Add a newly blocked reader task to its CPU's list. */
1457void rcu_tasks_trace_qs_blkd(struct task_struct *t)
1458{
1459 unsigned long flags;
1460 struct rcu_tasks_percpu *rtpcp;
1461
1462 local_irq_save(flags);
1463 rtpcp = this_cpu_ptr(rcu_tasks_trace.rtpcpu);
1464 raw_spin_lock_rcu_node(rtpcp); // irqs already disabled
1465 t->trc_blkd_cpu = smp_processor_id();
1466 if (!rtpcp->rtp_blkd_tasks.next)
1467 INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
1468 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1469 WRITE_ONCE(t->trc_reader_special.b.blocked, true);
1470 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1471}
1472EXPORT_SYMBOL_GPL(rcu_tasks_trace_qs_blkd);
1473
1474/* Add a task to the holdout list, if it is not already on the list. */
1475static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
1476{
1477 if (list_empty(&t->trc_holdout_list)) {
1478 get_task_struct(t);
1479 list_add(&t->trc_holdout_list, bhp);
1480 n_trc_holdouts++;
1481 }
1482}
1483
1484/* Remove a task from the holdout list, if it is in fact present. */
1485static void trc_del_holdout(struct task_struct *t)
1486{
1487 if (!list_empty(&t->trc_holdout_list)) {
1488 list_del_init(&t->trc_holdout_list);
1489 put_task_struct(t);
1490 n_trc_holdouts--;
1491 }
1492}
1493
1494/* IPI handler to check task state. */
1495static void trc_read_check_handler(void *t_in)
1496{
1497 int nesting;
1498 struct task_struct *t = current;
1499 struct task_struct *texp = t_in;
1500
1501 // If the task is no longer running on this CPU, leave.
1502 if (unlikely(texp != t))
1503 goto reset_ipi; // Already on holdout list, so will check later.
1504
1505 // If the task is not in a read-side critical section, and
1506 // if this is the last reader, awaken the grace-period kthread.
1507 nesting = READ_ONCE(t->trc_reader_nesting);
1508 if (likely(!nesting)) {
1509 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1510 goto reset_ipi;
1511 }
1512 // If we are racing with an rcu_read_unlock_trace(), try again later.
1513 if (unlikely(nesting < 0))
1514 goto reset_ipi;
1515
1516 // Get here if the task is in a read-side critical section.
1517 // Set its state so that it will update state for the grace-period
1518 // kthread upon exit from that critical section.
1519 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED);
1520
1521reset_ipi:
1522 // Allow future IPIs to be sent on CPU and for task.
1523 // Also order this IPI handler against any later manipulations of
1524 // the intended task.
1525 smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
1526 smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
1527}
1528
1529/* Callback function for scheduler to check locked-down task. */
1530static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
1531{
1532 struct list_head *bhp = bhp_in;
1533 int cpu = task_cpu(t);
1534 int nesting;
1535 bool ofl = cpu_is_offline(cpu);
1536
1537 if (task_curr(t) && !ofl) {
1538 // If no chance of heavyweight readers, do it the hard way.
1539 if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
1540 return -EINVAL;
1541
1542 // If heavyweight readers are enabled on the remote task,
		// we can inspect its state even though it is currently running.
1544 // However, we cannot safely change its state.
1545 n_heavy_reader_attempts++;
1546 // Check for "running" idle tasks on offline CPUs.
1547 if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
1548 return -EINVAL; // No quiescent state, do it the hard way.
1549 n_heavy_reader_updates++;
1550 nesting = 0;
1551 } else {
1552 // The task is not running, so C-language access is safe.
1553 nesting = t->trc_reader_nesting;
1554 WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
1555 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
1556 n_heavy_reader_ofl_updates++;
1557 }
1558
1559 // If not exiting a read-side critical section, mark as checked
1560 // so that the grace-period kthread will remove it from the
1561 // holdout list.
1562 if (!nesting) {
1563 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1564 return 0; // In QS, so done.
1565 }
1566 if (nesting < 0)
1567 return -EINVAL; // Reader transitioning, try again later.
1568
1569 // The task is in a read-side critical section, so set up its
1570 // state so that it will update state upon exit from that critical
1571 // section.
1572 if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED))
1573 trc_add_holdout(t, bhp);
1574 return 0;
1575}
1576
1577/* Attempt to extract the state for the specified task. */
1578static void trc_wait_for_one_reader(struct task_struct *t,
1579 struct list_head *bhp)
1580{
1581 int cpu;
1582
1583 // If a previous IPI is still in flight, let it complete.
1584 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1585 return;
1586
1587 // The current task had better be in a quiescent state.
1588 if (t == current) {
1589 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1590 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1591 return;
1592 }
1593
1594 // Attempt to nail down the task for inspection.
1595 get_task_struct(t);
1596 if (!task_call_func(t, trc_inspect_reader, bhp)) {
1597 put_task_struct(t);
1598 return;
1599 }
1600 put_task_struct(t);
1601
1602 // If this task is not yet on the holdout list, then we are in
1603 // an RCU read-side critical section. Otherwise, the invocation of
1604 // trc_add_holdout() that added it to the list did the necessary
1605 // get_task_struct(). Either way, the task cannot be freed out
1606 // from under this code.
1607
	// If the task is currently running, send an IPI; either way, add
	// it to the list.
1609 trc_add_holdout(t, bhp);
1610 if (task_curr(t) &&
1611 time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1612 // The task is currently running, so try IPIing it.
1613 cpu = task_cpu(t);
1614
1615 // If there is already an IPI outstanding, let it happen.
1616 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1617 return;
1618
1619 per_cpu(trc_ipi_to_cpu, cpu) = true;
1620 t->trc_ipi_to_cpu = cpu;
1621 rcu_tasks_trace.n_ipis++;
1622 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1623 // Just in case there is some other reason for
1624 // failure than the target CPU being offline.
1625 WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
1626 __func__, cpu);
1627 rcu_tasks_trace.n_ipis_fails++;
1628 per_cpu(trc_ipi_to_cpu, cpu) = false;
1629 t->trc_ipi_to_cpu = -1;
1630 }
1631 }
1632}
1633
1634/*
1635 * Initialize for first-round processing for the specified task.
1636 * Return false if task is NULL or already taken care of, true otherwise.
1637 */
1638static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself)
1639{
1640 // During early boot when there is only the one boot CPU, there
1641 // is no idle task for the other CPUs. Also, the grace-period
1642 // kthread is always in a quiescent state. In addition, just return
1643 // if this task is already on the list.
1644 if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list))
1645 return false;
1646
1647 rcu_st_need_qs(t, 0);
1648 t->trc_ipi_to_cpu = -1;
1649 return true;
1650}
1651
1652/* Do first-round processing for the specified task. */
1653static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop)
1654{
1655 if (rcu_tasks_trace_pertask_prep(t, true))
1656 trc_wait_for_one_reader(t, hop);
1657}
1658
1659/* Initialize for a new RCU-tasks-trace grace period. */
1660static void rcu_tasks_trace_pregp_step(struct list_head *hop)
1661{
1662 LIST_HEAD(blkd_tasks);
1663 int cpu;
1664 unsigned long flags;
1665 struct rcu_tasks_percpu *rtpcp;
1666 struct task_struct *t;
1667
1668 // There shouldn't be any old IPIs, but...
1669 for_each_possible_cpu(cpu)
1670 WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1671
1672 // Disable CPU hotplug across the CPU scan for the benefit of
1673 // any IPIs that might be needed. This also waits for all readers
1674 // in CPU-hotplug code paths.
1675 cpus_read_lock();
1676
1677 // These rcu_tasks_trace_pertask_prep() calls are serialized to
1678 // allow safe access to the hop list.
1679 for_each_online_cpu(cpu) {
1680 rcu_read_lock();
1681 t = cpu_curr_snapshot(cpu);
1682 if (rcu_tasks_trace_pertask_prep(t, true))
1683 trc_add_holdout(t, hop);
1684 rcu_read_unlock();
1685 cond_resched_tasks_rcu_qs();
1686 }
1687
1688 // Only after all running tasks have been accounted for is it
1689 // safe to take care of the tasks that have blocked within their
1690 // current RCU tasks trace read-side critical section.
1691 for_each_possible_cpu(cpu) {
1692 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, cpu);
1693 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1694 list_splice_init(&rtpcp->rtp_blkd_tasks, &blkd_tasks);
1695 while (!list_empty(&blkd_tasks)) {
1696 rcu_read_lock();
1697 t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node);
1698 list_del_init(&t->trc_blkd_node);
1699 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
1700 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1701 rcu_tasks_trace_pertask(t, hop);
1702 rcu_read_unlock();
1703 raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
1704 }
1705 raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
1706 cond_resched_tasks_rcu_qs();
1707 }
1708
1709 // Re-enable CPU hotplug now that the holdout list is populated.
1710 cpus_read_unlock();
1711}
1712
1713/*
1714 * Do intermediate processing between task and holdout scans.
1715 */
1716static void rcu_tasks_trace_postscan(struct list_head *hop)
1717{
1718 // Wait for late-stage exiting tasks to finish exiting.
1719 // These might have passed the call to exit_tasks_rcu_finish().
1720
1721 // If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
1722 synchronize_rcu();
1723 // Any tasks that exit after this point will set
1724 // TRC_NEED_QS_CHECKED in ->trc_reader_special.b.need_qs.
1725}
1726
1727/* Communicate task state back to the RCU tasks trace stall warning request. */
1728struct trc_stall_chk_rdr {
1729 int nesting;
1730 int ipi_to_cpu;
1731 u8 needqs;
1732};
1733
1734static int trc_check_slow_task(struct task_struct *t, void *arg)
1735{
1736 struct trc_stall_chk_rdr *trc_rdrp = arg;
1737
1738 if (task_curr(t) && cpu_online(task_cpu(t)))
1739 return false; // It is running, so decline to inspect it.
1740 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
1741 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
1742 trc_rdrp->needqs = rcu_ld_need_qs(t);
1743 return true;
1744}
1745
1746/* Show the state of a task stalling the current RCU tasks trace GP. */
1747static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1748{
1749 int cpu;
1750 struct trc_stall_chk_rdr trc_rdr;
1751 bool is_idle_tsk = is_idle_task(t);
1752
1753 if (*firstreport) {
1754 pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1755 *firstreport = false;
1756 }
1757 cpu = task_cpu(t);
1758 if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
1759 pr_alert("P%d: %c%c\n",
1760 t->pid,
1761 ".I"[t->trc_ipi_to_cpu >= 0],
1762 ".i"[is_idle_tsk]);
1763 else
1764 pr_alert("P%d: %c%c%c%c nesting: %d%c%c cpu: %d%s\n",
1765 t->pid,
1766 ".I"[trc_rdr.ipi_to_cpu >= 0],
1767 ".i"[is_idle_tsk],
1768 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
1769 ".B"[!!data_race(t->trc_reader_special.b.blocked)],
1770 trc_rdr.nesting,
1771 " !CN"[trc_rdr.needqs & 0x3],
1772 " ?"[trc_rdr.needqs > 0x3],
1773 cpu, cpu_online(cpu) ? "" : "(offline)");
1774 sched_show_task(t);
1775}
1776
1777/* List stalled IPIs for RCU tasks trace. */
1778static void show_stalled_ipi_trace(void)
1779{
1780 int cpu;
1781
1782 for_each_possible_cpu(cpu)
1783 if (per_cpu(trc_ipi_to_cpu, cpu))
1784 pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1785}
1786
1787/* Do one scan of the holdout list. */
1788static void check_all_holdout_tasks_trace(struct list_head *hop,
1789 bool needreport, bool *firstreport)
1790{
1791 struct task_struct *g, *t;
1792
1793 // Disable CPU hotplug across the holdout list scan for IPIs.
1794 cpus_read_lock();
1795
1796 list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1797 // If safe and needed, try to check the current task.
1798 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1799 !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED))
1800 trc_wait_for_one_reader(t, hop);
1801
1802 // If check succeeded, remove this task from the list.
1803 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
1804 rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED)
1805 trc_del_holdout(t);
1806 else if (needreport)
1807 show_stalled_task_trace(t, firstreport);
1808 cond_resched_tasks_rcu_qs();
1809 }
1810
1811 // Re-enable CPU hotplug now that the holdout list scan has completed.
1812 cpus_read_unlock();
1813
1814 if (needreport) {
1815 if (*firstreport)
1816 pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1817 show_stalled_ipi_trace();
1818 }
1819}
1820
1821static void rcu_tasks_trace_empty_fn(void *unused)
1822{
1823}
1824
1825/* Wait for grace period to complete and provide ordering. */
1826static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1827{
1828 int cpu;
1829
1830 // Wait for any lingering IPI handlers to complete. Note that
1831 // if a CPU has gone offline or transitioned to userspace in the
1832 // meantime, all IPI handlers should have been drained beforehand.
1833 // Yes, this assumes that CPUs process IPIs in order. If that ever
1834 // changes, there will need to be a recheck and/or timed wait.
1835 for_each_online_cpu(cpu)
1836 if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
1837 smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1838
1839 smp_mb(); // Caller's code must be ordered after wakeup.
1840 // Pairs with pretty much every ordering primitive.
1841}
1842
1843/* Report any needed quiescent state for this exiting task. */
1844static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1845{
1846 union rcu_special trs = READ_ONCE(t->trc_reader_special);
1847
1848 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED);
1849 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1850 if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked))
1851 rcu_read_unlock_trace_special(t);
1852 else
1853 WRITE_ONCE(t->trc_reader_nesting, 0);
1854}
1855
1856/**
 * call_rcu_tasks_trace() - Queue an RCU callback for invocation after a trace task-based grace period
1858 * @rhp: structure to be used for queueing the RCU updates.
1859 * @func: actual callback function to be invoked after the grace period
1860 *
1861 * The callback function will be invoked some time after a trace rcu-tasks
1862 * grace period elapses, in other words after all currently executing
1863 * trace rcu-tasks read-side critical sections have completed. These
1864 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
1865 * and rcu_read_unlock_trace().
1866 *
1867 * See the description of call_rcu() for more detailed information on
1868 * memory ordering guarantees.
1869 */
1870void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1871{
1872 call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1873}
1874EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
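
/*
 * Illustrative sketch (not part of this file): a reader/updater pairing in
 * the style of BPF-program lifetime management. lookup_my_obj(),
 * use_my_obj(), unpublish_my_obj(), and my_obj_free_cb() are hypothetical;
 * rcu_read_lock_trace() and rcu_read_unlock_trace() are this flavor's real
 * read-side markers.
 *
 *	// Reader, possibly running from a tracing hook:
 *	rcu_read_lock_trace();
 *	obj = lookup_my_obj(key);
 *	if (obj)
 *		use_my_obj(obj);
 *	rcu_read_unlock_trace();
 *
 *	// Updater:
 *	unpublish_my_obj(obj);		// No new lookups can find it.
 *	call_rcu_tasks_trace(&obj->rh, my_obj_free_cb);
 */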
1875
1876/**
1877 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1878 *
1879 * Control will return to the caller some time after a trace rcu-tasks
1880 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed. These read-side
1882 * critical sections are delimited by calls to rcu_read_lock_trace()
1883 * and rcu_read_unlock_trace().
1884 *
1885 * This is a very specialized primitive, intended only for a few uses in
1886 * tracing and other situations requiring manipulation of function preambles
1887 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
1888 * (yet) intended for heavy use from multiple CPUs.
1889 *
1890 * See the description of synchronize_rcu() for more detailed information
1891 * on memory ordering guarantees.
1892 */
1893void synchronize_rcu_tasks_trace(void)
1894{
1895 RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1896 synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1897}
1898EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
1899
1900/**
1901 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1902 *
1903 * Although the current implementation is guaranteed to wait, it is not
1904 * obligated to, for example, if there are no pending callbacks.
1905 */
1906void rcu_barrier_tasks_trace(void)
1907{
1908 rcu_barrier_tasks_generic(&rcu_tasks_trace);
1909}
1910EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
1911
1912int rcu_tasks_trace_lazy_ms = -1;
1913module_param(rcu_tasks_trace_lazy_ms, int, 0444);
1914
1915static int __init rcu_spawn_tasks_trace_kthread(void)
1916{
1917 cblist_init_generic(&rcu_tasks_trace);
1918 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1919 rcu_tasks_trace.gp_sleep = HZ / 10;
1920 rcu_tasks_trace.init_fract = HZ / 10;
1921 } else {
1922 rcu_tasks_trace.gp_sleep = HZ / 200;
1923 if (rcu_tasks_trace.gp_sleep <= 0)
1924 rcu_tasks_trace.gp_sleep = 1;
1925 rcu_tasks_trace.init_fract = HZ / 200;
1926 if (rcu_tasks_trace.init_fract <= 0)
1927 rcu_tasks_trace.init_fract = 1;
1928 }
1929 if (rcu_tasks_trace_lazy_ms >= 0)
1930 rcu_tasks_trace.lazy_jiffies = msecs_to_jiffies(rcu_tasks_trace_lazy_ms);
1931 rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1932 rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1933 rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1934 rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1935 rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1936 return 0;
1937}
1938
1939#if !defined(CONFIG_TINY_RCU)
1940void show_rcu_tasks_trace_gp_kthread(void)
1941{
1942 char buf[64];
1943
1944 sprintf(buf, "N%lu h:%lu/%lu/%lu",
1945 data_race(n_trc_holdouts),
1946 data_race(n_heavy_reader_ofl_updates),
1947 data_race(n_heavy_reader_updates),
1948 data_race(n_heavy_reader_attempts));
1949 show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1950}
1951EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
1952#endif // !defined(CONFIG_TINY_RCU)
1953
1954struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
1955{
1956 return rcu_tasks_trace.kthread_ptr;
1957}
1958EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);
1959
1960#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1961static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
1962#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
1963
1964#ifndef CONFIG_TINY_RCU
1965void show_rcu_tasks_gp_kthreads(void)
1966{
1967 show_rcu_tasks_classic_gp_kthread();
1968 show_rcu_tasks_rude_gp_kthread();
1969 show_rcu_tasks_trace_gp_kthread();
1970}
1971#endif /* #ifndef CONFIG_TINY_RCU */
1972
1973#ifdef CONFIG_PROVE_RCU
1974struct rcu_tasks_test_desc {
1975 struct rcu_head rh;
1976 const char *name;
1977 bool notrun;
1978 unsigned long runstart;
1979};
1980
1981static struct rcu_tasks_test_desc tests[] = {
1982 {
1983 .name = "call_rcu_tasks()",
1984 /* If not defined, the test is skipped. */
1985 .notrun = IS_ENABLED(CONFIG_TASKS_RCU),
1986 },
1987 {
1988 .name = "call_rcu_tasks_rude()",
1989 /* If not defined, the test is skipped. */
1990 .notrun = IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
1991 },
1992 {
1993 .name = "call_rcu_tasks_trace()",
1994 /* If not defined, the test is skipped. */
1995 .notrun = IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
1996 }
1997};
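
/*
 * Note on .notrun: each entry starts out "not run" only if its flavor is
 * configured, and test_rcu_tasks_callback() clears the flag once the queued
 * callback is invoked. rcu_tasks_verify_self_tests() therefore treats a
 * still-set .notrun as a hung test once the boot-time stall timeout expires,
 * while unconfigured flavors pass trivially (hence "the test is skipped").
 */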
1998
1999static void test_rcu_tasks_callback(struct rcu_head *rhp)
2000{
2001 struct rcu_tasks_test_desc *rttd =
2002 container_of(rhp, struct rcu_tasks_test_desc, rh);
2003
2004 pr_info("Callback from %s invoked.\n", rttd->name);
2005
2006 rttd->notrun = false;
2007}
2008
2009static void rcu_tasks_initiate_self_tests(void)
2010{
2011#ifdef CONFIG_TASKS_RCU
2012 pr_info("Running RCU Tasks wait API self tests\n");
2013 tests[0].runstart = jiffies;
2014 synchronize_rcu_tasks();
2015 call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
2016#endif
2017
2018#ifdef CONFIG_TASKS_RUDE_RCU
2019 pr_info("Running RCU Tasks Rude wait API self tests\n");
2020 tests[1].runstart = jiffies;
2021 synchronize_rcu_tasks_rude();
2022 call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
2023#endif
2024
2025#ifdef CONFIG_TASKS_TRACE_RCU
2026 pr_info("Running RCU Tasks Trace wait API self tests\n");
2027 tests[2].runstart = jiffies;
2028 synchronize_rcu_tasks_trace();
2029 call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
2030#endif
2031}
2032
2033/*
2034 * Return: 0 - test passed
2035 * 1 - test failed, but have not timed out yet
2036 * -1 - test failed and timed out
2037 */
2038static int rcu_tasks_verify_self_tests(void)
2039{
2040 int ret = 0;
2041 int i;
2042 unsigned long bst = rcu_task_stall_timeout;
2043
2044 if (bst <= 0 || bst > RCU_TASK_BOOT_STALL_TIMEOUT)
2045 bst = RCU_TASK_BOOT_STALL_TIMEOUT;
2046 for (i = 0; i < ARRAY_SIZE(tests); i++) {
2047 while (tests[i].notrun) { // still hanging.
2048 if (time_after(jiffies, tests[i].runstart + bst)) {
2049 pr_err("%s has failed boot-time tests.\n", tests[i].name);
2050 ret = -1;
2051 break;
2052 }
2053 ret = 1;
2054 break;
2055 }
2056 }
2057 WARN_ON(ret < 0);
2058
2059 return ret;
2060}
2061
2062/*
2063 * Repeat the rcu_tasks_verify_self_tests() call once every second until the
2064 * test passes or has timed out.
2065 */
2066static struct delayed_work rcu_tasks_verify_work;
2067static void rcu_tasks_verify_work_fn(struct work_struct *work __maybe_unused)
2068{
2069 int ret = rcu_tasks_verify_self_tests();
2070
2071 if (ret <= 0)
2072 return;
2073
	/* Test failed but has not timed out yet; reschedule another check. */
2075 schedule_delayed_work(&rcu_tasks_verify_work, HZ);
2076}
2077
2078static int rcu_tasks_verify_schedule_work(void)
2079{
2080 INIT_DELAYED_WORK(&rcu_tasks_verify_work, rcu_tasks_verify_work_fn);
2081 rcu_tasks_verify_work_fn(NULL);
2082 return 0;
2083}
2084late_initcall(rcu_tasks_verify_schedule_work);
2085#else /* #ifdef CONFIG_PROVE_RCU */
2086static void rcu_tasks_initiate_self_tests(void) { }
2087#endif /* #else #ifdef CONFIG_PROVE_RCU */
2088
2089void __init rcu_init_tasks_generic(void)
2090{
2091#ifdef CONFIG_TASKS_RCU
2092 rcu_spawn_tasks_kthread();
2093#endif
2094
2095#ifdef CONFIG_TASKS_RUDE_RCU
2096 rcu_spawn_tasks_rude_kthread();
2097#endif
2098
2099#ifdef CONFIG_TASKS_TRACE_RCU
2100 rcu_spawn_tasks_trace_kthread();
2101#endif
2102
2103 // Run the self-tests.
2104 rcu_tasks_initiate_self_tests();
2105}
2106
2107#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
2108static inline void rcu_tasks_bootup_oddness(void) {}
2109#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */