1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4 * Internal non-public definitions that provide either classic
5 * or preemptible semantics.
6 *
7 * Copyright Red Hat, 2009
8 * Copyright IBM Corporation, 2009
9 *
10 * Author: Ingo Molnar <mingo@elte.hu>
11 * Paul E. McKenney <paulmck@linux.ibm.com>
12 */
13
14#include "../locking/rtmutex_common.h"
15
16static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
17{
18 /*
19 * In order to read the offloaded state of an rdp in a safe
20 * and stable way, and to prevent its value from changing
21 * under us, we must either hold the barrier mutex, the cpu
22 * hotplug lock (read or write), or the nocb lock. Local
23 * non-preemptible reads are also safe. NOCB kthreads and
24 * timers have their own means of synchronization against the
25 * offloaded state updaters.
26 */
27 RCU_NOCB_LOCKDEP_WARN(
28 !(lockdep_is_held(&rcu_state.barrier_mutex) ||
29 (IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
30 lockdep_is_held(&rdp->nocb_lock) ||
31 lockdep_is_held(&rcu_state.nocb_mutex) ||
32 (!(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible()) &&
33 rdp == this_cpu_ptr(&rcu_data)) ||
34 rcu_current_is_nocb_kthread(rdp)),
35 "Unsafe read of RCU_NOCB offloaded state"
36 );
37
38 return rcu_segcblist_is_offloaded(&rdp->cblist);
39}
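/*
 * Illustrative sketch (not part of this file's call graph): one way for a
 * caller to satisfy the lockdep check above is the local non-preemptible
 * case, i.e. querying its own CPU's rcu_data with preemption disabled:
 *
 *	preempt_disable();
 *	offloaded = rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
 *	preempt_enable();
 *
 * Holding rcu_state.barrier_mutex or the CPU-hotplug lock also suffices.
 */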
40
41/*
42 * Check the RCU kernel configuration parameters and print informative
43 * messages about anything out of the ordinary.
44 */
45static void __init rcu_bootup_announce_oddness(void)
46{
47 if (IS_ENABLED(CONFIG_RCU_TRACE))
48 pr_info("\tRCU event tracing is enabled.\n");
49 if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
50 (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
51 pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
52 RCU_FANOUT);
53 if (rcu_fanout_exact)
54 pr_info("\tHierarchical RCU autobalancing is disabled.\n");
55 if (IS_ENABLED(CONFIG_PROVE_RCU))
56 pr_info("\tRCU lockdep checking is enabled.\n");
57 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
58 pr_info("\tRCU strict (and thus non-scalable) grace periods are enabled.\n");
59 if (RCU_NUM_LVLS >= 4)
60 pr_info("\tFour(or more)-level hierarchy is enabled.\n");
61 if (RCU_FANOUT_LEAF != 16)
62 pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
63 RCU_FANOUT_LEAF);
64 if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
65 pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
66 rcu_fanout_leaf);
67 if (nr_cpu_ids != NR_CPUS)
68 pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
69#ifdef CONFIG_RCU_BOOST
70 pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
71 kthread_prio, CONFIG_RCU_BOOST_DELAY);
72#endif
73 if (blimit != DEFAULT_RCU_BLIMIT)
74 pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
75 if (qhimark != DEFAULT_RCU_QHIMARK)
76 pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
77 if (qlowmark != DEFAULT_RCU_QLOMARK)
78 pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
79 if (qovld != DEFAULT_RCU_QOVLD)
80 pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld);
81 if (jiffies_till_first_fqs != ULONG_MAX)
82 pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
83 if (jiffies_till_next_fqs != ULONG_MAX)
84 pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
85 if (jiffies_till_sched_qs != ULONG_MAX)
86 pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
87 if (rcu_kick_kthreads)
88 pr_info("\tKick kthreads if too-long grace period.\n");
89 if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
90 pr_info("\tRCU callback double-/use-after-free debug is enabled.\n");
91 if (gp_preinit_delay)
92 pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
93 if (gp_init_delay)
94 pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
95 if (gp_cleanup_delay)
96 pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
97 if (nohz_full_patience_delay < 0) {
98 pr_info("\tRCU NOCB CPU patience negative (%d), resetting to zero.\n", nohz_full_patience_delay);
99 nohz_full_patience_delay = 0;
100 } else if (nohz_full_patience_delay > 5 * MSEC_PER_SEC) {
101 pr_info("\tRCU NOCB CPU patience too large (%d), resetting to %ld.\n", nohz_full_patience_delay, 5 * MSEC_PER_SEC);
102 nohz_full_patience_delay = 5 * MSEC_PER_SEC;
103 } else if (nohz_full_patience_delay) {
104 pr_info("\tRCU NOCB CPU patience set to %d milliseconds.\n", nohz_full_patience_delay);
105 }
106 nohz_full_patience_delay_jiffies = msecs_to_jiffies(nohz_full_patience_delay);
107 if (!use_softirq)
108 pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
109 if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
110 pr_info("\tRCU debug extended QS entry/exit.\n");
111 rcupdate_announce_bootup_oddness();
112}
113
114#ifdef CONFIG_PREEMPT_RCU
115
116static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
117static void rcu_read_unlock_special(struct task_struct *t);
118
119/*
120 * Tell them what RCU they are running.
121 */
122static void __init rcu_bootup_announce(void)
123{
124 pr_info("Preemptible hierarchical RCU implementation.\n");
125 rcu_bootup_announce_oddness();
126}
127
128/* Flags for rcu_preempt_ctxt_queue() decision table. */
129#define RCU_GP_TASKS 0x8
130#define RCU_EXP_TASKS 0x4
131#define RCU_GP_BLKD 0x2
132#define RCU_EXP_BLKD 0x1
133
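/*
 * Worked example of the encoding (illustration only): a task preempted
 * while the normal GP is already waiting on this rcu_node (->gp_tasks set)
 * and while this CPU still owes a normal quiescent state (->qsmask bit set)
 * yields blkd_state == RCU_GP_TASKS | RCU_GP_BLKD == 0xa, which the switch
 * statement in rcu_preempt_ctxt_queue() handles by queuing the task just
 * after the first task already blocking the normal grace period.
 */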
134/*
135 * Queues a task preempted within an RCU-preempt read-side critical
136 * section into the appropriate location within the ->blkd_tasks list,
137 * depending on the states of any ongoing normal and expedited grace
138 * periods. The ->gp_tasks pointer indicates which element the normal
139 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
140 * indicates which element the expedited grace period is waiting on (again,
141 * NULL if none). If a grace period is waiting on a given element in the
142 * ->blkd_tasks list, it also waits on all subsequent elements. Thus,
143 * adding a task to the tail of the list blocks any grace period that is
144 * already waiting on one of the elements. In contrast, adding a task
145 * to the head of the list won't block any grace period that is already
146 * waiting on one of the elements.
147 *
148 * This queuing is imprecise, and can sometimes make an ongoing grace
149 * period wait for a task that is not strictly speaking blocking it.
150 * Given the choice, we needlessly block a normal grace period rather than
151 * blocking an expedited grace period.
152 *
153 * Note that an endless sequence of expedited grace periods still cannot
154 * indefinitely postpone a normal grace period. Eventually, all of the
155 * fixed number of preempted tasks blocking the normal grace period that are
156 * not also blocking the expedited grace period will resume and complete
157 * their RCU read-side critical sections. At that point, the ->gp_tasks
158 * pointer will equal the ->exp_tasks pointer, at which point the end of
159 * the corresponding expedited grace period will also be the end of the
160 * normal grace period.
161 */
162static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
163 __releases(rnp->lock) /* But leaves rrupts disabled. */
164{
165 int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
166 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
167 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
168 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
169 struct task_struct *t = current;
170
171 raw_lockdep_assert_held_rcu_node(rnp);
172 WARN_ON_ONCE(rdp->mynode != rnp);
173 WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
174 /* RCU better not be waiting on newly onlined CPUs! */
175 WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
176 rdp->grpmask);
177
178 /*
179 * Decide where to queue the newly blocked task. In theory,
180 * this could be an if-statement. In practice, when I tried
181 * that, it was quite messy.
182 */
183 switch (blkd_state) {
184 case 0:
185 case RCU_EXP_TASKS:
186 case RCU_EXP_TASKS | RCU_GP_BLKD:
187 case RCU_GP_TASKS:
188 case RCU_GP_TASKS | RCU_EXP_TASKS:
189
190 /*
191 * Blocking neither GP, or first task blocking the normal
192 * GP but not blocking the already-waiting expedited GP.
193 * Queue at the head of the list to avoid unnecessarily
194 * blocking the already-waiting GPs.
195 */
196 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
197 break;
198
199 case RCU_EXP_BLKD:
200 case RCU_GP_BLKD:
201 case RCU_GP_BLKD | RCU_EXP_BLKD:
202 case RCU_GP_TASKS | RCU_EXP_BLKD:
203 case RCU_GP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:
204 case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:
205
206 /*
207 * First task arriving that blocks either GP, or first task
208 * arriving that blocks the expedited GP (with the normal
209 * GP already waiting), or a task arriving that blocks
210 * both GPs with both GPs already waiting. Queue at the
211 * tail of the list to avoid any GP waiting on any of the
212 * already queued tasks that are not blocking it.
213 */
214 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
215 break;
216
217 case RCU_EXP_TASKS | RCU_EXP_BLKD:
218 case RCU_EXP_TASKS | RCU_GP_BLKD | RCU_EXP_BLKD:
219 case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_EXP_BLKD:
220
221 /*
222 * Second or subsequent task blocking the expedited GP.
223 * The task either does not block the normal GP, or is the
224 * first task blocking the normal GP. Queue just after
225 * the first task blocking the expedited GP.
226 */
227 list_add(&t->rcu_node_entry, rnp->exp_tasks);
228 break;
229
230 case RCU_GP_TASKS | RCU_GP_BLKD:
231 case RCU_GP_TASKS | RCU_EXP_TASKS | RCU_GP_BLKD:
232
233 /*
234 * Second or subsequent task blocking the normal GP.
235 * The task does not block the expedited GP. Queue just
236 * after the first task blocking the normal GP.
237 */
238 list_add(&t->rcu_node_entry, rnp->gp_tasks);
239 break;
240
241 default:
242
243 /* Yet another exercise in excessive paranoia. */
244 WARN_ON_ONCE(1);
245 break;
246 }
247
248 /*
249 * We have now queued the task. If it was the first one to
250 * block either grace period, update the ->gp_tasks and/or
251 * ->exp_tasks pointers, respectively, to reference the newly
252 * blocked tasks.
253 */
254 if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
255 WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
256 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
257 }
258 if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
259 WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
260 WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
261 !(rnp->qsmask & rdp->grpmask));
262 WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
263 !(rnp->expmask & rdp->grpmask));
264 raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
265
266 /*
267 * Report the quiescent state for the expedited GP. This expedited
268 * GP should not be able to end until we report, so there should be
269 * no need to check for a subsequent expedited GP. (Though we are
270 * still in a quiescent state in any case.)
271 *
272 * Interrupts are disabled, so ->cpu_no_qs.b.exp cannot change.
273 */
274 if (blkd_state & RCU_EXP_BLKD && rdp->cpu_no_qs.b.exp)
275 rcu_report_exp_rdp(rdp);
276 else
277 WARN_ON_ONCE(rdp->cpu_no_qs.b.exp);
278}
279
280/*
281 * Record a preemptible-RCU quiescent state for the specified CPU.
282 * Note that this does not necessarily mean that the task currently running
283 * on the CPU is in a quiescent state: Instead, it means that the current
284 * grace period need not wait on any RCU read-side critical section that
285 * starts later on this CPU. It also means that if the current task is
286 * in an RCU read-side critical section, it has already added itself to
287 * some leaf rcu_node structure's ->blkd_tasks list. In addition to the
288 * current task, there might be any number of other tasks blocked while
289 * in an RCU read-side critical section.
290 *
291 * Unlike non-preemptible-RCU, quiescent state reports for expedited
292 * grace periods are handled separately via deferred quiescent states
293 * and context switch events.
294 *
295 * Callers to this function must disable preemption.
296 */
297static void rcu_qs(void)
298{
299 RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
300 if (__this_cpu_read(rcu_data.cpu_no_qs.b.norm)) {
301 trace_rcu_grace_period(TPS("rcu_preempt"),
302 __this_cpu_read(rcu_data.gp_seq),
303 TPS("cpuqs"));
304 __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
305 barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
306 WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
307 }
308}
309
310/*
311 * We have entered the scheduler, and the current task might soon be
312 * context-switched away from. If this task is in an RCU read-side
313 * critical section, we will no longer be able to rely on the CPU to
314 * record that fact, so we enqueue the task on the blkd_tasks list.
315 * The task will dequeue itself when it exits the outermost enclosing
316 * RCU read-side critical section. Therefore, the current grace period
317 * cannot be permitted to complete until the blkd_tasks list entries
318 * predating the current grace period drain, in other words, until
319 * rnp->gp_tasks becomes NULL.
320 *
321 * Caller must disable interrupts.
322 */
323void rcu_note_context_switch(bool preempt)
324{
325 struct task_struct *t = current;
326 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
327 struct rcu_node *rnp;
328
329 trace_rcu_utilization(TPS("Start context switch"));
330 lockdep_assert_irqs_disabled();
331 WARN_ONCE(!preempt && rcu_preempt_depth() > 0, "Voluntary context switch within RCU read-side critical section!");
332 if (rcu_preempt_depth() > 0 &&
333 !t->rcu_read_unlock_special.b.blocked) {
334
335 /* Possibly blocking in an RCU read-side critical section. */
336 rnp = rdp->mynode;
337 raw_spin_lock_rcu_node(rnp);
338 t->rcu_read_unlock_special.b.blocked = true;
339 t->rcu_blocked_node = rnp;
340
341 /*
342 * Verify the CPU's sanity, trace the preemption, and
343 * then queue the task as required based on the states
344 * of any ongoing and expedited grace periods.
345 */
346 WARN_ON_ONCE(!rcu_rdp_cpu_online(rdp));
347 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
348 trace_rcu_preempt_task(rcu_state.name,
349 t->pid,
350 (rnp->qsmask & rdp->grpmask)
351 ? rnp->gp_seq
352 : rcu_seq_snap(&rnp->gp_seq));
353 rcu_preempt_ctxt_queue(rnp, rdp);
354 } else {
355 rcu_preempt_deferred_qs(t);
356 }
357
358 /*
359 * Either we were not in an RCU read-side critical section to
360 * begin with, or we have now recorded that critical section
361 * globally. Either way, we can now note a quiescent state
362 * for this CPU. Again, if we were in an RCU read-side critical
363 * section, and if that critical section was blocking the current
364 * grace period, then the fact that the task has been enqueued
365 * means that we continue to block the current grace period.
366 */
367 rcu_qs();
368 if (rdp->cpu_no_qs.b.exp)
369 rcu_report_exp_rdp(rdp);
370 rcu_tasks_qs(current, preempt);
371 trace_rcu_utilization(TPS("End context switch"));
372}
373EXPORT_SYMBOL_GPL(rcu_note_context_switch);
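/*
 * Rough sketch of the expected calling context (illustrative only, not a
 * quote of the scheduler code): the scheduler core invokes this with
 * interrupts disabled while switching away from the current task, along
 * the lines of:
 *
 *	local_irq_disable();
 *	rcu_note_context_switch(preempt);
 *	// ...switch to the next task...
 */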
374
375/*
376 * Check for preempted RCU readers blocking the current grace period
377 * for the specified rcu_node structure. If the caller needs a reliable
378 * answer, it must hold the rcu_node's ->lock.
379 */
380static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
381{
382 return READ_ONCE(rnp->gp_tasks) != NULL;
383}
384
385/* limit value for ->rcu_read_lock_nesting. */
386#define RCU_NEST_PMAX (INT_MAX / 2)
387
388static void rcu_preempt_read_enter(void)
389{
390 WRITE_ONCE(current->rcu_read_lock_nesting, READ_ONCE(current->rcu_read_lock_nesting) + 1);
391}
392
393static int rcu_preempt_read_exit(void)
394{
395 int ret = READ_ONCE(current->rcu_read_lock_nesting) - 1;
396
397 WRITE_ONCE(current->rcu_read_lock_nesting, ret);
398 return ret;
399}
400
401static void rcu_preempt_depth_set(int val)
402{
403 WRITE_ONCE(current->rcu_read_lock_nesting, val);
404}
405
406/*
407 * Preemptible RCU implementation for rcu_read_lock().
408 * Just increment ->rcu_read_lock_nesting, shared state will be updated
409 * if we block.
410 */
411void __rcu_read_lock(void)
412{
413 rcu_preempt_read_enter();
414 if (IS_ENABLED(CONFIG_PROVE_LOCKING))
415 WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
416 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && rcu_state.gp_kthread)
417 WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
418 barrier(); /* critical section after entry code. */
419}
420EXPORT_SYMBOL_GPL(__rcu_read_lock);
421
422/*
423 * Preemptible RCU implementation for rcu_read_unlock().
424 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
425 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
426 * invoke rcu_read_unlock_special() to clean up after a context switch
427 * in an RCU read-side critical section and other special cases.
428 */
429void __rcu_read_unlock(void)
430{
431 struct task_struct *t = current;
432
433 barrier(); // critical section before exit code.
434 if (rcu_preempt_read_exit() == 0) {
435 barrier(); // critical-section exit before .s check.
436 if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
437 rcu_read_unlock_special(t);
438 }
439 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
440 int rrln = rcu_preempt_depth();
441
442 WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
443 }
444}
445EXPORT_SYMBOL_GPL(__rcu_read_unlock);
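/*
 * Illustrative usage sketch (gp and p are placeholder variables, not part
 * of this file) showing how the pair above nests. Only the outermost
 * rcu_read_unlock() can end up in rcu_read_unlock_special():
 *
 *	rcu_read_lock();		// ->rcu_read_lock_nesting: 0 -> 1
 *	rcu_read_lock();		// ->rcu_read_lock_nesting: 1 -> 2
 *	p = rcu_dereference(gp);	// still inside the critical section
 *	rcu_read_unlock();		// nesting: 2 -> 1, nothing special
 *	rcu_read_unlock();		// nesting: 1 -> 0, may invoke
 *					// rcu_read_unlock_special()
 */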
446
447/*
448 * Advance a ->blkd_tasks-list pointer to the next entry, returning
449 * NULL instead if at the end of the list.
450 */
451static struct list_head *rcu_next_node_entry(struct task_struct *t,
452 struct rcu_node *rnp)
453{
454 struct list_head *np;
455
456 np = t->rcu_node_entry.next;
457 if (np == &rnp->blkd_tasks)
458 np = NULL;
459 return np;
460}
461
462/*
463 * Return true if the specified rcu_node structure has tasks that were
464 * preempted within an RCU read-side critical section.
465 */
466static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
467{
468 return !list_empty(&rnp->blkd_tasks);
469}
470
471/*
472 * Report deferred quiescent states. The deferral time can
473 * be quite short, for example, in the case of the call from
474 * rcu_read_unlock_special().
475 */
476static notrace void
477rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
478{
479 bool empty_exp;
480 bool empty_norm;
481 bool empty_exp_now;
482 struct list_head *np;
483 bool drop_boost_mutex = false;
484 struct rcu_data *rdp;
485 struct rcu_node *rnp;
486 union rcu_special special;
487
488 /*
489 * If RCU core is waiting for this CPU to exit its critical section,
490 * report the fact that it has exited. Because irqs are disabled,
491 * t->rcu_read_unlock_special cannot change.
492 */
493 special = t->rcu_read_unlock_special;
494 rdp = this_cpu_ptr(&rcu_data);
495 if (!special.s && !rdp->cpu_no_qs.b.exp) {
496 local_irq_restore(flags);
497 return;
498 }
499 t->rcu_read_unlock_special.s = 0;
500 if (special.b.need_qs) {
501 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
502 rdp->cpu_no_qs.b.norm = false;
503 rcu_report_qs_rdp(rdp);
504 udelay(rcu_unlock_delay);
505 } else {
506 rcu_qs();
507 }
508 }
509
510 /*
511 * Respond to a request by an expedited grace period for a
512 * quiescent state from this CPU. Note that requests from
513 * tasks are handled when removing the task from the
514 * blocked-tasks list below.
515 */
516 if (rdp->cpu_no_qs.b.exp)
517 rcu_report_exp_rdp(rdp);
518
519 /* Clean up if blocked during RCU read-side critical section. */
520 if (special.b.blocked) {
521
522 /*
523 * Remove this task from the list it blocked on. The task
524 * now remains queued on the rcu_node corresponding to the
525 * CPU it first blocked on, so there is no longer any need
526 * to loop. Retain a WARN_ON_ONCE() out of sheer paranoia.
527 */
528 rnp = t->rcu_blocked_node;
529 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
530 WARN_ON_ONCE(rnp != t->rcu_blocked_node);
531 WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
532 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
533 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
534 (!empty_norm || rnp->qsmask));
535 empty_exp = sync_rcu_exp_done(rnp);
536 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
537 np = rcu_next_node_entry(t, rnp);
538 list_del_init(&t->rcu_node_entry);
539 t->rcu_blocked_node = NULL;
540 trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
541 rnp->gp_seq, t->pid);
542 if (&t->rcu_node_entry == rnp->gp_tasks)
543 WRITE_ONCE(rnp->gp_tasks, np);
544 if (&t->rcu_node_entry == rnp->exp_tasks)
545 WRITE_ONCE(rnp->exp_tasks, np);
546 if (IS_ENABLED(CONFIG_RCU_BOOST)) {
547 /* Snapshot ->boost_mtx ownership w/rnp->lock held. */
548 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
549 if (&t->rcu_node_entry == rnp->boost_tasks)
550 WRITE_ONCE(rnp->boost_tasks, np);
551 }
552
553 /*
554 * If this was the last task on the current list, and if
555 * we aren't waiting on any CPUs, report the quiescent state.
556 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
557 * so we must take a snapshot of the expedited state.
558 */
559 empty_exp_now = sync_rcu_exp_done(rnp);
560 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
561 trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
562 rnp->gp_seq,
563 0, rnp->qsmask,
564 rnp->level,
565 rnp->grplo,
566 rnp->grphi,
567 !!rnp->gp_tasks);
568 rcu_report_unblock_qs_rnp(rnp, flags);
569 } else {
570 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
571 }
572
573 /*
574 * If this was the last task on the expedited lists,
575 * then we need to report up the rcu_node hierarchy.
576 */
577 if (!empty_exp && empty_exp_now)
578 rcu_report_exp_rnp(rnp, true);
579
580 /* Unboost if we were boosted. */
581 if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
582 rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
583 } else {
584 local_irq_restore(flags);
585 }
586}
587
588/*
589 * Is a deferred quiescent-state pending, and are we also not in
590 * an RCU read-side critical section? It is the caller's responsibility
591 * to ensure it is otherwise safe to report any deferred quiescent
592 * states. The reason for this is that it is safe to report a
593 * quiescent state during context switch even though preemption
594 * is disabled. This function cannot be expected to understand these
595 * nuances, so the caller must handle them.
596 */
597static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
598{
599 return (__this_cpu_read(rcu_data.cpu_no_qs.b.exp) ||
600 READ_ONCE(t->rcu_read_unlock_special.s)) &&
601 rcu_preempt_depth() == 0;
602}
603
604/*
605 * Report a deferred quiescent state if needed and safe to do so.
606 * As with rcu_preempt_need_deferred_qs(), "safe" involves only
607 * not being in an RCU read-side critical section. The caller must
608 * evaluate safety in terms of interrupt, softirq, and preemption
609 * disabling.
610 */
611notrace void rcu_preempt_deferred_qs(struct task_struct *t)
612{
613 unsigned long flags;
614
615 if (!rcu_preempt_need_deferred_qs(t))
616 return;
617 local_irq_save(flags);
618 rcu_preempt_deferred_qs_irqrestore(t, flags);
619}
620
621/*
622 * Minimal handler to give the scheduler a chance to re-evaluate.
623 */
624static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
625{
626 struct rcu_data *rdp;
627
628 rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
629 rdp->defer_qs_iw_pending = false;
630}
631
632/*
633 * Handle special cases during rcu_read_unlock(), such as needing to
634 * notify RCU core processing or task having blocked during the RCU
635 * read-side critical section.
636 */
637static void rcu_read_unlock_special(struct task_struct *t)
638{
639 unsigned long flags;
640 bool irqs_were_disabled;
641 bool preempt_bh_were_disabled =
642 !!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
643
644 /* NMI handlers cannot block and cannot safely manipulate state. */
645 if (in_nmi())
646 return;
647
648 local_irq_save(flags);
649 irqs_were_disabled = irqs_disabled_flags(flags);
650 if (preempt_bh_were_disabled || irqs_were_disabled) {
651 bool expboost; // Expedited GP in flight or possible boosting.
652 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
653 struct rcu_node *rnp = rdp->mynode;
654
655 expboost = (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
656 (rdp->grpmask & READ_ONCE(rnp->expmask)) ||
657 (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
658 ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) ||
659 (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled &&
660 t->rcu_blocked_node);
661 // Need to defer quiescent state until everything is enabled.
662 if (use_softirq && (in_hardirq() || (expboost && !irqs_were_disabled))) {
663 // Using softirq, safe to awaken, and either the
664 // wakeup is free or there is either an expedited
665 // GP in flight or a potential need to deboost.
666 raise_softirq_irqoff(RCU_SOFTIRQ);
667 } else {
668 // Enabling BH or preempt does reschedule, so...
669 // Also if no expediting and no possible deboosting,
670 // slow is OK. Plus nohz_full CPUs eventually get
671 // tick enabled.
672 set_tsk_need_resched(current);
673 set_preempt_need_resched();
674 if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
675 expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
676 // Get scheduler to re-evaluate and call hooks.
677 // If !IRQ_WORK, FQS scan will eventually IPI.
678 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
679 IS_ENABLED(CONFIG_PREEMPT_RT))
680 rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(
681 rcu_preempt_deferred_qs_handler);
682 else
683 init_irq_work(&rdp->defer_qs_iw,
684 rcu_preempt_deferred_qs_handler);
685 rdp->defer_qs_iw_pending = true;
686 irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
687 }
688 }
689 local_irq_restore(flags);
690 return;
691 }
692 rcu_preempt_deferred_qs_irqrestore(t, flags);
693}
694
695/*
696 * Check that the list of blocked tasks for the newly completed grace
697 * period is in fact empty. It is a serious bug to complete a grace
698 * period that still has RCU readers blocked! This function must be
699 * invoked -before- updating this rnp's ->gp_seq.
700 *
701 * Also, if there are blocked tasks on the list, they automatically
702 * block the newly created grace period, so set up ->gp_tasks accordingly.
703 */
704static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
705{
706 struct task_struct *t;
707
708 RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
709 raw_lockdep_assert_held_rcu_node(rnp);
710 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
711 dump_blkd_tasks(rnp, 10);
712 if (rcu_preempt_has_tasks(rnp) &&
713 (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
714 WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
715 t = container_of(rnp->gp_tasks, struct task_struct,
716 rcu_node_entry);
717 trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
718 rnp->gp_seq, t->pid);
719 }
720 WARN_ON_ONCE(rnp->qsmask);
721}
722
723/*
724 * Check for a quiescent state from the current CPU, including voluntary
725 * context switches for Tasks RCU. When a task blocks, the task is
726 * recorded in the corresponding CPU's rcu_node structure, which is checked
727 * elsewhere, hence this function need only check for quiescent states
728 * related to the current CPU, not to those related to tasks.
729 */
730static void rcu_flavor_sched_clock_irq(int user)
731{
732 struct task_struct *t = current;
733
734 lockdep_assert_irqs_disabled();
735 if (rcu_preempt_depth() > 0 ||
736 (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
737 /* No QS, force context switch if deferred. */
738 if (rcu_preempt_need_deferred_qs(t)) {
739 set_tsk_need_resched(t);
740 set_preempt_need_resched();
741 }
742 } else if (rcu_preempt_need_deferred_qs(t)) {
743 rcu_preempt_deferred_qs(t); /* Report deferred QS. */
744 return;
745 } else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
746 rcu_qs(); /* Report immediate QS. */
747 return;
748 }
749
750 /* If GP is oldish, ask for help from rcu_read_unlock_special(). */
751 if (rcu_preempt_depth() > 0 &&
752 __this_cpu_read(rcu_data.core_needs_qs) &&
753 __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
754 !t->rcu_read_unlock_special.b.need_qs &&
755 time_after(jiffies, rcu_state.gp_start + HZ))
756 t->rcu_read_unlock_special.b.need_qs = true;
757}
758
759/*
760 * Check for a task exiting while in a preemptible-RCU read-side
761 * critical section, clean up if so. No need to issue warnings, as
762 * debug_check_no_locks_held() already does this if lockdep is enabled.
763 * Besides, if this function does anything other than just immediately
764 * return, there was a bug of some sort. Spewing warnings from this
765 * function is like as not to simply obscure important prior warnings.
766 */
767void exit_rcu(void)
768{
769 struct task_struct *t = current;
770
771	if (unlikely(!list_empty(&current->rcu_node_entry))) {
772 rcu_preempt_depth_set(1);
773 barrier();
774 WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
775 } else if (unlikely(rcu_preempt_depth())) {
776 rcu_preempt_depth_set(1);
777 } else {
778 return;
779 }
780 __rcu_read_unlock();
781 rcu_preempt_deferred_qs(current);
782}
783
784/*
785 * Dump the blocked-tasks state, but limit the list dump to the
786 * specified number of elements.
787 */
788static void
789dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
790{
791 int cpu;
792 int i;
793 struct list_head *lhp;
794 struct rcu_data *rdp;
795 struct rcu_node *rnp1;
796
797 raw_lockdep_assert_held_rcu_node(rnp);
798 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
799 __func__, rnp->grplo, rnp->grphi, rnp->level,
800 (long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs);
801 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
802 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
803 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
804 pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
805 __func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),
806 READ_ONCE(rnp->exp_tasks));
807 pr_info("%s: ->blkd_tasks", __func__);
808 i = 0;
809 list_for_each(lhp, &rnp->blkd_tasks) {
810 pr_cont(" %p", lhp);
811 if (++i >= ncheck)
812 break;
813 }
814 pr_cont("\n");
815 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
816 rdp = per_cpu_ptr(&rcu_data, cpu);
817 pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
818 cpu, ".o"[rcu_rdp_cpu_online(rdp)],
819 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_state,
820 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_state);
821 }
822}
823
824#else /* #ifdef CONFIG_PREEMPT_RCU */
825
826/*
827 * If strict grace periods are enabled, and if the calling
828 * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
829 * report that quiescent state and, if requested, spin for a bit.
830 */
831void rcu_read_unlock_strict(void)
832{
833 struct rcu_data *rdp;
834
835 if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
836 return;
837 rdp = this_cpu_ptr(&rcu_data);
838 rdp->cpu_no_qs.b.norm = false;
839 rcu_report_qs_rdp(rdp);
840 udelay(rcu_unlock_delay);
841}
842EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);
843
844/*
845 * Tell them what RCU they are running.
846 */
847static void __init rcu_bootup_announce(void)
848{
849 pr_info("Hierarchical RCU implementation.\n");
850 rcu_bootup_announce_oddness();
851}
852
853/*
854 * Note a quiescent state for PREEMPTION=n. Because we do not need to know
855 * how many quiescent states passed, just if there was at least one since
856 * the start of the grace period, this just sets a flag. The caller must
857 * have disabled preemption.
858 */
859static void rcu_qs(void)
860{
861 RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
862 if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
863 return;
864 trace_rcu_grace_period(TPS("rcu_sched"),
865 __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
866 __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
867 if (__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
868 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
869}
870
871/*
872 * Register an urgently needed quiescent state. If there is an
873 * emergency, invoke rcu_momentary_eqs() to do a heavy-weight
874 * dyntick-idle quiescent state visible to other CPUs, which will in
875 * some cases serve for expedited as well as normal grace periods.
876 * Either way, register a lightweight quiescent state.
877 */
878void rcu_all_qs(void)
879{
880 unsigned long flags;
881
882 if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
883 return;
884 preempt_disable(); // For CONFIG_PREEMPT_COUNT=y kernels
885 /* Load rcu_urgent_qs before other flags. */
886 if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
887 preempt_enable();
888 return;
889 }
890 this_cpu_write(rcu_data.rcu_urgent_qs, false);
891 if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
892 local_irq_save(flags);
893 rcu_momentary_eqs();
894 local_irq_restore(flags);
895 }
896 rcu_qs();
897 preempt_enable();
898}
899EXPORT_SYMBOL_GPL(rcu_all_qs);
900
901/*
902 * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
903 */
904void rcu_note_context_switch(bool preempt)
905{
906 trace_rcu_utilization(TPS("Start context switch"));
907 rcu_qs();
908 /* Load rcu_urgent_qs before other flags. */
909 if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
910 goto out;
911 this_cpu_write(rcu_data.rcu_urgent_qs, false);
912 if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
913 rcu_momentary_eqs();
914out:
915 rcu_tasks_qs(current, preempt);
916 trace_rcu_utilization(TPS("End context switch"));
917}
918EXPORT_SYMBOL_GPL(rcu_note_context_switch);
919
920/*
921 * Because preemptible RCU does not exist, there are never any preempted
922 * RCU readers.
923 */
924static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
925{
926 return 0;
927}
928
929/*
930 * Because there is no preemptible RCU, there can be no readers blocked.
931 */
932static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
933{
934 return false;
935}
936
937/*
938 * Because there is no preemptible RCU, there can be no deferred quiescent
939 * states.
940 */
941static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
942{
943 return false;
944}
945
946// Except that we do need to respond to a request by an expedited
947// grace period for a quiescent state from this CPU. Note that in
948// non-preemptible kernels, there can be no context switches within RCU
949// read-side critical sections, which in turn means that the leaf rcu_node
950// structure's blocked-tasks list is always empty. There is therefore no need to
951// actually check it. Instead, a quiescent state from this CPU suffices,
952// and this function is only called from such a quiescent state.
953notrace void rcu_preempt_deferred_qs(struct task_struct *t)
954{
955 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
956
957 if (READ_ONCE(rdp->cpu_no_qs.b.exp))
958 rcu_report_exp_rdp(rdp);
959}
960
961/*
962 * Because there is no preemptible RCU, there can be no readers blocked,
963 * so there is no need to check for blocked tasks. So check only for
964 * bogus qsmask values.
965 */
966static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
967{
968 WARN_ON_ONCE(rnp->qsmask);
969}
970
971/*
972 * Check to see if this CPU is in a non-context-switch quiescent state,
973 * namely user mode and idle loop.
974 */
975static void rcu_flavor_sched_clock_irq(int user)
976{
977 if (user || rcu_is_cpu_rrupt_from_idle()) {
978
979 /*
980 * Get here if this CPU took its interrupt from user
981 * mode or from the idle loop, and if this is not a
982 * nested interrupt. In this case, the CPU is in
983 * a quiescent state, so note it.
984 *
985 * No memory barrier is required here because rcu_qs()
986 * references only CPU-local variables that other CPUs
987 * neither access nor modify, at least not while the
988 * corresponding CPU is online.
989 */
990 rcu_qs();
991 }
992}
993
994/*
995 * Because preemptible RCU does not exist, tasks cannot possibly exit
996 * while in preemptible RCU read-side critical sections.
997 */
998void exit_rcu(void)
999{
1000}
1001
1002/*
1003 * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
1004 */
1005static void
1006dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
1007{
1008 WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
1009}
1010
1011#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
1012
1013/*
1014 * If boosting, set rcuc kthreads to realtime priority.
1015 */
1016static void rcu_cpu_kthread_setup(unsigned int cpu)
1017{
1018 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1019#ifdef CONFIG_RCU_BOOST
1020 struct sched_param sp;
1021
1022 sp.sched_priority = kthread_prio;
1023 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1024#endif /* #ifdef CONFIG_RCU_BOOST */
1025
1026 WRITE_ONCE(rdp->rcuc_activity, jiffies);
1027}
1028
1029static bool rcu_is_callbacks_nocb_kthread(struct rcu_data *rdp)
1030{
1031#ifdef CONFIG_RCU_NOCB_CPU
1032 return rdp->nocb_cb_kthread == current;
1033#else
1034 return false;
1035#endif
1036}
1037
1038/*
1039 * Is the current CPU running the RCU-callbacks kthread?
1040 * Caller must have preemption disabled.
1041 */
1042static bool rcu_is_callbacks_kthread(struct rcu_data *rdp)
1043{
1044 return rdp->rcu_cpu_kthread_task == current ||
1045 rcu_is_callbacks_nocb_kthread(rdp);
1046}
1047
1048#ifdef CONFIG_RCU_BOOST
1049
1050/*
1051 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1052 * or ->boost_tasks, advancing the pointer to the next task in the
1053 * ->blkd_tasks list.
1054 *
1055 * Note that irqs must be enabled: boosting the task can block.
1056 * Returns 1 if there are more tasks needing to be boosted.
1057 */
1058static int rcu_boost(struct rcu_node *rnp)
1059{
1060 unsigned long flags;
1061 struct task_struct *t;
1062 struct list_head *tb;
1063
1064 if (READ_ONCE(rnp->exp_tasks) == NULL &&
1065 READ_ONCE(rnp->boost_tasks) == NULL)
1066 return 0; /* Nothing left to boost. */
1067
1068 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1069
1070 /*
1071 * Recheck under the lock: all tasks in need of boosting
1072 * might exit their RCU read-side critical sections on their own.
1073 */
1074 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1075 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1076 return 0;
1077 }
1078
1079 /*
1080 * Preferentially boost tasks blocking expedited grace periods.
1081 * This cannot starve the normal grace periods because a second
1082 * expedited grace period must boost all blocked tasks, including
1083 * those blocking the pre-existing normal grace period.
1084 */
1085 if (rnp->exp_tasks != NULL)
1086 tb = rnp->exp_tasks;
1087 else
1088 tb = rnp->boost_tasks;
1089
1090 /*
1091 * We boost task t by manufacturing an rt_mutex that appears to
1092 * be held by task t. We leave a pointer to that rt_mutex where
1093 * task t can find it, and task t will release the mutex when it
1094 * exits its outermost RCU read-side critical section. Then
1095 * simply acquiring this artificial rt_mutex will boost task
1096 * t's priority. (Thanks to tglx for suggesting this approach!)
1097 *
1098 * Note that task t must acquire rnp->lock to remove itself from
1099 * the ->blkd_tasks list, which it will do from exit() if from
1100 * nowhere else. We therefore are guaranteed that task t will
1101 * stay around at least until we drop rnp->lock. Note that
1102 * rnp->lock also resolves races between our priority boosting
1103 * and task t's exiting its outermost RCU read-side critical
1104 * section.
1105 */
1106 t = container_of(tb, struct task_struct, rcu_node_entry);
1107 rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
1108 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1109 /* Lock only for side effect: boosts task t's priority. */
1110 rt_mutex_lock(&rnp->boost_mtx);
1111 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
1112 rnp->n_boosts++;
1113
1114 return READ_ONCE(rnp->exp_tasks) != NULL ||
1115 READ_ONCE(rnp->boost_tasks) != NULL;
1116}
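/*
 * The boosting sequence above in miniature (illustration of the calls
 * already made by rcu_boost(), not additional logic):
 *
 *	rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t); // t "holds" it
 *	rt_mutex_lock(&rnp->boost_mtx);   // PI boosts t until it releases the
 *					  // mutex in rcu_read_unlock_special()
 *	rt_mutex_unlock(&rnp->boost_mtx); // keep lockdep happy
 */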
1117
1118/*
1119 * Priority-boosting kthread, one per leaf rcu_node.
1120 */
1121static int rcu_boost_kthread(void *arg)
1122{
1123 struct rcu_node *rnp = (struct rcu_node *)arg;
1124 int spincnt = 0;
1125 int more2boost;
1126
1127 trace_rcu_utilization(TPS("Start boost kthread@init"));
1128 for (;;) {
1129 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING);
1130 trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
1131 rcu_wait(READ_ONCE(rnp->boost_tasks) ||
1132 READ_ONCE(rnp->exp_tasks));
1133 trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
1134 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING);
1135 more2boost = rcu_boost(rnp);
1136 if (more2boost)
1137 spincnt++;
1138 else
1139 spincnt = 0;
1140 if (spincnt > 10) {
1141 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING);
1142 trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
1143 schedule_timeout_idle(2);
1144 trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
1145 spincnt = 0;
1146 }
1147 }
1148 /* NOTREACHED */
1149 trace_rcu_utilization(TPS("End boost kthread@notreached"));
1150 return 0;
1151}
1152
1153/*
1154 * Check to see if it is time to start boosting RCU readers that are
1155 * blocking the current grace period, and, if so, tell the per-rcu_node
1156 * kthread to start boosting them. If there is an expedited grace
1157 * period in progress, it is always time to boost.
1158 *
1159 * The caller must hold rnp->lock, which this function releases.
1160 * The ->boost_kthread_task is immortal, so we don't need to worry
1161 * about it going away.
1162 */
1163static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1164 __releases(rnp->lock)
1165{
1166 raw_lockdep_assert_held_rcu_node(rnp);
1167 if (!rnp->boost_kthread_task ||
1168 (!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) {
1169 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1170 return;
1171 }
1172 if (rnp->exp_tasks != NULL ||
1173 (rnp->gp_tasks != NULL &&
1174 rnp->boost_tasks == NULL &&
1175 rnp->qsmask == 0 &&
1176 (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld ||
1177 IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)))) {
1178 if (rnp->exp_tasks == NULL)
1179 WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks);
1180 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1181 rcu_wake_cond(rnp->boost_kthread_task,
1182 READ_ONCE(rnp->boost_kthread_status));
1183 } else {
1184 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1185 }
1186}
1187
1188#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
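/*
 * Worked example (values chosen purely for illustration): with
 * CONFIG_RCU_BOOST_DELAY=500 and HZ=250, this evaluates to
 * DIV_ROUND_UP(500 * 250, 1000) == 125 jiffies, that is, the 500ms boost
 * delay converted to jiffies and rounded up.
 */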
1189
1190/*
1191 * Do priority-boost accounting for the start of a new grace period.
1192 */
1193static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1194{
1195 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1196}
1197
1198/*
1199 * Create an RCU-boost kthread for the specified node if one does not
1200 * already exist. We only create this kthread for preemptible RCU.
1201 */
1202static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
1203{
1204 unsigned long flags;
1205 int rnp_index = rnp - rcu_get_root();
1206 struct sched_param sp;
1207 struct task_struct *t;
1208
1209 if (rnp->boost_kthread_task)
1210 return;
1211
1212 t = kthread_create(rcu_boost_kthread, (void *)rnp,
1213 "rcub/%d", rnp_index);
1214 if (WARN_ON_ONCE(IS_ERR(t)))
1215 return;
1216
1217 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1218 rnp->boost_kthread_task = t;
1219 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1220 sp.sched_priority = kthread_prio;
1221 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1222 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1223}
1224
1225static struct task_struct *rcu_boost_task(struct rcu_node *rnp)
1226{
1227 return READ_ONCE(rnp->boost_kthread_task);
1228}
1229
1230#else /* #ifdef CONFIG_RCU_BOOST */
1231
1232static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1233 __releases(rnp->lock)
1234{
1235 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1236}
1237
1238static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1239{
1240}
1241
1242static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
1243{
1244}
1245
1246static struct task_struct *rcu_boost_task(struct rcu_node *rnp)
1247{
1248 return NULL;
1249}
1250#endif /* #else #ifdef CONFIG_RCU_BOOST */
1251
1252/*
1253 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
1254 * grace-period kthread will do force_quiescent_state() processing?
1255 * The idea is to avoid waking up RCU core processing on such a
1256 * CPU unless the grace period has extended for too long.
1257 *
1258 * This code relies on the fact that all NO_HZ_FULL CPUs are also
1259 * RCU_NOCB_CPU CPUs.
1260 */
1261static bool rcu_nohz_full_cpu(void)
1262{
1263#ifdef CONFIG_NO_HZ_FULL
1264 if (tick_nohz_full_cpu(smp_processor_id()) &&
1265 (!rcu_gp_in_progress() ||
1266 time_before(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
1267 return true;
1268#endif /* #ifdef CONFIG_NO_HZ_FULL */
1269 return false;
1270}
1271
1272/*
1273 * Bind the RCU grace-period kthreads to the housekeeping CPU.
1274 */
1275static void rcu_bind_gp_kthread(void)
1276{
1277 if (!tick_nohz_full_enabled())
1278 return;
1279 housekeeping_affine(current, HK_TYPE_RCU);
1280}
366 * for the specified rcu_node structure. If the caller needs a reliable
367 * answer, it must hold the rcu_node's ->lock.
368 */
369static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
370{
371 return READ_ONCE(rnp->gp_tasks) != NULL;
372}
373
374/* limit value for ->rcu_read_lock_nesting. */
375#define RCU_NEST_PMAX (INT_MAX / 2)
376
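/*
 * Helpers that adjust the current task's ->rcu_read_lock_nesting counter.
 * They use READ_ONCE()/WRITE_ONCE() so that the nesting depth is always
 * read and written with single, untorn accesses.
 */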
377static void rcu_preempt_read_enter(void)
378{
379 WRITE_ONCE(current->rcu_read_lock_nesting, READ_ONCE(current->rcu_read_lock_nesting) + 1);
380}
381
382static int rcu_preempt_read_exit(void)
383{
384 int ret = READ_ONCE(current->rcu_read_lock_nesting) - 1;
385
386 WRITE_ONCE(current->rcu_read_lock_nesting, ret);
387 return ret;
388}
389
390static void rcu_preempt_depth_set(int val)
391{
392 WRITE_ONCE(current->rcu_read_lock_nesting, val);
393}
394
395/*
396 * Preemptible RCU implementation for rcu_read_lock().
397 * Just increment ->rcu_read_lock_nesting, shared state will be updated
398 * if we block.
399 */
400void __rcu_read_lock(void)
401{
402 rcu_preempt_read_enter();
403 if (IS_ENABLED(CONFIG_PROVE_LOCKING))
404 WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
405 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && rcu_state.gp_kthread)
406 WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
407 barrier(); /* critical section after entry code. */
408}
409EXPORT_SYMBOL_GPL(__rcu_read_lock);
410
411/*
412 * Preemptible RCU implementation for rcu_read_unlock().
413 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
414 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
415 * invoke rcu_read_unlock_special() to clean up after a context switch
416 * in an RCU read-side critical section and other special cases.
417 */
418void __rcu_read_unlock(void)
419{
420 struct task_struct *t = current;
421
422 barrier(); // critical section before exit code.
423 if (rcu_preempt_read_exit() == 0) {
424 barrier(); // critical-section exit before .s check.
425 if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
426 rcu_read_unlock_special(t);
427 }
428 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
429 int rrln = rcu_preempt_depth();
430
431 WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
432 }
433}
434EXPORT_SYMBOL_GPL(__rcu_read_unlock);
435
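/*
 * Illustrative sketch (not part of the kernel build) of the reader
 * pattern that the two primitives above implement, assuming a
 * hypothetical pointer "gp" published elsewhere via rcu_assign_pointer():
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);	// hypothetical consumer
 *	rcu_read_unlock();
 *
 * In CONFIG_PREEMPT_RCU kernels, rcu_read_lock() and rcu_read_unlock()
 * map onto __rcu_read_lock() and __rcu_read_unlock() above.
 */
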
436/*
437 * Advance a ->blkd_tasks-list pointer to the next entry, returning
438 * NULL instead if at the end of the list.
439 */
440static struct list_head *rcu_next_node_entry(struct task_struct *t,
441 struct rcu_node *rnp)
442{
443 struct list_head *np;
444
445 np = t->rcu_node_entry.next;
446 if (np == &rnp->blkd_tasks)
447 np = NULL;
448 return np;
449}
450
451/*
452 * Return true if the specified rcu_node structure has tasks that were
453 * preempted within an RCU read-side critical section.
454 */
455static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
456{
457 return !list_empty(&rnp->blkd_tasks);
458}
459
460/*
461 * Report deferred quiescent states. The deferral time can
462 * be quite short, for example, in the case of the call from
463 * rcu_read_unlock_special().
464 */
465static notrace void
466rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
467{
468 bool empty_exp;
469 bool empty_norm;
470 bool empty_exp_now;
471 struct list_head *np;
472 bool drop_boost_mutex = false;
473 struct rcu_data *rdp;
474 struct rcu_node *rnp;
475 union rcu_special special;
476
477 /*
478 * If RCU core is waiting for this CPU to exit its critical section,
479 * report the fact that it has exited. Because irqs are disabled,
480 * t->rcu_read_unlock_special cannot change.
481 */
482 special = t->rcu_read_unlock_special;
483 rdp = this_cpu_ptr(&rcu_data);
484 if (!special.s && !rdp->cpu_no_qs.b.exp) {
485 local_irq_restore(flags);
486 return;
487 }
488 t->rcu_read_unlock_special.s = 0;
489 if (special.b.need_qs) {
490 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
491 rdp->cpu_no_qs.b.norm = false;
492 rcu_report_qs_rdp(rdp);
493 udelay(rcu_unlock_delay);
494 } else {
495 rcu_qs();
496 }
497 }
498
499 /*
500 * Respond to a request by an expedited grace period for a
501 * quiescent state from this CPU. Note that requests from
502 * tasks are handled when removing the task from the
503 * blocked-tasks list below.
504 */
505 if (rdp->cpu_no_qs.b.exp)
506 rcu_report_exp_rdp(rdp);
507
508 /* Clean up if blocked during RCU read-side critical section. */
509 if (special.b.blocked) {
510
511 /*
512 * Remove this task from the list it blocked on. The task
513 * now remains queued on the rcu_node corresponding to the
514 * CPU it first blocked on, so there is no longer any need
515 * to loop. Retain a WARN_ON_ONCE() out of sheer paranoia.
516 */
517 rnp = t->rcu_blocked_node;
518 raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
519 WARN_ON_ONCE(rnp != t->rcu_blocked_node);
520 WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
521 empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
522 WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
523 (!empty_norm || rnp->qsmask));
524 empty_exp = sync_rcu_exp_done(rnp);
525 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
526 np = rcu_next_node_entry(t, rnp);
527 list_del_init(&t->rcu_node_entry);
528 t->rcu_blocked_node = NULL;
529 trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
530 rnp->gp_seq, t->pid);
531 if (&t->rcu_node_entry == rnp->gp_tasks)
532 WRITE_ONCE(rnp->gp_tasks, np);
533 if (&t->rcu_node_entry == rnp->exp_tasks)
534 WRITE_ONCE(rnp->exp_tasks, np);
535 if (IS_ENABLED(CONFIG_RCU_BOOST)) {
536 /* Snapshot ->boost_mtx ownership w/rnp->lock held. */
537 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
538 if (&t->rcu_node_entry == rnp->boost_tasks)
539 WRITE_ONCE(rnp->boost_tasks, np);
540 }
541
542 /*
543 * If this was the last task on the current list, and if
544 * we aren't waiting on any CPUs, report the quiescent state.
545 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
546 * so we must take a snapshot of the expedited state.
547 */
548 empty_exp_now = sync_rcu_exp_done(rnp);
549 if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
550 trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
551 rnp->gp_seq,
552 0, rnp->qsmask,
553 rnp->level,
554 rnp->grplo,
555 rnp->grphi,
556 !!rnp->gp_tasks);
557 rcu_report_unblock_qs_rnp(rnp, flags);
558 } else {
559 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
560 }
561
562 /*
563 * If this was the last task on the expedited lists,
564 * then we need to report up the rcu_node hierarchy.
565 */
566 if (!empty_exp && empty_exp_now)
567 rcu_report_exp_rnp(rnp, true);
568
569 /* Unboost if we were boosted. */
570 if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
571 rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
572 } else {
573 local_irq_restore(flags);
574 }
575}
576
577/*
578 * Is a deferred quiescent-state pending, and are we also not in
579 * an RCU read-side critical section? It is the caller's responsibility
580 * to ensure it is otherwise safe to report any deferred quiescent
581 * states. The reason for this is that it is safe to report a
582 * quiescent state during context switch even though preemption
583 * is disabled. This function cannot be expected to understand these
584 * nuances, so the caller must handle them.
585 */
586static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
587{
588 return (__this_cpu_read(rcu_data.cpu_no_qs.b.exp) ||
589 READ_ONCE(t->rcu_read_unlock_special.s)) &&
590 rcu_preempt_depth() == 0;
591}
592
593/*
594 * Report a deferred quiescent state if needed and safe to do so.
595 * As with rcu_preempt_need_deferred_qs(), "safe" involves only
596 * not being in an RCU read-side critical section. The caller must
597 * evaluate safety in terms of interrupt, softirq, and preemption
598 * disabling.
599 */
600notrace void rcu_preempt_deferred_qs(struct task_struct *t)
601{
602 unsigned long flags;
603
604 if (!rcu_preempt_need_deferred_qs(t))
605 return;
606 local_irq_save(flags);
607 rcu_preempt_deferred_qs_irqrestore(t, flags);
608}
609
610/*
611 * Minimal handler to give the scheduler a chance to re-evaluate.
612 */
613static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
614{
615 struct rcu_data *rdp;
616
617 rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
618 rdp->defer_qs_iw_pending = false;
619}
620
621/*
622 * Handle special cases during rcu_read_unlock(), such as needing to
623 * notify RCU core processing or task having blocked during the RCU
624 * read-side critical section.
625 */
626static void rcu_read_unlock_special(struct task_struct *t)
627{
628 unsigned long flags;
629 bool irqs_were_disabled;
630 bool preempt_bh_were_disabled =
631 !!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
632
633 /* NMI handlers cannot block and cannot safely manipulate state. */
634 if (in_nmi())
635 return;
636
637 local_irq_save(flags);
638 irqs_were_disabled = irqs_disabled_flags(flags);
639 if (preempt_bh_were_disabled || irqs_were_disabled) {
640 bool expboost; // Expedited GP in flight or possible boosting.
641 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
642 struct rcu_node *rnp = rdp->mynode;
643
644 expboost = (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
645 (rdp->grpmask & READ_ONCE(rnp->expmask)) ||
646 (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
647 ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) ||
648 (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled &&
649 t->rcu_blocked_node);
650 // Need to defer quiescent state until everything is enabled.
651 if (use_softirq && (in_hardirq() || (expboost && !irqs_were_disabled))) {
652 // Using softirq, safe to awaken, and either the
653 // wakeup is free or there is either an expedited
654 // GP in flight or a potential need to deboost.
655 raise_softirq_irqoff(RCU_SOFTIRQ);
656 } else {
657 // Enabling BH or preempt does reschedule, so...
658 // Also if no expediting and no possible deboosting,
659 // slow is OK. Plus nohz_full CPUs eventually get
660 // tick enabled.
661 set_tsk_need_resched(current);
662 set_preempt_need_resched();
663 if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
664 expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
665 // Get scheduler to re-evaluate and call hooks.
666 // If !IRQ_WORK, FQS scan will eventually IPI.
667 if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
668 IS_ENABLED(CONFIG_PREEMPT_RT))
669 rdp->defer_qs_iw = IRQ_WORK_INIT_HARD(
670 rcu_preempt_deferred_qs_handler);
671 else
672 init_irq_work(&rdp->defer_qs_iw,
673 rcu_preempt_deferred_qs_handler);
674 rdp->defer_qs_iw_pending = true;
675 irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
676 }
677 }
678 local_irq_restore(flags);
679 return;
680 }
681 rcu_preempt_deferred_qs_irqrestore(t, flags);
682}
683
684/*
685 * Check that the list of blocked tasks for the newly completed grace
686 * period is in fact empty. It is a serious bug to complete a grace
687 * period that still has RCU readers blocked! This function must be
688 * invoked -before- updating this rnp's ->gp_seq.
689 *
690 * Also, if there are blocked tasks on the list, they automatically
691 * block the newly created grace period, so set up ->gp_tasks accordingly.
692 */
693static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
694{
695 struct task_struct *t;
696
697 RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
698 raw_lockdep_assert_held_rcu_node(rnp);
699 if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
700 dump_blkd_tasks(rnp, 10);
701 if (rcu_preempt_has_tasks(rnp) &&
702 (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
703 WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
704 t = container_of(rnp->gp_tasks, struct task_struct,
705 rcu_node_entry);
706 trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
707 rnp->gp_seq, t->pid);
708 }
709 WARN_ON_ONCE(rnp->qsmask);
710}
711
712/*
713 * Check for a quiescent state from the current CPU, including voluntary
714 * context switches for Tasks RCU. When a task blocks, the task is
715 * recorded in the corresponding CPU's rcu_node structure, which is checked
716 * elsewhere, hence this function need only check for quiescent states
717 * related to the current CPU, not to those related to tasks.
718 */
719static void rcu_flavor_sched_clock_irq(int user)
720{
721 struct task_struct *t = current;
722
723 lockdep_assert_irqs_disabled();
724 if (rcu_preempt_depth() > 0 ||
725 (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
726 /* No QS, force context switch if deferred. */
727 if (rcu_preempt_need_deferred_qs(t)) {
728 set_tsk_need_resched(t);
729 set_preempt_need_resched();
730 }
731 } else if (rcu_preempt_need_deferred_qs(t)) {
732 rcu_preempt_deferred_qs(t); /* Report deferred QS. */
733 return;
734 } else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
735 rcu_qs(); /* Report immediate QS. */
736 return;
737 }
738
739 /* If GP is oldish, ask for help from rcu_read_unlock_special(). */
740 if (rcu_preempt_depth() > 0 &&
741 __this_cpu_read(rcu_data.core_needs_qs) &&
742 __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
743 !t->rcu_read_unlock_special.b.need_qs &&
744 time_after(jiffies, rcu_state.gp_start + HZ))
745 t->rcu_read_unlock_special.b.need_qs = true;
746}
747
748/*
749 * Check for a task exiting while in a preemptible-RCU read-side
750 * critical section, clean up if so. No need to issue warnings, as
751 * debug_check_no_locks_held() already does this if lockdep is enabled.
752 * Besides, if this function does anything other than just immediately
753 * return, there was a bug of some sort. Spewing warnings from this
754 * function is like as not to simply obscure important prior warnings.
755 */
756void exit_rcu(void)
757{
758 struct task_struct *t = current;
759
760 if (unlikely(!list_empty(&current->rcu_node_entry))) {
761 rcu_preempt_depth_set(1);
762 barrier();
763 WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
764 } else if (unlikely(rcu_preempt_depth())) {
765 rcu_preempt_depth_set(1);
766 } else {
767 return;
768 }
769 __rcu_read_unlock();
770 rcu_preempt_deferred_qs(current);
771}
772
773/*
774 * Dump the blocked-tasks state, but limit the list dump to the
775 * specified number of elements.
776 */
777static void
778dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
779{
780 int cpu;
781 int i;
782 struct list_head *lhp;
783 struct rcu_data *rdp;
784 struct rcu_node *rnp1;
785
786 raw_lockdep_assert_held_rcu_node(rnp);
787 pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
788 __func__, rnp->grplo, rnp->grphi, rnp->level,
789 (long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs);
790 for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
791 pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
792 __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
793 pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
794 __func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),
795 READ_ONCE(rnp->exp_tasks));
796 pr_info("%s: ->blkd_tasks", __func__);
797 i = 0;
798 list_for_each(lhp, &rnp->blkd_tasks) {
799 pr_cont(" %p", lhp);
800 if (++i >= ncheck)
801 break;
802 }
803 pr_cont("\n");
804 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
805 rdp = per_cpu_ptr(&rcu_data, cpu);
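		/* ".o"[x] prints '.' for an offline CPU and 'o' for an online one. */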
806 pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
807 cpu, ".o"[rcu_rdp_cpu_online(rdp)],
808 (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
809 (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
810 }
811}
812
813#else /* #ifdef CONFIG_PREEMPT_RCU */
814
815/*
816 * If strict grace periods are enabled, and if the calling
817 * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
818 * report that quiescent state and, if requested, spin for a bit.
819 */
820void rcu_read_unlock_strict(void)
821{
822 struct rcu_data *rdp;
823
824 if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
825 return;
826 rdp = this_cpu_ptr(&rcu_data);
827 rdp->cpu_no_qs.b.norm = false;
828 rcu_report_qs_rdp(rdp);
829 udelay(rcu_unlock_delay);
830}
831EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);
832
833/*
834 * Tell them what RCU they are running.
835 */
836static void __init rcu_bootup_announce(void)
837{
838 pr_info("Hierarchical RCU implementation.\n");
839 rcu_bootup_announce_oddness();
840}
841
842/*
843 * Note a quiescent state for PREEMPTION=n. Because we do not need to know
844 * how many quiescent states passed, just if there was at least one since
845 * the start of the grace period, this just sets a flag. The caller must
846 * have disabled preemption.
847 */
848static void rcu_qs(void)
849{
850 RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
851 if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
852 return;
853 trace_rcu_grace_period(TPS("rcu_sched"),
854 __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
855 __this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
856 if (__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
857 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
858}
859
860/*
861 * Register an urgently needed quiescent state. If there is an
862 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
863 * dyntick-idle quiescent state visible to other CPUs, which will in
864 * some cases serve for expedited as well as normal grace periods.
865 * Either way, register a lightweight quiescent state.
866 */
867void rcu_all_qs(void)
868{
869 unsigned long flags;
870
871 if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
872 return;
873 preempt_disable(); // For CONFIG_PREEMPT_COUNT=y kernels
874 /* Load rcu_urgent_qs before other flags. */
875 if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
876 preempt_enable();
877 return;
878 }
879 this_cpu_write(rcu_data.rcu_urgent_qs, false);
880 if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
881 local_irq_save(flags);
882 rcu_momentary_dyntick_idle();
883 local_irq_restore(flags);
884 }
885 rcu_qs();
886 preempt_enable();
887}
888EXPORT_SYMBOL_GPL(rcu_all_qs);
889
890/*
891 * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
892 */
893void rcu_note_context_switch(bool preempt)
894{
895 trace_rcu_utilization(TPS("Start context switch"));
896 rcu_qs();
897 /* Load rcu_urgent_qs before other flags. */
898 if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
899 goto out;
900 this_cpu_write(rcu_data.rcu_urgent_qs, false);
901 if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
902 rcu_momentary_dyntick_idle();
903out:
904 rcu_tasks_qs(current, preempt);
905 trace_rcu_utilization(TPS("End context switch"));
906}
907EXPORT_SYMBOL_GPL(rcu_note_context_switch);
908
909/*
910 * Because preemptible RCU does not exist, there are never any preempted
911 * RCU readers.
912 */
913static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
914{
915 return 0;
916}
917
918/*
919 * Because there is no preemptible RCU, there can be no readers blocked.
920 */
921static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
922{
923 return false;
924}
925
926/*
927 * Because there is no preemptible RCU, there can be no deferred quiescent
928 * states.
929 */
930static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
931{
932 return false;
933}
934
935// Except that we do need to respond to a request by an expedited
936// grace period for a quiescent state from this CPU. Note that in
937// non-preemptible kernels, there can be no context switches within RCU
938// read-side critical sections, which in turn means that the leaf rcu_node
939// structure's blocked-tasks list is always empty. There is therefore no need to
940// actually check it. Instead, a quiescent state from this CPU suffices,
941// and this function is only called from such a quiescent state.
942notrace void rcu_preempt_deferred_qs(struct task_struct *t)
943{
944 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
945
946 if (READ_ONCE(rdp->cpu_no_qs.b.exp))
947 rcu_report_exp_rdp(rdp);
948}
949
950/*
951 * Because there is no preemptible RCU, there can be no readers blocked,
952 * so there is no need to check for blocked tasks. So check only for
953 * bogus qsmask values.
954 */
955static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
956{
957 WARN_ON_ONCE(rnp->qsmask);
958}
959
960/*
961 * Check to see if this CPU is in a non-context-switch quiescent state,
962 * namely user mode and idle loop.
963 */
964static void rcu_flavor_sched_clock_irq(int user)
965{
966 if (user || rcu_is_cpu_rrupt_from_idle()) {
967
968 /*
969 * Get here if this CPU took its interrupt from user
970 * mode or from the idle loop, and if this is not a
971 * nested interrupt. In this case, the CPU is in
972 * a quiescent state, so note it.
973 *
974 * No memory barrier is required here because rcu_qs()
975 * references only CPU-local variables that other CPUs
976 * neither access nor modify, at least not while the
977 * corresponding CPU is online.
978 */
979 rcu_qs();
980 }
981}
982
983/*
984 * Because preemptible RCU does not exist, tasks cannot possibly exit
985 * while in preemptible RCU read-side critical sections.
986 */
987void exit_rcu(void)
988{
989}
990
991/*
992 * Dump the guaranteed-empty blocked-tasks state. Trust but verify.
993 */
994static void
995dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
996{
997 WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
998}
999
1000#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
1001
1002/*
1003 * If boosting, set rcuc kthreads to realtime priority.
1004 */
1005static void rcu_cpu_kthread_setup(unsigned int cpu)
1006{
1007 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1008#ifdef CONFIG_RCU_BOOST
1009 struct sched_param sp;
1010
1011 sp.sched_priority = kthread_prio;
1012 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1013#endif /* #ifdef CONFIG_RCU_BOOST */
1014
1015 WRITE_ONCE(rdp->rcuc_activity, jiffies);
1016}
1017
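/*
 * Is the current task the no-CBs callback kthread associated with the
 * specified rcu_data structure?  Always false for CONFIG_RCU_NOCB_CPU=n.
 */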
1018static bool rcu_is_callbacks_nocb_kthread(struct rcu_data *rdp)
1019{
1020#ifdef CONFIG_RCU_NOCB_CPU
1021 return rdp->nocb_cb_kthread == current;
1022#else
1023 return false;
1024#endif
1025}
1026
1027/*
1028 * Is the current CPU running the RCU-callbacks kthread?
1029 * Caller must have preemption disabled.
1030 */
1031static bool rcu_is_callbacks_kthread(struct rcu_data *rdp)
1032{
1033 return rdp->rcu_cpu_kthread_task == current ||
1034 rcu_is_callbacks_nocb_kthread(rdp);
1035}
1036
1037#ifdef CONFIG_RCU_BOOST
1038
1039/*
1040 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1041 * or ->boost_tasks, advancing the pointer to the next task in the
1042 * ->blkd_tasks list.
1043 *
1044 * Note that irqs must be enabled: boosting the task can block.
1045 * Returns 1 if there are more tasks needing to be boosted.
1046 */
1047static int rcu_boost(struct rcu_node *rnp)
1048{
1049 unsigned long flags;
1050 struct task_struct *t;
1051 struct list_head *tb;
1052
1053 if (READ_ONCE(rnp->exp_tasks) == NULL &&
1054 READ_ONCE(rnp->boost_tasks) == NULL)
1055 return 0; /* Nothing left to boost. */
1056
1057 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1058
1059 /*
1060 * Recheck under the lock: all tasks in need of boosting
1061 * might exit their RCU read-side critical sections on their own.
1062 */
1063 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1064 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1065 return 0;
1066 }
1067
1068 /*
1069 * Preferentially boost tasks blocking expedited grace periods.
1070 * This cannot starve the normal grace periods because a second
1071 * expedited grace period must boost all blocked tasks, including
1072 * those blocking the pre-existing normal grace period.
1073 */
1074 if (rnp->exp_tasks != NULL)
1075 tb = rnp->exp_tasks;
1076 else
1077 tb = rnp->boost_tasks;
1078
1079 /*
1080 * We boost task t by manufacturing an rt_mutex that appears to
1081 * be held by task t. We leave a pointer to that rt_mutex where
1082 * task t can find it, and task t will release the mutex when it
1083 * exits its outermost RCU read-side critical section. Then
1084 * simply acquiring this artificial rt_mutex will boost task
1085 * t's priority. (Thanks to tglx for suggesting this approach!)
1086 *
1087 * Note that task t must acquire rnp->lock to remove itself from
1088 * the ->blkd_tasks list, which it will do from exit() if from
1089 * nowhere else. We therefore are guaranteed that task t will
1090 * stay around at least until we drop rnp->lock. Note that
1091 * rnp->lock also resolves races between our priority boosting
1092 * and task t's exiting its outermost RCU read-side critical
1093 * section.
1094 */
1095 t = container_of(tb, struct task_struct, rcu_node_entry);
1096 rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
1097 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1098 /* Lock only for side effect: boosts task t's priority. */
1099 rt_mutex_lock(&rnp->boost_mtx);
1100 rt_mutex_unlock(&rnp->boost_mtx); /* Then keep lockdep happy. */
1101 rnp->n_boosts++;
1102
1103 return READ_ONCE(rnp->exp_tasks) != NULL ||
1104 READ_ONCE(rnp->boost_tasks) != NULL;
1105}
1106
1107/*
1108 * Priority-boosting kthread, one per leaf rcu_node.
1109 */
1110static int rcu_boost_kthread(void *arg)
1111{
1112 struct rcu_node *rnp = (struct rcu_node *)arg;
1113 int spincnt = 0;
1114 int more2boost;
1115
1116 trace_rcu_utilization(TPS("Start boost kthread@init"));
1117 for (;;) {
1118 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING);
1119 trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
1120 rcu_wait(READ_ONCE(rnp->boost_tasks) ||
1121 READ_ONCE(rnp->exp_tasks));
1122 trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
1123 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING);
1124 more2boost = rcu_boost(rnp);
1125 if (more2boost)
1126 spincnt++;
1127 else
1128 spincnt = 0;
1129 if (spincnt > 10) {
1130 WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING);
1131 trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
1132 schedule_timeout_idle(2);
1133 trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
1134 spincnt = 0;
1135 }
1136 }
1137 /* NOTREACHED */
1138 trace_rcu_utilization(TPS("End boost kthread@notreached"));
1139 return 0;
1140}
1141
1142/*
1143 * Check to see if it is time to start boosting RCU readers that are
1144 * blocking the current grace period, and, if so, tell the per-rcu_node
1145 * kthread to start boosting them. If there is an expedited grace
1146 * period in progress, it is always time to boost.
1147 *
1148 * The caller must hold rnp->lock, which this function releases.
1149 * The ->boost_kthread_task is immortal, so we don't need to worry
1150 * about it going away.
1151 */
1152static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1153 __releases(rnp->lock)
1154{
1155 raw_lockdep_assert_held_rcu_node(rnp);
1156 if (!rnp->boost_kthread_task ||
1157 (!rcu_preempt_blocked_readers_cgp(rnp) && !rnp->exp_tasks)) {
1158 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1159 return;
1160 }
1161 if (rnp->exp_tasks != NULL ||
1162 (rnp->gp_tasks != NULL &&
1163 rnp->boost_tasks == NULL &&
1164 rnp->qsmask == 0 &&
1165 (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld ||
1166 IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)))) {
1167 if (rnp->exp_tasks == NULL)
1168 WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks);
1169 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1170 rcu_wake_cond(rnp->boost_kthread_task,
1171 READ_ONCE(rnp->boost_kthread_status));
1172 } else {
1173 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1174 }
1175}
1176
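/* Convert CONFIG_RCU_BOOST_DELAY from milliseconds to jiffies, rounding up. */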
1177#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
1178
1179/*
1180 * Do priority-boost accounting for the start of a new grace period.
1181 */
1182static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1183{
1184 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1185}
1186
1187/*
1188 * Create an RCU-boost kthread for the specified node if one does not
1189 * already exist. We only create this kthread for preemptible RCU.
1190 */
1191static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
1192{
1193 unsigned long flags;
1194 int rnp_index = rnp - rcu_get_root();
1195 struct sched_param sp;
1196 struct task_struct *t;
1197
1198 mutex_lock(&rnp->boost_kthread_mutex);
1199 if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
1200 goto out;
1201
1202 t = kthread_create(rcu_boost_kthread, (void *)rnp,
1203 "rcub/%d", rnp_index);
1204 if (WARN_ON_ONCE(IS_ERR(t)))
1205 goto out;
1206
1207 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1208 rnp->boost_kthread_task = t;
1209 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1210 sp.sched_priority = kthread_prio;
1211 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1212 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1213
1214 out:
1215 mutex_unlock(&rnp->boost_kthread_mutex);
1216}
1217
1218/*
1219 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1220 * served by the rcu_node in question. The CPU hotplug lock is still
1221 * held, so the value of rnp->qsmaskinit will be stable.
1222 *
1223 * We don't include outgoingcpu in the affinity set; use -1 if there is
1224 * no outgoing CPU. If there are no CPUs left in the affinity set,
1225 * this function allows the kthread to execute on any CPU.
1226 *
1227 * Any future concurrent calls are serialized via ->boost_kthread_mutex.
1228 */
1229static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1230{
1231 struct task_struct *t = rnp->boost_kthread_task;
1232 unsigned long mask;
1233 cpumask_var_t cm;
1234 int cpu;
1235
1236 if (!t)
1237 return;
1238 if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
1239 return;
1240 mutex_lock(&rnp->boost_kthread_mutex);
1241 mask = rcu_rnp_online_cpus(rnp);
1242 for_each_leaf_node_possible_cpu(rnp, cpu)
1243 if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
1244 cpu != outgoingcpu)
1245 cpumask_set_cpu(cpu, cm);
1246 cpumask_and(cm, cm, housekeeping_cpumask(HK_TYPE_RCU));
1247 if (cpumask_empty(cm)) {
1248 cpumask_copy(cm, housekeeping_cpumask(HK_TYPE_RCU));
1249 if (outgoingcpu >= 0)
1250 cpumask_clear_cpu(outgoingcpu, cm);
1251 }
1252 set_cpus_allowed_ptr(t, cm);
1253 mutex_unlock(&rnp->boost_kthread_mutex);
1254 free_cpumask_var(cm);
1255}
1256
1257#else /* #ifdef CONFIG_RCU_BOOST */
1258
1259static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1260 __releases(rnp->lock)
1261{
1262 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1263}
1264
1265static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1266{
1267}
1268
1269static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
1270{
1271}
1272
1273static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1274{
1275}
1276
1277#endif /* #else #ifdef CONFIG_RCU_BOOST */
1278
1279/*
1280 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
1281 * grace-period kthread will do force_quiescent_state() processing?
1282 * The idea is to avoid waking up RCU core processing on such a
1283 * CPU unless the grace period has extended for too long.
1284 *
1285 * This code relies on the fact that all NO_HZ_FULL CPUs are also
1286 * RCU_NOCB_CPU CPUs.
1287 */
1288static bool rcu_nohz_full_cpu(void)
1289{
1290#ifdef CONFIG_NO_HZ_FULL
1291 if (tick_nohz_full_cpu(smp_processor_id()) &&
1292 (!rcu_gp_in_progress() ||
1293 time_before(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
1294 return true;
1295#endif /* #ifdef CONFIG_NO_HZ_FULL */
1296 return false;
1297}
1298
1299/*
1300 * Bind the RCU grace-period kthreads to the housekeeping CPU.
1301 */
1302static void rcu_bind_gp_kthread(void)
1303{
1304 if (!tick_nohz_full_enabled())
1305 return;
1306 housekeeping_affine(current, HK_TYPE_RCU);
1307}