Loading...
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * RCU expedited grace periods
4 *
5 * Copyright IBM Corporation, 2016
6 *
7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
8 */
9
10#include <linux/lockdep.h>
11
12static void rcu_exp_handler(void *unused);
13static int rcu_print_task_exp_stall(struct rcu_node *rnp);
14
15/*
16 * Record the start of an expedited grace period.
17 */
18static void rcu_exp_gp_seq_start(void)
19{
20 rcu_seq_start(&rcu_state.expedited_sequence);
21}
22
23/*
24 * Return then value that expedited-grace-period counter will have
25 * at the end of the current grace period.
26 */
27static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
28{
29 return rcu_seq_endval(&rcu_state.expedited_sequence);
30}
31
32/*
33 * Record the end of an expedited grace period.
34 */
35static void rcu_exp_gp_seq_end(void)
36{
37 rcu_seq_end(&rcu_state.expedited_sequence);
38 smp_mb(); /* Ensure that consecutive grace periods serialize. */
39}
40
41/*
42 * Take a snapshot of the expedited-grace-period counter.
43 */
44static unsigned long rcu_exp_gp_seq_snap(void)
45{
46 unsigned long s;
47
48 smp_mb(); /* Caller's modifications seen first by other CPUs. */
49 s = rcu_seq_snap(&rcu_state.expedited_sequence);
50 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
51 return s;
52}
53
54/*
55 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
56 * if a full expedited grace period has elapsed since that snapshot
57 * was taken.
58 */
59static bool rcu_exp_gp_seq_done(unsigned long s)
60{
61 return rcu_seq_done(&rcu_state.expedited_sequence, s);
62}
63
64/*
65 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
66 * recent CPU-online activity. Note that these masks are not cleared
67 * when CPUs go offline, so they reflect the union of all CPUs that have
68 * ever been online. This means that this function normally takes its
69 * no-work-to-do fastpath.
70 */
71static void sync_exp_reset_tree_hotplug(void)
72{
73 bool done;
74 unsigned long flags;
75 unsigned long mask;
76 unsigned long oldmask;
77 int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
78 struct rcu_node *rnp;
79 struct rcu_node *rnp_up;
80
81 /* If no new CPUs onlined since last time, nothing to do. */
82 if (likely(ncpus == rcu_state.ncpus_snap))
83 return;
84 rcu_state.ncpus_snap = ncpus;
85
86 /*
87 * Each pass through the following loop propagates newly onlined
88 * CPUs for the current rcu_node structure up the rcu_node tree.
89 */
90 rcu_for_each_leaf_node(rnp) {
91 raw_spin_lock_irqsave_rcu_node(rnp, flags);
92 if (rnp->expmaskinit == rnp->expmaskinitnext) {
93 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
94 continue; /* No new CPUs, nothing to do. */
95 }
96
97 /* Update this node's mask, track old value for propagation. */
98 oldmask = rnp->expmaskinit;
99 rnp->expmaskinit = rnp->expmaskinitnext;
100 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
101
102 /* If was already nonzero, nothing to propagate. */
103 if (oldmask)
104 continue;
105
106 /* Propagate the new CPU up the tree. */
107 mask = rnp->grpmask;
108 rnp_up = rnp->parent;
109 done = false;
110 while (rnp_up) {
111 raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
112 if (rnp_up->expmaskinit)
113 done = true;
114 rnp_up->expmaskinit |= mask;
115 raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
116 if (done)
117 break;
118 mask = rnp_up->grpmask;
119 rnp_up = rnp_up->parent;
120 }
121 }
122}
123
124/*
125 * Reset the ->expmask values in the rcu_node tree in preparation for
126 * a new expedited grace period.
127 */
128static void __maybe_unused sync_exp_reset_tree(void)
129{
130 unsigned long flags;
131 struct rcu_node *rnp;
132
133 sync_exp_reset_tree_hotplug();
134 rcu_for_each_node_breadth_first(rnp) {
135 raw_spin_lock_irqsave_rcu_node(rnp, flags);
136 WARN_ON_ONCE(rnp->expmask);
137 rnp->expmask = rnp->expmaskinit;
138 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
139 }
140}
141
142/*
143 * Return non-zero if there is no RCU expedited grace period in progress
144 * for the specified rcu_node structure, in other words, if all CPUs and
145 * tasks covered by the specified rcu_node structure have done their bit
146 * for the current expedited grace period. Works only for preemptible
147 * RCU -- other RCU implementation use other means.
148 *
149 * Caller must hold the specificed rcu_node structure's ->lock
150 */
151static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
152{
153 raw_lockdep_assert_held_rcu_node(rnp);
154
155 return rnp->exp_tasks == NULL &&
156 READ_ONCE(rnp->expmask) == 0;
157}
158
159/*
160 * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
161 * doesn't hold the rcu_node's ->lock, and will acquire and release the lock
162 * itself
163 */
164static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
165{
166 unsigned long flags;
167 bool ret;
168
169 raw_spin_lock_irqsave_rcu_node(rnp, flags);
170 ret = sync_rcu_preempt_exp_done(rnp);
171 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
172
173 return ret;
174}
175
176
177/*
178 * Report the exit from RCU read-side critical section for the last task
179 * that queued itself during or before the current expedited preemptible-RCU
180 * grace period. This event is reported either to the rcu_node structure on
181 * which the task was queued or to one of that rcu_node structure's ancestors,
182 * recursively up the tree. (Calm down, calm down, we do the recursion
183 * iteratively!)
184 *
185 * Caller must hold the specified rcu_node structure's ->lock.
186 */
187static void __rcu_report_exp_rnp(struct rcu_node *rnp,
188 bool wake, unsigned long flags)
189 __releases(rnp->lock)
190{
191 unsigned long mask;
192
193 for (;;) {
194 if (!sync_rcu_preempt_exp_done(rnp)) {
195 if (!rnp->expmask)
196 rcu_initiate_boost(rnp, flags);
197 else
198 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
199 break;
200 }
201 if (rnp->parent == NULL) {
202 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
203 if (wake) {
204 smp_mb(); /* EGP done before wake_up(). */
205 swake_up_one(&rcu_state.expedited_wq);
206 }
207 break;
208 }
209 mask = rnp->grpmask;
210 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
211 rnp = rnp->parent;
212 raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
213 WARN_ON_ONCE(!(rnp->expmask & mask));
214 rnp->expmask &= ~mask;
215 }
216}
217
218/*
219 * Report expedited quiescent state for specified node. This is a
220 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
221 */
222static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
223{
224 unsigned long flags;
225
226 raw_spin_lock_irqsave_rcu_node(rnp, flags);
227 __rcu_report_exp_rnp(rnp, wake, flags);
228}
229
230/*
231 * Report expedited quiescent state for multiple CPUs, all covered by the
232 * specified leaf rcu_node structure.
233 */
234static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
235 unsigned long mask, bool wake)
236{
237 unsigned long flags;
238
239 raw_spin_lock_irqsave_rcu_node(rnp, flags);
240 if (!(rnp->expmask & mask)) {
241 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
242 return;
243 }
244 rnp->expmask &= ~mask;
245 __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
246}
247
248/*
249 * Report expedited quiescent state for specified rcu_data (CPU).
250 */
251static void rcu_report_exp_rdp(struct rcu_data *rdp)
252{
253 WRITE_ONCE(rdp->exp_deferred_qs, false);
254 rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
255}
256
257/* Common code for work-done checking. */
258static bool sync_exp_work_done(unsigned long s)
259{
260 if (rcu_exp_gp_seq_done(s)) {
261 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
262 smp_mb(); /* Ensure test happens before caller kfree(). */
263 return true;
264 }
265 return false;
266}
267
268/*
269 * Funnel-lock acquisition for expedited grace periods. Returns true
270 * if some other task completed an expedited grace period that this task
271 * can piggy-back on, and with no mutex held. Otherwise, returns false
272 * with the mutex held, indicating that the caller must actually do the
273 * expedited grace period.
274 */
275static bool exp_funnel_lock(unsigned long s)
276{
277 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
278 struct rcu_node *rnp = rdp->mynode;
279 struct rcu_node *rnp_root = rcu_get_root();
280
281 /* Low-contention fastpath. */
282 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
283 (rnp == rnp_root ||
284 ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
285 mutex_trylock(&rcu_state.exp_mutex))
286 goto fastpath;
287
288 /*
289 * Each pass through the following loop works its way up
290 * the rcu_node tree, returning if others have done the work or
291 * otherwise falls through to acquire ->exp_mutex. The mapping
292 * from CPU to rcu_node structure can be inexact, as it is just
293 * promoting locality and is not strictly needed for correctness.
294 */
295 for (; rnp != NULL; rnp = rnp->parent) {
296 if (sync_exp_work_done(s))
297 return true;
298
299 /* Work not done, either wait here or go up. */
300 spin_lock(&rnp->exp_lock);
301 if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
302
303 /* Someone else doing GP, so wait for them. */
304 spin_unlock(&rnp->exp_lock);
305 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
306 rnp->grplo, rnp->grphi,
307 TPS("wait"));
308 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
309 sync_exp_work_done(s));
310 return true;
311 }
312 rnp->exp_seq_rq = s; /* Followers can wait on us. */
313 spin_unlock(&rnp->exp_lock);
314 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
315 rnp->grplo, rnp->grphi, TPS("nxtlvl"));
316 }
317 mutex_lock(&rcu_state.exp_mutex);
318fastpath:
319 if (sync_exp_work_done(s)) {
320 mutex_unlock(&rcu_state.exp_mutex);
321 return true;
322 }
323 rcu_exp_gp_seq_start();
324 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
325 return false;
326}
327
328/*
329 * Select the CPUs within the specified rcu_node that the upcoming
330 * expedited grace period needs to wait for.
331 */
332static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
333{
334 int cpu;
335 unsigned long flags;
336 unsigned long mask_ofl_test;
337 unsigned long mask_ofl_ipi;
338 int ret;
339 struct rcu_exp_work *rewp =
340 container_of(wp, struct rcu_exp_work, rew_work);
341 struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
342
343 raw_spin_lock_irqsave_rcu_node(rnp, flags);
344
345 /* Each pass checks a CPU for identity, offline, and idle. */
346 mask_ofl_test = 0;
347 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
348 unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
349 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
350 int snap;
351
352 if (raw_smp_processor_id() == cpu ||
353 !(rnp->qsmaskinitnext & mask)) {
354 mask_ofl_test |= mask;
355 } else {
356 snap = rcu_dynticks_snap(rdp);
357 if (rcu_dynticks_in_eqs(snap))
358 mask_ofl_test |= mask;
359 else
360 rdp->exp_dynticks_snap = snap;
361 }
362 }
363 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
364
365 /*
366 * Need to wait for any blocked tasks as well. Note that
367 * additional blocking tasks will also block the expedited GP
368 * until such time as the ->expmask bits are cleared.
369 */
370 if (rcu_preempt_has_tasks(rnp))
371 rnp->exp_tasks = rnp->blkd_tasks.next;
372 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
373
374 /* IPI the remaining CPUs for expedited quiescent state. */
375 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
376 unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
377 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
378
379 if (!(mask_ofl_ipi & mask))
380 continue;
381retry_ipi:
382 if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
383 mask_ofl_test |= mask;
384 continue;
385 }
386 if (get_cpu() == cpu) {
387 put_cpu();
388 continue;
389 }
390 ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
391 put_cpu();
392 if (!ret) {
393 mask_ofl_ipi &= ~mask;
394 continue;
395 }
396 /* Failed, raced with CPU hotplug operation. */
397 raw_spin_lock_irqsave_rcu_node(rnp, flags);
398 if ((rnp->qsmaskinitnext & mask) &&
399 (rnp->expmask & mask)) {
400 /* Online, so delay for a bit and try again. */
401 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
402 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
403 schedule_timeout_uninterruptible(1);
404 goto retry_ipi;
405 }
406 /* CPU really is offline, so we can ignore it. */
407 if (!(rnp->expmask & mask))
408 mask_ofl_ipi &= ~mask;
409 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
410 }
411 /* Report quiescent states for those that went offline. */
412 mask_ofl_test |= mask_ofl_ipi;
413 if (mask_ofl_test)
414 rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
415}
416
417/*
418 * Select the nodes that the upcoming expedited grace period needs
419 * to wait for.
420 */
421static void sync_rcu_exp_select_cpus(void)
422{
423 int cpu;
424 struct rcu_node *rnp;
425
426 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
427 sync_exp_reset_tree();
428 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
429
430 /* Schedule work for each leaf rcu_node structure. */
431 rcu_for_each_leaf_node(rnp) {
432 rnp->exp_need_flush = false;
433 if (!READ_ONCE(rnp->expmask))
434 continue; /* Avoid early boot non-existent wq. */
435 if (!READ_ONCE(rcu_par_gp_wq) ||
436 rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
437 rcu_is_last_leaf_node(rnp)) {
438 /* No workqueues yet or last leaf, do direct call. */
439 sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
440 continue;
441 }
442 INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
443 cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
444 /* If all offline, queue the work on an unbound CPU. */
445 if (unlikely(cpu > rnp->grphi - rnp->grplo))
446 cpu = WORK_CPU_UNBOUND;
447 else
448 cpu += rnp->grplo;
449 queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
450 rnp->exp_need_flush = true;
451 }
452
453 /* Wait for workqueue jobs (if any) to complete. */
454 rcu_for_each_leaf_node(rnp)
455 if (rnp->exp_need_flush)
456 flush_work(&rnp->rew.rew_work);
457}
458
459static void synchronize_sched_expedited_wait(void)
460{
461 int cpu;
462 unsigned long jiffies_stall;
463 unsigned long jiffies_start;
464 unsigned long mask;
465 int ndetected;
466 struct rcu_node *rnp;
467 struct rcu_node *rnp_root = rcu_get_root();
468 int ret;
469
470 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
471 jiffies_stall = rcu_jiffies_till_stall_check();
472 jiffies_start = jiffies;
473
474 for (;;) {
475 ret = swait_event_timeout_exclusive(
476 rcu_state.expedited_wq,
477 sync_rcu_preempt_exp_done_unlocked(rnp_root),
478 jiffies_stall);
479 if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
480 return;
481 WARN_ON(ret < 0); /* workqueues should not be signaled. */
482 if (rcu_cpu_stall_suppress)
483 continue;
484 panic_on_rcu_stall();
485 pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
486 rcu_state.name);
487 ndetected = 0;
488 rcu_for_each_leaf_node(rnp) {
489 ndetected += rcu_print_task_exp_stall(rnp);
490 for_each_leaf_node_possible_cpu(rnp, cpu) {
491 struct rcu_data *rdp;
492
493 mask = leaf_node_cpu_bit(rnp, cpu);
494 if (!(rnp->expmask & mask))
495 continue;
496 ndetected++;
497 rdp = per_cpu_ptr(&rcu_data, cpu);
498 pr_cont(" %d-%c%c%c", cpu,
499 "O."[!!cpu_online(cpu)],
500 "o."[!!(rdp->grpmask & rnp->expmaskinit)],
501 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
502 }
503 }
504 pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
505 jiffies - jiffies_start, rcu_state.expedited_sequence,
506 rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
507 if (ndetected) {
508 pr_err("blocking rcu_node structures:");
509 rcu_for_each_node_breadth_first(rnp) {
510 if (rnp == rnp_root)
511 continue; /* printed unconditionally */
512 if (sync_rcu_preempt_exp_done_unlocked(rnp))
513 continue;
514 pr_cont(" l=%u:%d-%d:%#lx/%c",
515 rnp->level, rnp->grplo, rnp->grphi,
516 rnp->expmask,
517 ".T"[!!rnp->exp_tasks]);
518 }
519 pr_cont("\n");
520 }
521 rcu_for_each_leaf_node(rnp) {
522 for_each_leaf_node_possible_cpu(rnp, cpu) {
523 mask = leaf_node_cpu_bit(rnp, cpu);
524 if (!(rnp->expmask & mask))
525 continue;
526 dump_cpu_task(cpu);
527 }
528 }
529 jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
530 }
531}
532
533/*
534 * Wait for the current expedited grace period to complete, and then
535 * wake up everyone who piggybacked on the just-completed expedited
536 * grace period. Also update all the ->exp_seq_rq counters as needed
537 * in order to avoid counter-wrap problems.
538 */
539static void rcu_exp_wait_wake(unsigned long s)
540{
541 struct rcu_node *rnp;
542
543 synchronize_sched_expedited_wait();
544 rcu_exp_gp_seq_end();
545 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
546
547 /*
548 * Switch over to wakeup mode, allowing the next GP, but -only- the
549 * next GP, to proceed.
550 */
551 mutex_lock(&rcu_state.exp_wake_mutex);
552
553 rcu_for_each_node_breadth_first(rnp) {
554 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
555 spin_lock(&rnp->exp_lock);
556 /* Recheck, avoid hang in case someone just arrived. */
557 if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
558 rnp->exp_seq_rq = s;
559 spin_unlock(&rnp->exp_lock);
560 }
561 smp_mb(); /* All above changes before wakeup. */
562 wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
563 }
564 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
565 mutex_unlock(&rcu_state.exp_wake_mutex);
566}
567
568/*
569 * Common code to drive an expedited grace period forward, used by
570 * workqueues and mid-boot-time tasks.
571 */
572static void rcu_exp_sel_wait_wake(unsigned long s)
573{
574 /* Initialize the rcu_node tree in preparation for the wait. */
575 sync_rcu_exp_select_cpus();
576
577 /* Wait and clean up, including waking everyone. */
578 rcu_exp_wait_wake(s);
579}
580
581/*
582 * Work-queue handler to drive an expedited grace period forward.
583 */
584static void wait_rcu_exp_gp(struct work_struct *wp)
585{
586 struct rcu_exp_work *rewp;
587
588 rewp = container_of(wp, struct rcu_exp_work, rew_work);
589 rcu_exp_sel_wait_wake(rewp->rew_s);
590}
591
592#ifdef CONFIG_PREEMPT_RCU
593
594/*
595 * Remote handler for smp_call_function_single(). If there is an
596 * RCU read-side critical section in effect, request that the
597 * next rcu_read_unlock() record the quiescent state up the
598 * ->expmask fields in the rcu_node tree. Otherwise, immediately
599 * report the quiescent state.
600 */
601static void rcu_exp_handler(void *unused)
602{
603 unsigned long flags;
604 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
605 struct rcu_node *rnp = rdp->mynode;
606 struct task_struct *t = current;
607
608 /*
609 * First, the common case of not being in an RCU read-side
610 * critical section. If also enabled or idle, immediately
611 * report the quiescent state, otherwise defer.
612 */
613 if (!t->rcu_read_lock_nesting) {
614 if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
615 rcu_dynticks_curr_cpu_in_eqs()) {
616 rcu_report_exp_rdp(rdp);
617 } else {
618 rdp->exp_deferred_qs = true;
619 set_tsk_need_resched(t);
620 set_preempt_need_resched();
621 }
622 return;
623 }
624
625 /*
626 * Second, the less-common case of being in an RCU read-side
627 * critical section. In this case we can count on a future
628 * rcu_read_unlock(). However, this rcu_read_unlock() might
629 * execute on some other CPU, but in that case there will be
630 * a future context switch. Either way, if the expedited
631 * grace period is still waiting on this CPU, set ->deferred_qs
632 * so that the eventual quiescent state will be reported.
633 * Note that there is a large group of race conditions that
634 * can have caused this quiescent state to already have been
635 * reported, so we really do need to check ->expmask.
636 */
637 if (t->rcu_read_lock_nesting > 0) {
638 raw_spin_lock_irqsave_rcu_node(rnp, flags);
639 if (rnp->expmask & rdp->grpmask) {
640 rdp->exp_deferred_qs = true;
641 t->rcu_read_unlock_special.b.exp_hint = true;
642 }
643 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
644 return;
645 }
646
647 /*
648 * The final and least likely case is where the interrupted
649 * code was just about to or just finished exiting the RCU-preempt
650 * read-side critical section, and no, we can't tell which.
651 * So either way, set ->deferred_qs to flag later code that
652 * a quiescent state is required.
653 *
654 * If the CPU is fully enabled (or if some buggy RCU-preempt
655 * read-side critical section is being used from idle), just
656 * invoke rcu_preempt_deferred_qs() to immediately report the
657 * quiescent state. We cannot use rcu_read_unlock_special()
658 * because we are in an interrupt handler, which will cause that
659 * function to take an early exit without doing anything.
660 *
661 * Otherwise, force a context switch after the CPU enables everything.
662 */
663 rdp->exp_deferred_qs = true;
664 if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
665 WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
666 rcu_preempt_deferred_qs(t);
667 } else {
668 set_tsk_need_resched(t);
669 set_preempt_need_resched();
670 }
671}
672
673/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
674static void sync_sched_exp_online_cleanup(int cpu)
675{
676}
677
678/*
679 * Scan the current list of tasks blocked within RCU read-side critical
680 * sections, printing out the tid of each that is blocking the current
681 * expedited grace period.
682 */
683static int rcu_print_task_exp_stall(struct rcu_node *rnp)
684{
685 struct task_struct *t;
686 int ndetected = 0;
687
688 if (!rnp->exp_tasks)
689 return 0;
690 t = list_entry(rnp->exp_tasks->prev,
691 struct task_struct, rcu_node_entry);
692 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
693 pr_cont(" P%d", t->pid);
694 ndetected++;
695 }
696 return ndetected;
697}
698
699#else /* #ifdef CONFIG_PREEMPT_RCU */
700
701/* Request an expedited quiescent state. */
702static void rcu_exp_need_qs(void)
703{
704 __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
705 /* Store .exp before .rcu_urgent_qs. */
706 smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
707 set_tsk_need_resched(current);
708 set_preempt_need_resched();
709}
710
711/* Invoked on each online non-idle CPU for expedited quiescent state. */
712static void rcu_exp_handler(void *unused)
713{
714 struct rcu_data *rdp;
715 struct rcu_node *rnp;
716
717 rdp = this_cpu_ptr(&rcu_data);
718 rnp = rdp->mynode;
719 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
720 __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
721 return;
722 if (rcu_is_cpu_rrupt_from_idle()) {
723 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
724 return;
725 }
726 rcu_exp_need_qs();
727}
728
729/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
730static void sync_sched_exp_online_cleanup(int cpu)
731{
732 unsigned long flags;
733 int my_cpu;
734 struct rcu_data *rdp;
735 int ret;
736 struct rcu_node *rnp;
737
738 rdp = per_cpu_ptr(&rcu_data, cpu);
739 rnp = rdp->mynode;
740 my_cpu = get_cpu();
741 /* Quiescent state either not needed or already requested, leave. */
742 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
743 __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) {
744 put_cpu();
745 return;
746 }
747 /* Quiescent state needed on current CPU, so set it up locally. */
748 if (my_cpu == cpu) {
749 local_irq_save(flags);
750 rcu_exp_need_qs();
751 local_irq_restore(flags);
752 put_cpu();
753 return;
754 }
755 /* Quiescent state needed on some other CPU, send IPI. */
756 ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
757 put_cpu();
758 WARN_ON_ONCE(ret);
759}
760
761/*
762 * Because preemptible RCU does not exist, we never have to check for
763 * tasks blocked within RCU read-side critical sections that are
764 * blocking the current expedited grace period.
765 */
766static int rcu_print_task_exp_stall(struct rcu_node *rnp)
767{
768 return 0;
769}
770
771#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
772
773/**
774 * synchronize_rcu_expedited - Brute-force RCU grace period
775 *
776 * Wait for an RCU grace period, but expedite it. The basic idea is to
777 * IPI all non-idle non-nohz online CPUs. The IPI handler checks whether
778 * the CPU is in an RCU critical section, and if so, it sets a flag that
779 * causes the outermost rcu_read_unlock() to report the quiescent state
780 * for RCU-preempt or asks the scheduler for help for RCU-sched. On the
781 * other hand, if the CPU is not in an RCU read-side critical section,
782 * the IPI handler reports the quiescent state immediately.
783 *
784 * Although this is a great improvement over previous expedited
785 * implementations, it is still unfriendly to real-time workloads, so is
786 * thus not recommended for any sort of common-case code. In fact, if
787 * you are using synchronize_rcu_expedited() in a loop, please restructure
788 * your code to batch your updates, and then Use a single synchronize_rcu()
789 * instead.
790 *
791 * This has the same semantics as (but is more brutal than) synchronize_rcu().
792 */
793void synchronize_rcu_expedited(void)
794{
795 bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
796 struct rcu_exp_work rew;
797 struct rcu_node *rnp;
798 unsigned long s;
799
800 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
801 lock_is_held(&rcu_lock_map) ||
802 lock_is_held(&rcu_sched_lock_map),
803 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
804
805 /* Is the state is such that the call is a grace period? */
806 if (rcu_blocking_is_gp())
807 return;
808
809 /* If expedited grace periods are prohibited, fall back to normal. */
810 if (rcu_gp_is_normal()) {
811 wait_rcu_gp(call_rcu);
812 return;
813 }
814
815 /* Take a snapshot of the sequence number. */
816 s = rcu_exp_gp_seq_snap();
817 if (exp_funnel_lock(s))
818 return; /* Someone else did our work for us. */
819
820 /* Ensure that load happens before action based on it. */
821 if (unlikely(boottime)) {
822 /* Direct call during scheduler init and early_initcalls(). */
823 rcu_exp_sel_wait_wake(s);
824 } else {
825 /* Marshall arguments & schedule the expedited grace period. */
826 rew.rew_s = s;
827 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
828 queue_work(rcu_gp_wq, &rew.rew_work);
829 }
830
831 /* Wait for expedited grace period to complete. */
832 rnp = rcu_get_root();
833 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
834 sync_exp_work_done(s));
835 smp_mb(); /* Workqueue actions happen before return. */
836
837 /* Let the next expedited grace period start. */
838 mutex_unlock(&rcu_state.exp_mutex);
839
840 if (likely(!boottime))
841 destroy_work_on_stack(&rew.rew_work);
842}
843EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1/*
2 * RCU expedited grace periods
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2016
19 *
20 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
21 */
22
23/*
24 * Record the start of an expedited grace period.
25 */
26static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
27{
28 rcu_seq_start(&rsp->expedited_sequence);
29}
30
31/*
32 * Return then value that expedited-grace-period counter will have
33 * at the end of the current grace period.
34 */
35static __maybe_unused unsigned long rcu_exp_gp_seq_endval(struct rcu_state *rsp)
36{
37 return rcu_seq_endval(&rsp->expedited_sequence);
38}
39
40/*
41 * Record the end of an expedited grace period.
42 */
43static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
44{
45 rcu_seq_end(&rsp->expedited_sequence);
46 smp_mb(); /* Ensure that consecutive grace periods serialize. */
47}
48
49/*
50 * Take a snapshot of the expedited-grace-period counter.
51 */
52static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
53{
54 unsigned long s;
55
56 smp_mb(); /* Caller's modifications seen first by other CPUs. */
57 s = rcu_seq_snap(&rsp->expedited_sequence);
58 trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
59 return s;
60}
61
62/*
63 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
64 * if a full expedited grace period has elapsed since that snapshot
65 * was taken.
66 */
67static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
68{
69 return rcu_seq_done(&rsp->expedited_sequence, s);
70}
71
72/*
73 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
74 * recent CPU-online activity. Note that these masks are not cleared
75 * when CPUs go offline, so they reflect the union of all CPUs that have
76 * ever been online. This means that this function normally takes its
77 * no-work-to-do fastpath.
78 */
79static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
80{
81 bool done;
82 unsigned long flags;
83 unsigned long mask;
84 unsigned long oldmask;
85 int ncpus = smp_load_acquire(&rsp->ncpus); /* Order against locking. */
86 struct rcu_node *rnp;
87 struct rcu_node *rnp_up;
88
89 /* If no new CPUs onlined since last time, nothing to do. */
90 if (likely(ncpus == rsp->ncpus_snap))
91 return;
92 rsp->ncpus_snap = ncpus;
93
94 /*
95 * Each pass through the following loop propagates newly onlined
96 * CPUs for the current rcu_node structure up the rcu_node tree.
97 */
98 rcu_for_each_leaf_node(rsp, rnp) {
99 raw_spin_lock_irqsave_rcu_node(rnp, flags);
100 if (rnp->expmaskinit == rnp->expmaskinitnext) {
101 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
102 continue; /* No new CPUs, nothing to do. */
103 }
104
105 /* Update this node's mask, track old value for propagation. */
106 oldmask = rnp->expmaskinit;
107 rnp->expmaskinit = rnp->expmaskinitnext;
108 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
109
110 /* If was already nonzero, nothing to propagate. */
111 if (oldmask)
112 continue;
113
114 /* Propagate the new CPU up the tree. */
115 mask = rnp->grpmask;
116 rnp_up = rnp->parent;
117 done = false;
118 while (rnp_up) {
119 raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
120 if (rnp_up->expmaskinit)
121 done = true;
122 rnp_up->expmaskinit |= mask;
123 raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
124 if (done)
125 break;
126 mask = rnp_up->grpmask;
127 rnp_up = rnp_up->parent;
128 }
129 }
130}
131
132/*
133 * Reset the ->expmask values in the rcu_node tree in preparation for
134 * a new expedited grace period.
135 */
136static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
137{
138 unsigned long flags;
139 struct rcu_node *rnp;
140
141 sync_exp_reset_tree_hotplug(rsp);
142 rcu_for_each_node_breadth_first(rsp, rnp) {
143 raw_spin_lock_irqsave_rcu_node(rnp, flags);
144 WARN_ON_ONCE(rnp->expmask);
145 rnp->expmask = rnp->expmaskinit;
146 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
147 }
148}
149
150/*
151 * Return non-zero if there is no RCU expedited grace period in progress
152 * for the specified rcu_node structure, in other words, if all CPUs and
153 * tasks covered by the specified rcu_node structure have done their bit
154 * for the current expedited grace period. Works only for preemptible
155 * RCU -- other RCU implementation use other means.
156 *
157 * Caller must hold the rcu_state's exp_mutex.
158 */
159static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
160{
161 return rnp->exp_tasks == NULL &&
162 READ_ONCE(rnp->expmask) == 0;
163}
164
165/*
166 * Report the exit from RCU read-side critical section for the last task
167 * that queued itself during or before the current expedited preemptible-RCU
168 * grace period. This event is reported either to the rcu_node structure on
169 * which the task was queued or to one of that rcu_node structure's ancestors,
170 * recursively up the tree. (Calm down, calm down, we do the recursion
171 * iteratively!)
172 *
173 * Caller must hold the rcu_state's exp_mutex and the specified rcu_node
174 * structure's ->lock.
175 */
176static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
177 bool wake, unsigned long flags)
178 __releases(rnp->lock)
179{
180 unsigned long mask;
181
182 for (;;) {
183 if (!sync_rcu_preempt_exp_done(rnp)) {
184 if (!rnp->expmask)
185 rcu_initiate_boost(rnp, flags);
186 else
187 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
188 break;
189 }
190 if (rnp->parent == NULL) {
191 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
192 if (wake) {
193 smp_mb(); /* EGP done before wake_up(). */
194 swake_up(&rsp->expedited_wq);
195 }
196 break;
197 }
198 mask = rnp->grpmask;
199 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
200 rnp = rnp->parent;
201 raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
202 WARN_ON_ONCE(!(rnp->expmask & mask));
203 rnp->expmask &= ~mask;
204 }
205}
206
207/*
208 * Report expedited quiescent state for specified node. This is a
209 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
210 *
211 * Caller must hold the rcu_state's exp_mutex.
212 */
213static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
214 struct rcu_node *rnp, bool wake)
215{
216 unsigned long flags;
217
218 raw_spin_lock_irqsave_rcu_node(rnp, flags);
219 __rcu_report_exp_rnp(rsp, rnp, wake, flags);
220}
221
222/*
223 * Report expedited quiescent state for multiple CPUs, all covered by the
224 * specified leaf rcu_node structure. Caller must hold the rcu_state's
225 * exp_mutex.
226 */
227static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
228 unsigned long mask, bool wake)
229{
230 unsigned long flags;
231
232 raw_spin_lock_irqsave_rcu_node(rnp, flags);
233 if (!(rnp->expmask & mask)) {
234 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
235 return;
236 }
237 rnp->expmask &= ~mask;
238 __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
239}
240
241/*
242 * Report expedited quiescent state for specified rcu_data (CPU).
243 */
244static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
245 bool wake)
246{
247 rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
248}
249
250/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
251static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
252 unsigned long s)
253{
254 if (rcu_exp_gp_seq_done(rsp, s)) {
255 trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
256 /* Ensure test happens before caller kfree(). */
257 smp_mb__before_atomic(); /* ^^^ */
258 atomic_long_inc(stat);
259 return true;
260 }
261 return false;
262}
263
264/*
265 * Funnel-lock acquisition for expedited grace periods. Returns true
266 * if some other task completed an expedited grace period that this task
267 * can piggy-back on, and with no mutex held. Otherwise, returns false
268 * with the mutex held, indicating that the caller must actually do the
269 * expedited grace period.
270 */
271static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
272{
273 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
274 struct rcu_node *rnp = rdp->mynode;
275 struct rcu_node *rnp_root = rcu_get_root(rsp);
276
277 /* Low-contention fastpath. */
278 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
279 (rnp == rnp_root ||
280 ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
281 mutex_trylock(&rsp->exp_mutex))
282 goto fastpath;
283
284 /*
285 * Each pass through the following loop works its way up
286 * the rcu_node tree, returning if others have done the work or
287 * otherwise falls through to acquire rsp->exp_mutex. The mapping
288 * from CPU to rcu_node structure can be inexact, as it is just
289 * promoting locality and is not strictly needed for correctness.
290 */
291 for (; rnp != NULL; rnp = rnp->parent) {
292 if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
293 return true;
294
295 /* Work not done, either wait here or go up. */
296 spin_lock(&rnp->exp_lock);
297 if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
298
299 /* Someone else doing GP, so wait for them. */
300 spin_unlock(&rnp->exp_lock);
301 trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
302 rnp->grplo, rnp->grphi,
303 TPS("wait"));
304 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
305 sync_exp_work_done(rsp,
306 &rdp->exp_workdone2, s));
307 return true;
308 }
309 rnp->exp_seq_rq = s; /* Followers can wait on us. */
310 spin_unlock(&rnp->exp_lock);
311 trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
312 rnp->grphi, TPS("nxtlvl"));
313 }
314 mutex_lock(&rsp->exp_mutex);
315fastpath:
316 if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
317 mutex_unlock(&rsp->exp_mutex);
318 return true;
319 }
320 rcu_exp_gp_seq_start(rsp);
321 trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
322 return false;
323}
324
325/* Invoked on each online non-idle CPU for expedited quiescent state. */
326static void sync_sched_exp_handler(void *data)
327{
328 struct rcu_data *rdp;
329 struct rcu_node *rnp;
330 struct rcu_state *rsp = data;
331
332 rdp = this_cpu_ptr(rsp->rda);
333 rnp = rdp->mynode;
334 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
335 __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
336 return;
337 if (rcu_is_cpu_rrupt_from_idle()) {
338 rcu_report_exp_rdp(&rcu_sched_state,
339 this_cpu_ptr(&rcu_sched_data), true);
340 return;
341 }
342 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
343 /* Store .exp before .rcu_urgent_qs. */
344 smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
345 resched_cpu(smp_processor_id());
346}
347
348/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
349static void sync_sched_exp_online_cleanup(int cpu)
350{
351 struct rcu_data *rdp;
352 int ret;
353 struct rcu_node *rnp;
354 struct rcu_state *rsp = &rcu_sched_state;
355
356 rdp = per_cpu_ptr(rsp->rda, cpu);
357 rnp = rdp->mynode;
358 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
359 return;
360 ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
361 WARN_ON_ONCE(ret);
362}
363
364/*
365 * Select the nodes that the upcoming expedited grace period needs
366 * to wait for.
367 */
368static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
369 smp_call_func_t func)
370{
371 int cpu;
372 unsigned long flags;
373 unsigned long mask_ofl_test;
374 unsigned long mask_ofl_ipi;
375 int ret;
376 struct rcu_node *rnp;
377
378 trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
379 sync_exp_reset_tree(rsp);
380 trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("select"));
381 rcu_for_each_leaf_node(rsp, rnp) {
382 raw_spin_lock_irqsave_rcu_node(rnp, flags);
383
384 /* Each pass checks a CPU for identity, offline, and idle. */
385 mask_ofl_test = 0;
386 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
387 unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
388 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
389 struct rcu_dynticks *rdtp = per_cpu_ptr(&rcu_dynticks, cpu);
390 int snap;
391
392 if (raw_smp_processor_id() == cpu ||
393 !(rnp->qsmaskinitnext & mask)) {
394 mask_ofl_test |= mask;
395 } else {
396 snap = rcu_dynticks_snap(rdtp);
397 if (rcu_dynticks_in_eqs(snap))
398 mask_ofl_test |= mask;
399 else
400 rdp->exp_dynticks_snap = snap;
401 }
402 }
403 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
404
405 /*
406 * Need to wait for any blocked tasks as well. Note that
407 * additional blocking tasks will also block the expedited
408 * GP until such time as the ->expmask bits are cleared.
409 */
410 if (rcu_preempt_has_tasks(rnp))
411 rnp->exp_tasks = rnp->blkd_tasks.next;
412 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
413
414 /* IPI the remaining CPUs for expedited quiescent state. */
415 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
416 unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
417 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
418
419 if (!(mask_ofl_ipi & mask))
420 continue;
421retry_ipi:
422 if (rcu_dynticks_in_eqs_since(rdp->dynticks,
423 rdp->exp_dynticks_snap)) {
424 mask_ofl_test |= mask;
425 continue;
426 }
427 ret = smp_call_function_single(cpu, func, rsp, 0);
428 if (!ret) {
429 mask_ofl_ipi &= ~mask;
430 continue;
431 }
432 /* Failed, raced with CPU hotplug operation. */
433 raw_spin_lock_irqsave_rcu_node(rnp, flags);
434 if ((rnp->qsmaskinitnext & mask) &&
435 (rnp->expmask & mask)) {
436 /* Online, so delay for a bit and try again. */
437 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
438 trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("selectofl"));
439 schedule_timeout_uninterruptible(1);
440 goto retry_ipi;
441 }
442 /* CPU really is offline, so we can ignore it. */
443 if (!(rnp->expmask & mask))
444 mask_ofl_ipi &= ~mask;
445 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
446 }
447 /* Report quiescent states for those that went offline. */
448 mask_ofl_test |= mask_ofl_ipi;
449 if (mask_ofl_test)
450 rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
451 }
452}
453
454static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
455{
456 int cpu;
457 unsigned long jiffies_stall;
458 unsigned long jiffies_start;
459 unsigned long mask;
460 int ndetected;
461 struct rcu_node *rnp;
462 struct rcu_node *rnp_root = rcu_get_root(rsp);
463 int ret;
464
465 trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("startwait"));
466 jiffies_stall = rcu_jiffies_till_stall_check();
467 jiffies_start = jiffies;
468
469 for (;;) {
470 ret = swait_event_timeout(
471 rsp->expedited_wq,
472 sync_rcu_preempt_exp_done(rnp_root),
473 jiffies_stall);
474 if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
475 return;
476 WARN_ON(ret < 0); /* workqueues should not be signaled. */
477 if (rcu_cpu_stall_suppress)
478 continue;
479 panic_on_rcu_stall();
480 pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
481 rsp->name);
482 ndetected = 0;
483 rcu_for_each_leaf_node(rsp, rnp) {
484 ndetected += rcu_print_task_exp_stall(rnp);
485 for_each_leaf_node_possible_cpu(rnp, cpu) {
486 struct rcu_data *rdp;
487
488 mask = leaf_node_cpu_bit(rnp, cpu);
489 if (!(rnp->expmask & mask))
490 continue;
491 ndetected++;
492 rdp = per_cpu_ptr(rsp->rda, cpu);
493 pr_cont(" %d-%c%c%c", cpu,
494 "O."[!!cpu_online(cpu)],
495 "o."[!!(rdp->grpmask & rnp->expmaskinit)],
496 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
497 }
498 }
499 pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
500 jiffies - jiffies_start, rsp->expedited_sequence,
501 rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
502 if (ndetected) {
503 pr_err("blocking rcu_node structures:");
504 rcu_for_each_node_breadth_first(rsp, rnp) {
505 if (rnp == rnp_root)
506 continue; /* printed unconditionally */
507 if (sync_rcu_preempt_exp_done(rnp))
508 continue;
509 pr_cont(" l=%u:%d-%d:%#lx/%c",
510 rnp->level, rnp->grplo, rnp->grphi,
511 rnp->expmask,
512 ".T"[!!rnp->exp_tasks]);
513 }
514 pr_cont("\n");
515 }
516 rcu_for_each_leaf_node(rsp, rnp) {
517 for_each_leaf_node_possible_cpu(rnp, cpu) {
518 mask = leaf_node_cpu_bit(rnp, cpu);
519 if (!(rnp->expmask & mask))
520 continue;
521 dump_cpu_task(cpu);
522 }
523 }
524 jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
525 }
526}
527
528/*
529 * Wait for the current expedited grace period to complete, and then
530 * wake up everyone who piggybacked on the just-completed expedited
531 * grace period. Also update all the ->exp_seq_rq counters as needed
532 * in order to avoid counter-wrap problems.
533 */
534static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
535{
536 struct rcu_node *rnp;
537
538 synchronize_sched_expedited_wait(rsp);
539 rcu_exp_gp_seq_end(rsp);
540 trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
541
542 /*
543 * Switch over to wakeup mode, allowing the next GP, but -only- the
544 * next GP, to proceed.
545 */
546 mutex_lock(&rsp->exp_wake_mutex);
547
548 rcu_for_each_node_breadth_first(rsp, rnp) {
549 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
550 spin_lock(&rnp->exp_lock);
551 /* Recheck, avoid hang in case someone just arrived. */
552 if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
553 rnp->exp_seq_rq = s;
554 spin_unlock(&rnp->exp_lock);
555 }
556 smp_mb(); /* All above changes before wakeup. */
557 wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rsp->expedited_sequence) & 0x3]);
558 }
559 trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
560 mutex_unlock(&rsp->exp_wake_mutex);
561}
562
563/* Let the workqueue handler know what it is supposed to do. */
564struct rcu_exp_work {
565 smp_call_func_t rew_func;
566 struct rcu_state *rew_rsp;
567 unsigned long rew_s;
568 struct work_struct rew_work;
569};
570
571/*
572 * Common code to drive an expedited grace period forward, used by
573 * workqueues and mid-boot-time tasks.
574 */
575static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
576 smp_call_func_t func, unsigned long s)
577{
578 /* Initialize the rcu_node tree in preparation for the wait. */
579 sync_rcu_exp_select_cpus(rsp, func);
580
581 /* Wait and clean up, including waking everyone. */
582 rcu_exp_wait_wake(rsp, s);
583}
584
585/*
586 * Work-queue handler to drive an expedited grace period forward.
587 */
588static void wait_rcu_exp_gp(struct work_struct *wp)
589{
590 struct rcu_exp_work *rewp;
591
592 rewp = container_of(wp, struct rcu_exp_work, rew_work);
593 rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
594}
595
596/*
597 * Given an rcu_state pointer and a smp_call_function() handler, kick
598 * off the specified flavor of expedited grace period.
599 */
600static void _synchronize_rcu_expedited(struct rcu_state *rsp,
601 smp_call_func_t func)
602{
603 struct rcu_data *rdp;
604 struct rcu_exp_work rew;
605 struct rcu_node *rnp;
606 unsigned long s;
607
608 /* If expedited grace periods are prohibited, fall back to normal. */
609 if (rcu_gp_is_normal()) {
610 wait_rcu_gp(rsp->call);
611 return;
612 }
613
614 /* Take a snapshot of the sequence number. */
615 s = rcu_exp_gp_seq_snap(rsp);
616 if (exp_funnel_lock(rsp, s))
617 return; /* Someone else did our work for us. */
618
619 /* Ensure that load happens before action based on it. */
620 if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
621 /* Direct call during scheduler init and early_initcalls(). */
622 rcu_exp_sel_wait_wake(rsp, func, s);
623 } else {
624 /* Marshall arguments & schedule the expedited grace period. */
625 rew.rew_func = func;
626 rew.rew_rsp = rsp;
627 rew.rew_s = s;
628 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
629 queue_work(rcu_gp_wq, &rew.rew_work);
630 }
631
632 /* Wait for expedited grace period to complete. */
633 rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
634 rnp = rcu_get_root(rsp);
635 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
636 sync_exp_work_done(rsp, &rdp->exp_workdone0, s));
637 smp_mb(); /* Workqueue actions happen before return. */
638
639 /* Let the next expedited grace period start. */
640 mutex_unlock(&rsp->exp_mutex);
641}
642
643/**
644 * synchronize_sched_expedited - Brute-force RCU-sched grace period
645 *
646 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
647 * approach to force the grace period to end quickly. This consumes
648 * significant time on all CPUs and is unfriendly to real-time workloads,
649 * so is thus not recommended for any sort of common-case code. In fact,
650 * if you are using synchronize_sched_expedited() in a loop, please
651 * restructure your code to batch your updates, and then use a single
652 * synchronize_sched() instead.
653 *
654 * This implementation can be thought of as an application of sequence
655 * locking to expedited grace periods, but using the sequence counter to
656 * determine when someone else has already done the work instead of for
657 * retrying readers.
658 */
659void synchronize_sched_expedited(void)
660{
661 struct rcu_state *rsp = &rcu_sched_state;
662
663 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
664 lock_is_held(&rcu_lock_map) ||
665 lock_is_held(&rcu_sched_lock_map),
666 "Illegal synchronize_sched_expedited() in RCU read-side critical section");
667
668 /* If only one CPU, this is automatically a grace period. */
669 if (rcu_blocking_is_gp())
670 return;
671
672 _synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
673}
674EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
675
676#ifdef CONFIG_PREEMPT_RCU
677
678/*
679 * Remote handler for smp_call_function_single(). If there is an
680 * RCU read-side critical section in effect, request that the
681 * next rcu_read_unlock() record the quiescent state up the
682 * ->expmask fields in the rcu_node tree. Otherwise, immediately
683 * report the quiescent state.
684 */
685static void sync_rcu_exp_handler(void *info)
686{
687 struct rcu_data *rdp;
688 struct rcu_state *rsp = info;
689 struct task_struct *t = current;
690
691 /*
692 * Within an RCU read-side critical section, request that the next
693 * rcu_read_unlock() report. Unless this RCU read-side critical
694 * section has already blocked, in which case it is already set
695 * up for the expedited grace period to wait on it.
696 */
697 if (t->rcu_read_lock_nesting > 0 &&
698 !t->rcu_read_unlock_special.b.blocked) {
699 t->rcu_read_unlock_special.b.exp_need_qs = true;
700 return;
701 }
702
703 /*
704 * We are either exiting an RCU read-side critical section (negative
705 * values of t->rcu_read_lock_nesting) or are not in one at all
706 * (zero value of t->rcu_read_lock_nesting). Or we are in an RCU
707 * read-side critical section that blocked before this expedited
708 * grace period started. Either way, we can immediately report
709 * the quiescent state.
710 */
711 rdp = this_cpu_ptr(rsp->rda);
712 rcu_report_exp_rdp(rsp, rdp, true);
713}
714
715/**
716 * synchronize_rcu_expedited - Brute-force RCU grace period
717 *
718 * Wait for an RCU-preempt grace period, but expedite it. The basic
719 * idea is to IPI all non-idle non-nohz online CPUs. The IPI handler
720 * checks whether the CPU is in an RCU-preempt critical section, and
721 * if so, it sets a flag that causes the outermost rcu_read_unlock()
722 * to report the quiescent state. On the other hand, if the CPU is
723 * not in an RCU read-side critical section, the IPI handler reports
724 * the quiescent state immediately.
725 *
726 * Although this is a greate improvement over previous expedited
727 * implementations, it is still unfriendly to real-time workloads, so is
728 * thus not recommended for any sort of common-case code. In fact, if
729 * you are using synchronize_rcu_expedited() in a loop, please restructure
730 * your code to batch your updates, and then Use a single synchronize_rcu()
731 * instead.
732 */
733void synchronize_rcu_expedited(void)
734{
735 struct rcu_state *rsp = rcu_state_p;
736
737 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
738 lock_is_held(&rcu_lock_map) ||
739 lock_is_held(&rcu_sched_lock_map),
740 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
741
742 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
743 return;
744 _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
745}
746EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
747
748#else /* #ifdef CONFIG_PREEMPT_RCU */
749
750/*
751 * Wait for an rcu-preempt grace period, but make it happen quickly.
752 * But because preemptible RCU does not exist, map to rcu-sched.
753 */
754void synchronize_rcu_expedited(void)
755{
756 synchronize_sched_expedited();
757}
758EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
759
760#endif /* #else #ifdef CONFIG_PREEMPT_RCU */