1/*
2 * RCU expedited grace periods
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, you can access it online at
16 * http://www.gnu.org/licenses/gpl-2.0.html.
17 *
18 * Copyright IBM Corporation, 2016
19 *
20 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
21 */
22
23/* Wrapper functions for expedited grace periods. */
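/* Record the start of an expedited grace period. */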
24static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
25{
26 rcu_seq_start(&rsp->expedited_sequence);
27}
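/* Record the end of an expedited grace period. */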
28static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
29{
30 rcu_seq_end(&rsp->expedited_sequence);
31 smp_mb(); /* Ensure that consecutive grace periods serialize. */
32}
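/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */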
33static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
34{
35 unsigned long s;
36
37 smp_mb(); /* Caller's modifications seen first by other CPUs. */
38 s = rcu_seq_snap(&rsp->expedited_sequence);
39 trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
40 return s;
41}
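/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */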
42static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
43{
44 return rcu_seq_done(&rsp->expedited_sequence, s);
45}
46
47/*
48 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
49 * recent CPU-online activity. Note that these masks are not cleared
50 * when CPUs go offline, so they reflect the union of all CPUs that have
51 * ever been online. This means that this function normally takes its
52 * no-work-to-do fastpath.
53 */
54static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
55{
56 bool done;
57 unsigned long flags;
58 unsigned long mask;
59 unsigned long oldmask;
60 int ncpus = READ_ONCE(rsp->ncpus);
61 struct rcu_node *rnp;
62 struct rcu_node *rnp_up;
63
64 /* If no new CPUs onlined since last time, nothing to do. */
65 if (likely(ncpus == rsp->ncpus_snap))
66 return;
67 rsp->ncpus_snap = ncpus;
68
69 /*
70 * Each pass through the following loop propagates newly onlined
71 * CPUs for the current rcu_node structure up the rcu_node tree.
72 */
73 rcu_for_each_leaf_node(rsp, rnp) {
74 raw_spin_lock_irqsave_rcu_node(rnp, flags);
75 if (rnp->expmaskinit == rnp->expmaskinitnext) {
76 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
77 continue; /* No new CPUs, nothing to do. */
78 }
79
80 /* Update this node's mask, track old value for propagation. */
81 oldmask = rnp->expmaskinit;
82 rnp->expmaskinit = rnp->expmaskinitnext;
83 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
84
85 /* If was already nonzero, nothing to propagate. */
86 if (oldmask)
87 continue;
88
89 /* Propagate the new CPU up the tree. */
90 mask = rnp->grpmask;
91 rnp_up = rnp->parent;
92 done = false;
93 while (rnp_up) {
94 raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
95 if (rnp_up->expmaskinit)
96 done = true;
97 rnp_up->expmaskinit |= mask;
98 raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
99 if (done)
100 break;
101 mask = rnp_up->grpmask;
102 rnp_up = rnp_up->parent;
103 }
104 }
105}
106
107/*
108 * Reset the ->expmask values in the rcu_node tree in preparation for
109 * a new expedited grace period.
110 */
111static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
112{
113 unsigned long flags;
114 struct rcu_node *rnp;
115
116 sync_exp_reset_tree_hotplug(rsp);
117 rcu_for_each_node_breadth_first(rsp, rnp) {
118 raw_spin_lock_irqsave_rcu_node(rnp, flags);
119 WARN_ON_ONCE(rnp->expmask);
120 rnp->expmask = rnp->expmaskinit;
121 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
122 }
123}
124
125/*
126 * Return non-zero if there is no RCU expedited grace period in progress
127 * for the specified rcu_node structure, in other words, if all CPUs and
128 * tasks covered by the specified rcu_node structure have done their bit
129 * for the current expedited grace period. Works only for preemptible
130 * RCU -- other RCU implementations use other means.
131 *
132 * Caller must hold the rcu_state's exp_mutex.
133 */
134static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
135{
136 return rnp->exp_tasks == NULL &&
137 READ_ONCE(rnp->expmask) == 0;
138}
139
140/*
141 * Report the exit from RCU read-side critical section for the last task
142 * that queued itself during or before the current expedited preemptible-RCU
143 * grace period. This event is reported either to the rcu_node structure on
144 * which the task was queued or to one of that rcu_node structure's ancestors,
145 * recursively up the tree. (Calm down, calm down, we do the recursion
146 * iteratively!)
147 *
148 * Caller must hold the rcu_state's exp_mutex and the specified rcu_node
149 * structure's ->lock.
150 */
151static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
152 bool wake, unsigned long flags)
153 __releases(rnp->lock)
154{
155 unsigned long mask;
156
157 for (;;) {
158 if (!sync_rcu_preempt_exp_done(rnp)) {
159 if (!rnp->expmask)
160 rcu_initiate_boost(rnp, flags);
161 else
162 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
163 break;
164 }
165 if (rnp->parent == NULL) {
166 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
167 if (wake) {
168 smp_mb(); /* EGP done before wake_up(). */
169 swake_up(&rsp->expedited_wq);
170 }
171 break;
172 }
173 mask = rnp->grpmask;
174 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
175 rnp = rnp->parent;
176 raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
177 WARN_ON_ONCE(!(rnp->expmask & mask));
178 rnp->expmask &= ~mask;
179 }
180}
181
182/*
183 * Report expedited quiescent state for specified node. This is a
184 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
185 *
186 * Caller must hold the rcu_state's exp_mutex.
187 */
188static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
189 struct rcu_node *rnp, bool wake)
190{
191 unsigned long flags;
192
193 raw_spin_lock_irqsave_rcu_node(rnp, flags);
194 __rcu_report_exp_rnp(rsp, rnp, wake, flags);
195}
196
197/*
198 * Report expedited quiescent state for multiple CPUs, all covered by the
199 * specified leaf rcu_node structure. Caller must hold the rcu_state's
200 * exp_mutex.
201 */
202static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
203 unsigned long mask, bool wake)
204{
205 unsigned long flags;
206
207 raw_spin_lock_irqsave_rcu_node(rnp, flags);
208 if (!(rnp->expmask & mask)) {
209 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
210 return;
211 }
212 rnp->expmask &= ~mask;
213 __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
214}
215
216/*
217 * Report expedited quiescent state for specified rcu_data (CPU).
218 */
219static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
220 bool wake)
221{
222 rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
223}
224
225/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
226static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
227 unsigned long s)
228{
229 if (rcu_exp_gp_seq_done(rsp, s)) {
230 trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
231 /* Ensure test happens before caller kfree(). */
232 smp_mb__before_atomic(); /* ^^^ */
233 atomic_long_inc(stat);
234 return true;
235 }
236 return false;
237}
238
239/*
240 * Funnel-lock acquisition for expedited grace periods. Returns true
241 * if some other task completed an expedited grace period that this task
242 * can piggy-back on, and with no mutex held. Otherwise, returns false
243 * with the mutex held, indicating that the caller must actually do the
244 * expedited grace period.
245 */
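/*
 * Note on the wait-queue indexing below: in this version of the code the
 * low-order bit of the expedited sequence number indicates a grace period
 * in progress, so "(s >> 1) & 0x3" uses the low-order bits of the
 * grace-period count to select one of the four per-rcu_node wait queues
 * in ->exp_wq[].
 */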
246static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
247{
248 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
249 struct rcu_node *rnp = rdp->mynode;
250 struct rcu_node *rnp_root = rcu_get_root(rsp);
251
252 /* Low-contention fastpath. */
253 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
254 (rnp == rnp_root ||
255 ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
256 mutex_trylock(&rsp->exp_mutex))
257 goto fastpath;
258
259 /*
260 * Each pass through the following loop works its way up
261 * the rcu_node tree, returning if others have done the work or
262 * otherwise falls through to acquire rsp->exp_mutex. The mapping
263 * from CPU to rcu_node structure can be inexact, as it is just
264 * promoting locality and is not strictly needed for correctness.
265 */
266 for (; rnp != NULL; rnp = rnp->parent) {
267 if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
268 return true;
269
270 /* Work not done, either wait here or go up. */
271 spin_lock(&rnp->exp_lock);
272 if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
273
274 /* Someone else doing GP, so wait for them. */
275 spin_unlock(&rnp->exp_lock);
276 trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
277 rnp->grplo, rnp->grphi,
278 TPS("wait"));
279 wait_event(rnp->exp_wq[(s >> 1) & 0x3],
280 sync_exp_work_done(rsp,
281 &rdp->exp_workdone2, s));
282 return true;
283 }
284 rnp->exp_seq_rq = s; /* Followers can wait on us. */
285 spin_unlock(&rnp->exp_lock);
286 trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
287 rnp->grphi, TPS("nxtlvl"));
288 }
289 mutex_lock(&rsp->exp_mutex);
290fastpath:
291 if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
292 mutex_unlock(&rsp->exp_mutex);
293 return true;
294 }
295 rcu_exp_gp_seq_start(rsp);
296 trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
297 return false;
298}
299
300/* Invoked on each online non-idle CPU for expedited quiescent state. */
301static void sync_sched_exp_handler(void *data)
302{
303 struct rcu_data *rdp;
304 struct rcu_node *rnp;
305 struct rcu_state *rsp = data;
306
307 rdp = this_cpu_ptr(rsp->rda);
308 rnp = rdp->mynode;
309 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
310 __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
311 return;
312 if (rcu_is_cpu_rrupt_from_idle()) {
313 rcu_report_exp_rdp(&rcu_sched_state,
314 this_cpu_ptr(&rcu_sched_data), true);
315 return;
316 }
317 __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
318 resched_cpu(smp_processor_id());
319}
320
321/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
322static void sync_sched_exp_online_cleanup(int cpu)
323{
324 struct rcu_data *rdp;
325 int ret;
326 struct rcu_node *rnp;
327 struct rcu_state *rsp = &rcu_sched_state;
328
329 rdp = per_cpu_ptr(rsp->rda, cpu);
330 rnp = rdp->mynode;
331 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
332 return;
333 ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
334 WARN_ON_ONCE(ret);
335}
336
337/*
338 * Select the nodes that the upcoming expedited grace period needs
339 * to wait for.
340 */
341static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
342 smp_call_func_t func)
343{
344 int cpu;
345 unsigned long flags;
346 unsigned long mask_ofl_test;
347 unsigned long mask_ofl_ipi;
348 int ret;
349 struct rcu_node *rnp;
350
351 sync_exp_reset_tree(rsp);
352 rcu_for_each_leaf_node(rsp, rnp) {
353 raw_spin_lock_irqsave_rcu_node(rnp, flags);
354
355 /* Each pass checks a CPU for identity, offline, and idle. */
356 mask_ofl_test = 0;
357 for_each_leaf_node_possible_cpu(rnp, cpu) {
358 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
359 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
360
361 rdp->exp_dynticks_snap =
362 atomic_add_return(0, &rdtp->dynticks);
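			/*
			 * The current CPU, CPUs whose ->dynticks snapshot is
			 * even (idle, and thus in an extended quiescent
			 * state), and CPUs that are not currently online
			 * need no IPI.
			 */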
363 if (raw_smp_processor_id() == cpu ||
364 !(rdp->exp_dynticks_snap & 0x1) ||
365 !(rnp->qsmaskinitnext & rdp->grpmask))
366 mask_ofl_test |= rdp->grpmask;
367 }
368 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
369
370 /*
371 * Need to wait for any blocked tasks as well. Note that
372 * additional blocking tasks will also block the expedited
373 * GP until such time as the ->expmask bits are cleared.
374 */
375 if (rcu_preempt_has_tasks(rnp))
376 rnp->exp_tasks = rnp->blkd_tasks.next;
377 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
378
379 /* IPI the remaining CPUs for expedited quiescent state. */
380 for_each_leaf_node_possible_cpu(rnp, cpu) {
381 unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
382 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
383 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
384
385 if (!(mask_ofl_ipi & mask))
386 continue;
387retry_ipi:
388 if (atomic_add_return(0, &rdtp->dynticks) !=
389 rdp->exp_dynticks_snap) {
390 mask_ofl_test |= mask;
391 continue;
392 }
393 ret = smp_call_function_single(cpu, func, rsp, 0);
394 if (!ret) {
395 mask_ofl_ipi &= ~mask;
396 continue;
397 }
398 /* Failed, raced with CPU hotplug operation. */
399 raw_spin_lock_irqsave_rcu_node(rnp, flags);
400 if ((rnp->qsmaskinitnext & mask) &&
401 (rnp->expmask & mask)) {
402 /* Online, so delay for a bit and try again. */
403 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
404 schedule_timeout_uninterruptible(1);
405 goto retry_ipi;
406 }
407 /* CPU really is offline, so we can ignore it. */
408 if (!(rnp->expmask & mask))
409 mask_ofl_ipi &= ~mask;
410 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
411 }
412 /* Report quiescent states for those that went offline. */
413 mask_ofl_test |= mask_ofl_ipi;
414 if (mask_ofl_test)
415 rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
416 }
417}
418
419static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
420{
421 int cpu;
422 unsigned long jiffies_stall;
423 unsigned long jiffies_start;
424 unsigned long mask;
425 int ndetected;
426 struct rcu_node *rnp;
427 struct rcu_node *rnp_root = rcu_get_root(rsp);
428 int ret;
429
430 jiffies_stall = rcu_jiffies_till_stall_check();
431 jiffies_start = jiffies;
432
433 for (;;) {
434 ret = swait_event_timeout(
435 rsp->expedited_wq,
436 sync_rcu_preempt_exp_done(rnp_root),
437 jiffies_stall);
438 if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
439 return;
440 WARN_ON(ret < 0); /* workqueues should not be signaled. */
441 if (rcu_cpu_stall_suppress)
442 continue;
443 panic_on_rcu_stall();
444 pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
445 rsp->name);
446 ndetected = 0;
447 rcu_for_each_leaf_node(rsp, rnp) {
448 ndetected += rcu_print_task_exp_stall(rnp);
449 for_each_leaf_node_possible_cpu(rnp, cpu) {
450 struct rcu_data *rdp;
451
452 mask = leaf_node_cpu_bit(rnp, cpu);
453 if (!(rnp->expmask & mask))
454 continue;
455 ndetected++;
456 rdp = per_cpu_ptr(rsp->rda, cpu);
457 pr_cont(" %d-%c%c%c", cpu,
458 "O."[!!cpu_online(cpu)],
459 "o."[!!(rdp->grpmask & rnp->expmaskinit)],
460 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
461 }
462 }
463 pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
464 jiffies - jiffies_start, rsp->expedited_sequence,
465 rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
466 if (ndetected) {
467 pr_err("blocking rcu_node structures:");
468 rcu_for_each_node_breadth_first(rsp, rnp) {
469 if (rnp == rnp_root)
470 continue; /* printed unconditionally */
471 if (sync_rcu_preempt_exp_done(rnp))
472 continue;
473 pr_cont(" l=%u:%d-%d:%#lx/%c",
474 rnp->level, rnp->grplo, rnp->grphi,
475 rnp->expmask,
476 ".T"[!!rnp->exp_tasks]);
477 }
478 pr_cont("\n");
479 }
480 rcu_for_each_leaf_node(rsp, rnp) {
481 for_each_leaf_node_possible_cpu(rnp, cpu) {
482 mask = leaf_node_cpu_bit(rnp, cpu);
483 if (!(rnp->expmask & mask))
484 continue;
485 dump_cpu_task(cpu);
486 }
487 }
488 jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
489 }
490}
491
492/*
493 * Wait for the current expedited grace period to complete, and then
494 * wake up everyone who piggybacked on the just-completed expedited
495 * grace period. Also update all the ->exp_seq_rq counters as needed
496 * in order to avoid counter-wrap problems.
497 */
498static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
499{
500 struct rcu_node *rnp;
501
502 synchronize_sched_expedited_wait(rsp);
503 rcu_exp_gp_seq_end(rsp);
504 trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
505
506 /*
507 * Switch over to wakeup mode, allowing the next GP, but -only- the
508 * next GP, to proceed.
509 */
510 mutex_lock(&rsp->exp_wake_mutex);
511
512 rcu_for_each_node_breadth_first(rsp, rnp) {
513 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
514 spin_lock(&rnp->exp_lock);
515 /* Recheck, avoid hang in case someone just arrived. */
516 if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
517 rnp->exp_seq_rq = s;
518 spin_unlock(&rnp->exp_lock);
519 }
520 wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
521 }
522 trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
523 mutex_unlock(&rsp->exp_wake_mutex);
524}
525
526/* Let the workqueue handler know what it is supposed to do. */
527struct rcu_exp_work {
528 smp_call_func_t rew_func;
529 struct rcu_state *rew_rsp;
530 unsigned long rew_s;
531 struct work_struct rew_work;
532};
533
534/*
535 * Common code to drive an expedited grace period forward, used by
536 * workqueues and mid-boot-time tasks.
537 */
538static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
539 smp_call_func_t func, unsigned long s)
540{
541 /* Initialize the rcu_node tree in preparation for the wait. */
542 sync_rcu_exp_select_cpus(rsp, func);
543
544 /* Wait and clean up, including waking everyone. */
545 rcu_exp_wait_wake(rsp, s);
546}
547
548/*
549 * Work-queue handler to drive an expedited grace period forward.
550 */
551static void wait_rcu_exp_gp(struct work_struct *wp)
552{
553 struct rcu_exp_work *rewp;
554
555 rewp = container_of(wp, struct rcu_exp_work, rew_work);
556 rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
557}
558
559/*
560 * Given an rcu_state pointer and a smp_call_function() handler, kick
561 * off the specified flavor of expedited grace period.
562 */
563static void _synchronize_rcu_expedited(struct rcu_state *rsp,
564 smp_call_func_t func)
565{
566 struct rcu_data *rdp;
567 struct rcu_exp_work rew;
568 struct rcu_node *rnp;
569 unsigned long s;
570
571 /* If expedited grace periods are prohibited, fall back to normal. */
572 if (rcu_gp_is_normal()) {
573 wait_rcu_gp(rsp->call);
574 return;
575 }
576
577 /* Take a snapshot of the sequence number. */
578 s = rcu_exp_gp_seq_snap(rsp);
579 if (exp_funnel_lock(rsp, s))
580 return; /* Someone else did our work for us. */
581
582 /* Ensure that load happens before action based on it. */
583 if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
584 /* Direct call during scheduler init and early_initcalls(). */
585 rcu_exp_sel_wait_wake(rsp, func, s);
586 } else {
587 /* Marshal arguments & schedule the expedited grace period. */
588 rew.rew_func = func;
589 rew.rew_rsp = rsp;
590 rew.rew_s = s;
591 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
592 schedule_work(&rew.rew_work);
593 }
594
595 /* Wait for expedited grace period to complete. */
596 rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
597 rnp = rcu_get_root(rsp);
598 wait_event(rnp->exp_wq[(s >> 1) & 0x3],
599 sync_exp_work_done(rsp,
600 &rdp->exp_workdone0, s));
601
602 /* Let the next expedited grace period start. */
603 mutex_unlock(&rsp->exp_mutex);
604}
605
606/**
607 * synchronize_sched_expedited - Brute-force RCU-sched grace period
608 *
609 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
610 * approach to force the grace period to end quickly. This consumes
611 * significant time on all CPUs and is unfriendly to real-time workloads,
612 * so is thus not recommended for any sort of common-case code. In fact,
613 * if you are using synchronize_sched_expedited() in a loop, please
614 * restructure your code to batch your updates, and then use a single
615 * synchronize_sched() instead.
616 *
617 * This implementation can be thought of as an application of sequence
618 * locking to expedited grace periods, but using the sequence counter to
619 * determine when someone else has already done the work instead of for
620 * retrying readers.
621 */
622void synchronize_sched_expedited(void)
623{
624 struct rcu_state *rsp = &rcu_sched_state;
625
626 /* If only one CPU, this is automatically a grace period. */
627 if (rcu_blocking_is_gp())
628 return;
629
630 _synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
631}
632EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
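
/*
 * To illustrate the batching advice above (a sketch only, with made-up
 * names for the list and its elements): instead of
 *
 *	for (i = 0; i < n; i++) {
 *		list_del_rcu(&p[i]->list);
 *		synchronize_sched_expedited();
 *		kfree(p[i]);
 *	}
 *
 * remove all of the elements first, wait for a single grace period,
 * and only then free them:
 *
 *	for (i = 0; i < n; i++)
 *		list_del_rcu(&p[i]->list);
 *	synchronize_sched();
 *	for (i = 0; i < n; i++)
 *		kfree(p[i]);
 */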
633
634#ifdef CONFIG_PREEMPT_RCU
635
636/*
637 * Remote handler for smp_call_function_single(). If there is an
638 * RCU read-side critical section in effect, request that the
639 * next rcu_read_unlock() record the quiescent state up the
640 * ->expmask fields in the rcu_node tree. Otherwise, immediately
641 * report the quiescent state.
642 */
643static void sync_rcu_exp_handler(void *info)
644{
645 struct rcu_data *rdp;
646 struct rcu_state *rsp = info;
647 struct task_struct *t = current;
648
649 /*
650 * Within an RCU read-side critical section, request that the next
651 * rcu_read_unlock() report. Unless this RCU read-side critical
652 * section has already blocked, in which case it is already set
653 * up for the expedited grace period to wait on it.
654 */
655 if (t->rcu_read_lock_nesting > 0 &&
656 !t->rcu_read_unlock_special.b.blocked) {
657 t->rcu_read_unlock_special.b.exp_need_qs = true;
658 return;
659 }
660
661 /*
662 * We are either exiting an RCU read-side critical section (negative
663 * values of t->rcu_read_lock_nesting) or are not in one at all
664 * (zero value of t->rcu_read_lock_nesting). Or we are in an RCU
665 * read-side critical section that blocked before this expedited
666 * grace period started. Either way, we can immediately report
667 * the quiescent state.
668 */
669 rdp = this_cpu_ptr(rsp->rda);
670 rcu_report_exp_rdp(rsp, rdp, true);
671}
672
673/**
674 * synchronize_rcu_expedited - Brute-force RCU grace period
675 *
676 * Wait for an RCU-preempt grace period, but expedite it. The basic
677 * idea is to IPI all non-idle non-nohz online CPUs. The IPI handler
678 * checks whether the CPU is in an RCU-preempt critical section, and
679 * if so, it sets a flag that causes the outermost rcu_read_unlock()
680 * to report the quiescent state. On the other hand, if the CPU is
681 * not in an RCU read-side critical section, the IPI handler reports
682 * the quiescent state immediately.
683 *
684 * Although this is a great improvement over previous expedited
685 * implementations, it is still unfriendly to real-time workloads, so is
686 * thus not recommended for any sort of common-case code. In fact, if
687 * you are using synchronize_rcu_expedited() in a loop, please restructure
688 * your code to batch your updates, and then use a single synchronize_rcu()
689 * instead.
690 */
691void synchronize_rcu_expedited(void)
692{
693 struct rcu_state *rsp = rcu_state_p;
694
695 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
696 return;
697 _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
698}
699EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
700
701#else /* #ifdef CONFIG_PREEMPT_RCU */
702
703/*
704 * Wait for an rcu-preempt grace period, but make it happen quickly.
705 * But because preemptible RCU does not exist, map to rcu-sched.
706 */
707void synchronize_rcu_expedited(void)
708{
709 synchronize_sched_expedited();
710}
711EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
712
713#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
714
715/*
716 * Switch to run-time mode once Tree RCU has fully initialized.
717 */
718static int __init rcu_exp_runtime_mode(void)
719{
720 rcu_test_sync_prims();
721 rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
722 rcu_test_sync_prims();
723 return 0;
724}
725core_initcall(rcu_exp_runtime_mode);
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * RCU expedited grace periods
4 *
5 * Copyright IBM Corporation, 2016
6 *
7 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
8 */
9
10#include <linux/lockdep.h>
11
12static void rcu_exp_handler(void *unused);
13static int rcu_print_task_exp_stall(struct rcu_node *rnp);
14
15/*
16 * Record the start of an expedited grace period.
17 */
18static void rcu_exp_gp_seq_start(void)
19{
20 rcu_seq_start(&rcu_state.expedited_sequence);
21}
22
23/*
24 * Return the value that the expedited-grace-period counter will have
25 * at the end of the current grace period.
26 */
27static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
28{
29 return rcu_seq_endval(&rcu_state.expedited_sequence);
30}
31
32/*
33 * Record the end of an expedited grace period.
34 */
35static void rcu_exp_gp_seq_end(void)
36{
37 rcu_seq_end(&rcu_state.expedited_sequence);
38 smp_mb(); /* Ensure that consecutive grace periods serialize. */
39}
40
41/*
42 * Take a snapshot of the expedited-grace-period counter, which is the
43 * earliest value that will indicate that a full grace period has
44 * elapsed since the current time.
45 */
46static unsigned long rcu_exp_gp_seq_snap(void)
47{
48 unsigned long s;
49
50 smp_mb(); /* Caller's modifications seen first by other CPUs. */
51 s = rcu_seq_snap(&rcu_state.expedited_sequence);
52 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
53 return s;
54}
55
56/*
57 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
58 * if a full expedited grace period has elapsed since that snapshot
59 * was taken.
60 */
61static bool rcu_exp_gp_seq_done(unsigned long s)
62{
63 return rcu_seq_done(&rcu_state.expedited_sequence, s);
64}
65
66/*
67 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
68 * recent CPU-online activity. Note that these masks are not cleared
69 * when CPUs go offline, so they reflect the union of all CPUs that have
70 * ever been online. This means that this function normally takes its
71 * no-work-to-do fastpath.
72 */
73static void sync_exp_reset_tree_hotplug(void)
74{
75 bool done;
76 unsigned long flags;
77 unsigned long mask;
78 unsigned long oldmask;
79 int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
80 struct rcu_node *rnp;
81 struct rcu_node *rnp_up;
82
83 /* If no new CPUs onlined since last time, nothing to do. */
84 if (likely(ncpus == rcu_state.ncpus_snap))
85 return;
86 rcu_state.ncpus_snap = ncpus;
87
88 /*
89 * Each pass through the following loop propagates newly onlined
90 * CPUs for the current rcu_node structure up the rcu_node tree.
91 */
92 rcu_for_each_leaf_node(rnp) {
93 raw_spin_lock_irqsave_rcu_node(rnp, flags);
94 if (rnp->expmaskinit == rnp->expmaskinitnext) {
95 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
96 continue; /* No new CPUs, nothing to do. */
97 }
98
99 /* Update this node's mask, track old value for propagation. */
100 oldmask = rnp->expmaskinit;
101 rnp->expmaskinit = rnp->expmaskinitnext;
102 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
103
104 /* If was already nonzero, nothing to propagate. */
105 if (oldmask)
106 continue;
107
108 /* Propagate the new CPU up the tree. */
109 mask = rnp->grpmask;
110 rnp_up = rnp->parent;
111 done = false;
112 while (rnp_up) {
113 raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
114 if (rnp_up->expmaskinit)
115 done = true;
116 rnp_up->expmaskinit |= mask;
117 raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
118 if (done)
119 break;
120 mask = rnp_up->grpmask;
121 rnp_up = rnp_up->parent;
122 }
123 }
124}
125
126/*
127 * Reset the ->expmask values in the rcu_node tree in preparation for
128 * a new expedited grace period.
129 */
130static void __maybe_unused sync_exp_reset_tree(void)
131{
132 unsigned long flags;
133 struct rcu_node *rnp;
134
135 sync_exp_reset_tree_hotplug();
136 rcu_for_each_node_breadth_first(rnp) {
137 raw_spin_lock_irqsave_rcu_node(rnp, flags);
138 WARN_ON_ONCE(rnp->expmask);
139 WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
140 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
141 }
142}
143
144/*
145 * Return non-zero if there is no RCU expedited grace period in progress
146 * for the specified rcu_node structure, in other words, if all CPUs and
147 * tasks covered by the specified rcu_node structure have done their bit
148 * for the current expedited grace period.
149 */
150static bool sync_rcu_exp_done(struct rcu_node *rnp)
151{
152 raw_lockdep_assert_held_rcu_node(rnp);
153 return READ_ONCE(rnp->exp_tasks) == NULL &&
154 READ_ONCE(rnp->expmask) == 0;
155}
156
157/*
158 * Like sync_rcu_exp_done(), but where the caller does not hold the
159 * rcu_node's ->lock.
160 */
161static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
162{
163 unsigned long flags;
164 bool ret;
165
166 raw_spin_lock_irqsave_rcu_node(rnp, flags);
167 ret = sync_rcu_exp_done(rnp);
168 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
169
170 return ret;
171}
172
174/*
175 * Report the exit from RCU read-side critical section for the last task
176 * that queued itself during or before the current expedited preemptible-RCU
177 * grace period. This event is reported either to the rcu_node structure on
178 * which the task was queued or to one of that rcu_node structure's ancestors,
179 * recursively up the tree. (Calm down, calm down, we do the recursion
180 * iteratively!)
181 */
182static void __rcu_report_exp_rnp(struct rcu_node *rnp,
183 bool wake, unsigned long flags)
184 __releases(rnp->lock)
185{
186 unsigned long mask;
187
188 raw_lockdep_assert_held_rcu_node(rnp);
189 for (;;) {
190 if (!sync_rcu_exp_done(rnp)) {
191 if (!rnp->expmask)
192 rcu_initiate_boost(rnp, flags);
193 else
194 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
195 break;
196 }
197 if (rnp->parent == NULL) {
198 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
199 if (wake) {
200 smp_mb(); /* EGP done before wake_up(). */
201 swake_up_one(&rcu_state.expedited_wq);
202 }
203 break;
204 }
205 mask = rnp->grpmask;
206 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
207 rnp = rnp->parent;
208 raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
209 WARN_ON_ONCE(!(rnp->expmask & mask));
210 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
211 }
212}
213
214/*
215 * Report expedited quiescent state for specified node. This is a
216 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
217 */
218static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
219{
220 unsigned long flags;
221
222 raw_spin_lock_irqsave_rcu_node(rnp, flags);
223 __rcu_report_exp_rnp(rnp, wake, flags);
224}
225
226/*
227 * Report expedited quiescent state for multiple CPUs, all covered by the
228 * specified leaf rcu_node structure.
229 */
230static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
231 unsigned long mask, bool wake)
232{
233 int cpu;
234 unsigned long flags;
235 struct rcu_data *rdp;
236
237 raw_spin_lock_irqsave_rcu_node(rnp, flags);
238 if (!(rnp->expmask & mask)) {
239 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
240 return;
241 }
242 WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
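	/*
	 * The CPUs whose quiescent states are being reported here no longer
	 * need the scheduler tick forced on for this expedited grace period,
	 * so clear their TICK_DEP_BIT_RCU_EXP dependency.
	 */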
243 for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
244 rdp = per_cpu_ptr(&rcu_data, cpu);
245 if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
246 continue;
247 rdp->rcu_forced_tick_exp = false;
248 tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
249 }
250 __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
251}
252
253/*
254 * Report expedited quiescent state for specified rcu_data (CPU).
255 */
256static void rcu_report_exp_rdp(struct rcu_data *rdp)
257{
258 WRITE_ONCE(rdp->exp_deferred_qs, false);
259 rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
260}
261
262/* Common code for work-done checking. */
263static bool sync_exp_work_done(unsigned long s)
264{
265 if (rcu_exp_gp_seq_done(s)) {
266 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
267 smp_mb(); /* Ensure test happens before caller kfree(). */
268 return true;
269 }
270 return false;
271}
272
273/*
274 * Funnel-lock acquisition for expedited grace periods. Returns true
275 * if some other task completed an expedited grace period that this task
276 * can piggy-back on, and with no mutex held. Otherwise, returns false
277 * with the mutex held, indicating that the caller must actually do the
278 * expedited grace period.
279 */
280static bool exp_funnel_lock(unsigned long s)
281{
282 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
283 struct rcu_node *rnp = rdp->mynode;
284 struct rcu_node *rnp_root = rcu_get_root();
285
286 /* Low-contention fastpath. */
287 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
288 (rnp == rnp_root ||
289 ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
290 mutex_trylock(&rcu_state.exp_mutex))
291 goto fastpath;
292
293 /*
294 * Each pass through the following loop works its way up
295 * the rcu_node tree, returning if others have done the work or
296 * otherwise falls through to acquire ->exp_mutex. The mapping
297 * from CPU to rcu_node structure can be inexact, as it is just
298 * promoting locality and is not strictly needed for correctness.
299 */
300 for (; rnp != NULL; rnp = rnp->parent) {
301 if (sync_exp_work_done(s))
302 return true;
303
304 /* Work not done, either wait here or go up. */
305 spin_lock(&rnp->exp_lock);
306 if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
307
308 /* Someone else doing GP, so wait for them. */
309 spin_unlock(&rnp->exp_lock);
310 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
311 rnp->grplo, rnp->grphi,
312 TPS("wait"));
313 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
314 sync_exp_work_done(s));
315 return true;
316 }
317 WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
318 spin_unlock(&rnp->exp_lock);
319 trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
320 rnp->grplo, rnp->grphi, TPS("nxtlvl"));
321 }
322 mutex_lock(&rcu_state.exp_mutex);
323fastpath:
324 if (sync_exp_work_done(s)) {
325 mutex_unlock(&rcu_state.exp_mutex);
326 return true;
327 }
328 rcu_exp_gp_seq_start();
329 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
330 return false;
331}
332
333/*
334 * Select the CPUs within the specified rcu_node that the upcoming
335 * expedited grace period needs to wait for.
336 */
337static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
338{
339 int cpu;
340 unsigned long flags;
341 unsigned long mask_ofl_test;
342 unsigned long mask_ofl_ipi;
343 int ret;
344 struct rcu_exp_work *rewp =
345 container_of(wp, struct rcu_exp_work, rew_work);
346 struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);
347
348 raw_spin_lock_irqsave_rcu_node(rnp, flags);
349
350 /* Each pass checks a CPU for identity, offline, and idle. */
351 mask_ofl_test = 0;
352 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
353 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
354 unsigned long mask = rdp->grpmask;
355 int snap;
356
357 if (raw_smp_processor_id() == cpu ||
358 !(rnp->qsmaskinitnext & mask)) {
359 mask_ofl_test |= mask;
360 } else {
361 snap = rcu_dynticks_snap(rdp);
362 if (rcu_dynticks_in_eqs(snap))
363 mask_ofl_test |= mask;
364 else
365 rdp->exp_dynticks_snap = snap;
366 }
367 }
368 mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
369
370 /*
371 * Need to wait for any blocked tasks as well. Note that
372 * additional blocking tasks will also block the expedited GP
373 * until such time as the ->expmask bits are cleared.
374 */
375 if (rcu_preempt_has_tasks(rnp))
376 WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
377 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
378
379 /* IPI the remaining CPUs for expedited quiescent state. */
380 for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
381 struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
382 unsigned long mask = rdp->grpmask;
383
384retry_ipi:
385 if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
386 mask_ofl_test |= mask;
387 continue;
388 }
389 if (get_cpu() == cpu) {
390 put_cpu();
391 continue;
392 }
393 ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
394 put_cpu();
395 /* The CPU will report the QS in response to the IPI. */
396 if (!ret)
397 continue;
398
399 /* Failed, raced with CPU hotplug operation. */
400 raw_spin_lock_irqsave_rcu_node(rnp, flags);
401 if ((rnp->qsmaskinitnext & mask) &&
402 (rnp->expmask & mask)) {
403 /* Online, so delay for a bit and try again. */
404 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
405 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
406 schedule_timeout_idle(1);
407 goto retry_ipi;
408 }
409 /* CPU really is offline, so we must report its QS. */
410 if (rnp->expmask & mask)
411 mask_ofl_test |= mask;
412 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
413 }
414 /* Report quiescent states for those that went offline. */
415 if (mask_ofl_test)
416 rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
417}
418
419/*
420 * Select the nodes that the upcoming expedited grace period needs
421 * to wait for.
422 */
423static void sync_rcu_exp_select_cpus(void)
424{
425 int cpu;
426 struct rcu_node *rnp;
427
428 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
429 sync_exp_reset_tree();
430 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));
431
432 /* Schedule work for each leaf rcu_node structure. */
433 rcu_for_each_leaf_node(rnp) {
434 rnp->exp_need_flush = false;
435 if (!READ_ONCE(rnp->expmask))
436 continue; /* Avoid early boot non-existent wq. */
437 if (!READ_ONCE(rcu_par_gp_wq) ||
438 rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
439 rcu_is_last_leaf_node(rnp)) {
440 /* No workqueues yet or last leaf, do direct call. */
441 sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
442 continue;
443 }
444 INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
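		/* Run the work on a CPU covered by this leaf node, presumably for locality. */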
445 cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
446 /* If all offline, queue the work on an unbound CPU. */
447 if (unlikely(cpu > rnp->grphi - rnp->grplo))
448 cpu = WORK_CPU_UNBOUND;
449 else
450 cpu += rnp->grplo;
451 queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
452 rnp->exp_need_flush = true;
453 }
454
455 /* Wait for workqueue jobs (if any) to complete. */
456 rcu_for_each_leaf_node(rnp)
457 if (rnp->exp_need_flush)
458 flush_work(&rnp->rew.rew_work);
459}
460
461/*
462 * Wait for the expedited grace period to elapse, within time limit.
463 * If the time limit is exceeded without the grace period elapsing,
464 * return false, otherwise return true.
465 */
466static bool synchronize_rcu_expedited_wait_once(long tlimit)
467{
468 int t;
469 struct rcu_node *rnp_root = rcu_get_root();
470
471 t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
472 sync_rcu_exp_done_unlocked(rnp_root),
473 tlimit);
475 if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
476 return true;
477 WARN_ON(t < 0); /* workqueues should not be signaled. */
478 return false;
479}
480
481/*
482 * Wait for the expedited grace period to elapse, issuing any needed
483 * RCU CPU stall warnings along the way.
484 */
485static void synchronize_rcu_expedited_wait(void)
486{
487 int cpu;
488 unsigned long j;
489 unsigned long jiffies_stall;
490 unsigned long jiffies_start;
491 unsigned long mask;
492 int ndetected;
493 struct rcu_data *rdp;
494 struct rcu_node *rnp;
495 struct rcu_node *rnp_root = rcu_get_root();
496
497 trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
498 jiffies_stall = rcu_jiffies_till_stall_check();
499 jiffies_start = jiffies;
500 if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
501 if (synchronize_rcu_expedited_wait_once(1))
502 return;
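		/*
		 * The initial short wait did not see the grace period end,
		 * so force the scheduler tick on the holdout nohz_full CPUs
		 * to help them reach and report quiescent states.
		 */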
503 rcu_for_each_leaf_node(rnp) {
504 for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
505 rdp = per_cpu_ptr(&rcu_data, cpu);
506 if (rdp->rcu_forced_tick_exp)
507 continue;
508 rdp->rcu_forced_tick_exp = true;
509 tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
510 }
511 }
512 j = READ_ONCE(jiffies_till_first_fqs);
513 if (synchronize_rcu_expedited_wait_once(j + HZ))
514 return;
515 WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT));
516 }
517
518 for (;;) {
519 if (synchronize_rcu_expedited_wait_once(jiffies_stall))
520 return;
521 if (rcu_stall_is_suppressed())
522 continue;
523 panic_on_rcu_stall();
524 pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
525 rcu_state.name);
526 ndetected = 0;
527 rcu_for_each_leaf_node(rnp) {
528 ndetected += rcu_print_task_exp_stall(rnp);
529 for_each_leaf_node_possible_cpu(rnp, cpu) {
530 struct rcu_data *rdp;
531
532 mask = leaf_node_cpu_bit(rnp, cpu);
533 if (!(READ_ONCE(rnp->expmask) & mask))
534 continue;
535 ndetected++;
536 rdp = per_cpu_ptr(&rcu_data, cpu);
537 pr_cont(" %d-%c%c%c", cpu,
538 "O."[!!cpu_online(cpu)],
539 "o."[!!(rdp->grpmask & rnp->expmaskinit)],
540 "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
541 }
542 }
543 pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
544 jiffies - jiffies_start, rcu_state.expedited_sequence,
545 data_race(rnp_root->expmask),
546 ".T"[!!data_race(rnp_root->exp_tasks)]);
547 if (ndetected) {
548 pr_err("blocking rcu_node structures:");
549 rcu_for_each_node_breadth_first(rnp) {
550 if (rnp == rnp_root)
551 continue; /* printed unconditionally */
552 if (sync_rcu_exp_done_unlocked(rnp))
553 continue;
554 pr_cont(" l=%u:%d-%d:%#lx/%c",
555 rnp->level, rnp->grplo, rnp->grphi,
556 data_race(rnp->expmask),
557 ".T"[!!data_race(rnp->exp_tasks)]);
558 }
559 pr_cont("\n");
560 }
561 rcu_for_each_leaf_node(rnp) {
562 for_each_leaf_node_possible_cpu(rnp, cpu) {
563 mask = leaf_node_cpu_bit(rnp, cpu);
564 if (!(READ_ONCE(rnp->expmask) & mask))
565 continue;
566 dump_cpu_task(cpu);
567 }
568 }
569 jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
570 }
571}
572
573/*
574 * Wait for the current expedited grace period to complete, and then
575 * wake up everyone who piggybacked on the just-completed expedited
576 * grace period. Also update all the ->exp_seq_rq counters as needed
577 * in order to avoid counter-wrap problems.
578 */
579static void rcu_exp_wait_wake(unsigned long s)
580{
581 struct rcu_node *rnp;
582
583 synchronize_rcu_expedited_wait();
584
585 // Switch over to wakeup mode, allowing the next GP to proceed.
586 // End the previous grace period only after acquiring the mutex
587 // to ensure that only one GP runs concurrently with wakeups.
588 mutex_lock(&rcu_state.exp_wake_mutex);
589 rcu_exp_gp_seq_end();
590 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
591
592 rcu_for_each_node_breadth_first(rnp) {
593 if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
594 spin_lock(&rnp->exp_lock);
595 /* Recheck, avoid hang in case someone just arrived. */
596 if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
597 WRITE_ONCE(rnp->exp_seq_rq, s);
598 spin_unlock(&rnp->exp_lock);
599 }
600 smp_mb(); /* All above changes before wakeup. */
601 wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
602 }
603 trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
604 mutex_unlock(&rcu_state.exp_wake_mutex);
605}
606
607/*
608 * Common code to drive an expedited grace period forward, used by
609 * workqueues and mid-boot-time tasks.
610 */
611static void rcu_exp_sel_wait_wake(unsigned long s)
612{
613 /* Initialize the rcu_node tree in preparation for the wait. */
614 sync_rcu_exp_select_cpus();
615
616 /* Wait and clean up, including waking everyone. */
617 rcu_exp_wait_wake(s);
618}
619
620/*
621 * Work-queue handler to drive an expedited grace period forward.
622 */
623static void wait_rcu_exp_gp(struct work_struct *wp)
624{
625 struct rcu_exp_work *rewp;
626
627 rewp = container_of(wp, struct rcu_exp_work, rew_work);
628 rcu_exp_sel_wait_wake(rewp->rew_s);
629}
630
631#ifdef CONFIG_PREEMPT_RCU
632
633/*
634 * Remote handler for smp_call_function_single(). If there is an
635 * RCU read-side critical section in effect, request that the
636 * next rcu_read_unlock() record the quiescent state up the
637 * ->expmask fields in the rcu_node tree. Otherwise, immediately
638 * report the quiescent state.
639 */
640static void rcu_exp_handler(void *unused)
641{
642 int depth = rcu_preempt_depth();
643 unsigned long flags;
644 struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
645 struct rcu_node *rnp = rdp->mynode;
646 struct task_struct *t = current;
647
648 /*
649 * First, the common case of not being in an RCU read-side
650 * critical section. If also enabled or idle, immediately
651 * report the quiescent state, otherwise defer.
652 */
653 if (!depth) {
654 if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
655 rcu_dynticks_curr_cpu_in_eqs()) {
656 rcu_report_exp_rdp(rdp);
657 } else {
658 rdp->exp_deferred_qs = true;
659 set_tsk_need_resched(t);
660 set_preempt_need_resched();
661 }
662 return;
663 }
664
665 /*
666 * Second, the less-common case of being in an RCU read-side
667 * critical section. In this case we can count on a future
668 * rcu_read_unlock(). However, this rcu_read_unlock() might
669 * execute on some other CPU, but in that case there will be
670 * a future context switch. Either way, if the expedited
671 * grace period is still waiting on this CPU, set ->deferred_qs
672 * so that the eventual quiescent state will be reported.
673 * Note that there is a large group of race conditions that
674 * can have caused this quiescent state to already have been
675 * reported, so we really do need to check ->expmask.
676 */
677 if (depth > 0) {
678 raw_spin_lock_irqsave_rcu_node(rnp, flags);
679 if (rnp->expmask & rdp->grpmask) {
680 rdp->exp_deferred_qs = true;
681 t->rcu_read_unlock_special.b.exp_hint = true;
682 }
683 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
684 return;
685 }
686
687 // Finally, negative nesting depth should not happen.
688 WARN_ON_ONCE(1);
689}
690
691/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
692static void sync_sched_exp_online_cleanup(int cpu)
693{
694}
695
696/*
697 * Scan the current list of tasks blocked within RCU read-side critical
698 * sections, printing out the tid of each that is blocking the current
699 * expedited grace period.
700 */
701static int rcu_print_task_exp_stall(struct rcu_node *rnp)
702{
703 unsigned long flags;
704 int ndetected = 0;
705 struct task_struct *t;
706
707 if (!READ_ONCE(rnp->exp_tasks))
708 return 0;
709 raw_spin_lock_irqsave_rcu_node(rnp, flags);
710 t = list_entry(rnp->exp_tasks->prev,
711 struct task_struct, rcu_node_entry);
712 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
713 pr_cont(" P%d", t->pid);
714 ndetected++;
715 }
716 raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
717 return ndetected;
718}
719
720#else /* #ifdef CONFIG_PREEMPT_RCU */
721
722/* Request an expedited quiescent state. */
723static void rcu_exp_need_qs(void)
724{
725 __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
726 /* Store .exp before .rcu_urgent_qs. */
727 smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
728 set_tsk_need_resched(current);
729 set_preempt_need_resched();
730}
731
732/* Invoked on each online non-idle CPU for expedited quiescent state. */
733static void rcu_exp_handler(void *unused)
734{
735 struct rcu_data *rdp;
736 struct rcu_node *rnp;
737
738 rdp = this_cpu_ptr(&rcu_data);
739 rnp = rdp->mynode;
740 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
741 __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
742 return;
743 if (rcu_is_cpu_rrupt_from_idle()) {
744 rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
745 return;
746 }
747 rcu_exp_need_qs();
748}
749
750/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
751static void sync_sched_exp_online_cleanup(int cpu)
752{
753 unsigned long flags;
754 int my_cpu;
755 struct rcu_data *rdp;
756 int ret;
757 struct rcu_node *rnp;
758
759 rdp = per_cpu_ptr(&rcu_data, cpu);
760 rnp = rdp->mynode;
761 my_cpu = get_cpu();
762 /* Quiescent state either not needed or already requested, leave. */
763 if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
764 __this_cpu_read(rcu_data.cpu_no_qs.b.exp)) {
765 put_cpu();
766 return;
767 }
768 /* Quiescent state needed on current CPU, so set it up locally. */
769 if (my_cpu == cpu) {
770 local_irq_save(flags);
771 rcu_exp_need_qs();
772 local_irq_restore(flags);
773 put_cpu();
774 return;
775 }
776 /* Quiescent state needed on some other CPU, send IPI. */
777 ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
778 put_cpu();
779 WARN_ON_ONCE(ret);
780}
781
782/*
783 * Because preemptible RCU does not exist, we never have to check for
784 * tasks blocked within RCU read-side critical sections that are
785 * blocking the current expedited grace period.
786 */
787static int rcu_print_task_exp_stall(struct rcu_node *rnp)
788{
789 return 0;
790}
791
792#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
793
794/**
795 * synchronize_rcu_expedited - Brute-force RCU grace period
796 *
797 * Wait for an RCU grace period, but expedite it. The basic idea is to
798 * IPI all non-idle non-nohz online CPUs. The IPI handler checks whether
799 * the CPU is in an RCU critical section, and if so, it sets a flag that
800 * causes the outermost rcu_read_unlock() to report the quiescent state
801 * for RCU-preempt or asks the scheduler for help for RCU-sched. On the
802 * other hand, if the CPU is not in an RCU read-side critical section,
803 * the IPI handler reports the quiescent state immediately.
804 *
805 * Although this is a great improvement over previous expedited
806 * implementations, it is still unfriendly to real-time workloads, so is
807 * thus not recommended for any sort of common-case code. In fact, if
808 * you are using synchronize_rcu_expedited() in a loop, please restructure
809 * your code to batch your updates, and then use a single synchronize_rcu()
810 * instead.
811 *
812 * This has the same semantics as (but is more brutal than) synchronize_rcu().
813 */
814void synchronize_rcu_expedited(void)
815{
816 bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
817 struct rcu_exp_work rew;
818 struct rcu_node *rnp;
819 unsigned long s;
820
821 RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
822 lock_is_held(&rcu_lock_map) ||
823 lock_is_held(&rcu_sched_lock_map),
824 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");
825
826 /* Is the state such that the call is a grace period? */
827 if (rcu_blocking_is_gp())
828 return;
829
830 /* If expedited grace periods are prohibited, fall back to normal. */
831 if (rcu_gp_is_normal()) {
832 wait_rcu_gp(call_rcu);
833 return;
834 }
835
836 /* Take a snapshot of the sequence number. */
837 s = rcu_exp_gp_seq_snap();
838 if (exp_funnel_lock(s))
839 return; /* Someone else did our work for us. */
840
841 /* Ensure that load happens before action based on it. */
842 if (unlikely(boottime)) {
843 /* Direct call during scheduler init and early_initcalls(). */
844 rcu_exp_sel_wait_wake(s);
845 } else {
846 /* Marshal arguments & schedule the expedited grace period. */
847 rew.rew_s = s;
848 INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
849 queue_work(rcu_gp_wq, &rew.rew_work);
850 }
851
852 /* Wait for expedited grace period to complete. */
853 rnp = rcu_get_root();
854 wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
855 sync_exp_work_done(s));
856 smp_mb(); /* Workqueue actions happen before return. */
857
858 /* Let the next expedited grace period start. */
859 mutex_unlock(&rcu_state.exp_mutex);
860
861 if (likely(!boottime))
862 destroy_work_on_stack(&rew.rew_work);
863}
864EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);