1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Sleepable Read-Copy Update mechanism for mutual exclusion.
4 *
5 * Copyright (C) IBM Corporation, 2006
6 * Copyright (C) Fujitsu, 2012
7 *
8 * Authors: Paul McKenney <paulmck@linux.ibm.com>
9 *	    Lai Jiangshan <laijs@cn.fujitsu.com>
10 *
11 * For detailed explanation of Read-Copy Update mechanism see -
12 * Documentation/RCU/ *.txt
13 *
14 */
15
16#define pr_fmt(fmt) "rcu: " fmt
17
18#include <linux/export.h>
19#include <linux/mutex.h>
20#include <linux/percpu.h>
21#include <linux/preempt.h>
22#include <linux/rcupdate_wait.h>
23#include <linux/sched.h>
24#include <linux/smp.h>
25#include <linux/delay.h>
26#include <linux/module.h>
27#include <linux/srcu.h>
28
29#include "rcu.h"
30#include "rcu_segcblist.h"
31
32/* Holdoff in nanoseconds for auto-expediting. */
33#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
34static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
35module_param(exp_holdoff, ulong, 0444);
36
37/* Overflow-check frequency. N bits roughly says every 2**N grace periods. */
38static ulong counter_wrap_check = (ULONG_MAX >> 2);
39module_param(counter_wrap_check, ulong, 0444);
40
41/* Early-boot callback-management, so early that no lock is required! */
42static LIST_HEAD(srcu_boot_list);
43static bool __read_mostly srcu_init_done;
44
45static void srcu_invoke_callbacks(struct work_struct *work);
46static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
47static void process_srcu(struct work_struct *work);
48static void srcu_delay_timer(struct timer_list *t);
49
50/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
51#define spin_lock_rcu_node(p) \
52do { \
53 spin_lock(&ACCESS_PRIVATE(p, lock)); \
54 smp_mb__after_unlock_lock(); \
55} while (0)
56
57#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
58
59#define spin_lock_irq_rcu_node(p) \
60do { \
61 spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
62 smp_mb__after_unlock_lock(); \
63} while (0)
64
65#define spin_unlock_irq_rcu_node(p) \
66 spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
67
68#define spin_lock_irqsave_rcu_node(p, flags) \
69do { \
70 spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
71 smp_mb__after_unlock_lock(); \
72} while (0)
73
74#define spin_unlock_irqrestore_rcu_node(p, flags) \
75	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
76
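/*
 * Illustrative sketch (not part of this file's logic): the wrappers above
 * guard the private ->lock in srcu_struct, srcu_node, and srcu_data while
 * adding the smp_mb__after_unlock_lock() ordering that grace-period
 * detection relies on.  A typical pairing, assuming a struct srcu_data *sdp
 * is already in hand:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave_rcu_node(sdp, flags);
 *	// ... update sdp->srcu_cblist or ->srcu_gp_seq_needed ...
 *	spin_unlock_irqrestore_rcu_node(sdp, flags);
 */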
77/*
78 * Initialize SRCU combining tree. Note that statically allocated
79 * srcu_struct structures might already have srcu_read_lock() and
80 * srcu_read_unlock() running against them. So if the is_static parameter
81 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
82 */
83static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
84{
85 int cpu;
86 int i;
87 int level = 0;
88 int levelspread[RCU_NUM_LVLS];
89 struct srcu_data *sdp;
90 struct srcu_node *snp;
91 struct srcu_node *snp_first;
92
93 /* Work out the overall tree geometry. */
94 ssp->level[0] = &ssp->node[0];
95 for (i = 1; i < rcu_num_lvls; i++)
96 ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
97 rcu_init_levelspread(levelspread, num_rcu_lvl);
98
99 /* Each pass through this loop initializes one srcu_node structure. */
100 srcu_for_each_node_breadth_first(ssp, snp) {
101 spin_lock_init(&ACCESS_PRIVATE(snp, lock));
102 WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
103 ARRAY_SIZE(snp->srcu_data_have_cbs));
104 for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
105 snp->srcu_have_cbs[i] = 0;
106 snp->srcu_data_have_cbs[i] = 0;
107 }
108 snp->srcu_gp_seq_needed_exp = 0;
109 snp->grplo = -1;
110 snp->grphi = -1;
111 if (snp == &ssp->node[0]) {
112 /* Root node, special case. */
113 snp->srcu_parent = NULL;
114 continue;
115 }
116
117 /* Non-root node. */
118 if (snp == ssp->level[level + 1])
119 level++;
120 snp->srcu_parent = ssp->level[level - 1] +
121 (snp - ssp->level[level]) /
122 levelspread[level - 1];
123 }
124
125 /*
126 * Initialize the per-CPU srcu_data array, which feeds into the
127 * leaves of the srcu_node tree.
128 */
129 WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
130 ARRAY_SIZE(sdp->srcu_unlock_count));
131 level = rcu_num_lvls - 1;
132 snp_first = ssp->level[level];
133 for_each_possible_cpu(cpu) {
134 sdp = per_cpu_ptr(ssp->sda, cpu);
135 spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
136 rcu_segcblist_init(&sdp->srcu_cblist);
137 sdp->srcu_cblist_invoking = false;
138 sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
139 sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
140 sdp->mynode = &snp_first[cpu / levelspread[level]];
141 for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
142 if (snp->grplo < 0)
143 snp->grplo = cpu;
144 snp->grphi = cpu;
145 }
146 sdp->cpu = cpu;
147 INIT_WORK(&sdp->work, srcu_invoke_callbacks);
148 timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
149 sdp->ssp = ssp;
150 sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
151 if (is_static)
152 continue;
153
154		/* Dynamically allocated, better be no srcu_read_lock()s! */
155 for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
156 sdp->srcu_lock_count[i] = 0;
157 sdp->srcu_unlock_count[i] = 0;
158 }
159 }
160}
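/*
 * Worked example (hypothetical geometry, for illustration only): on a
 * 64-CPU system with a leaf fanout of 16, rcu_num_lvls would be 2 and
 * num_rcu_lvl[] would be {1, 4}, so ->node[0] is the root and
 * ->node[1..4] are the leaves.  Each leaf then covers levelspread[1] == 16
 * CPUs, so CPU 37's srcu_data structure gets ->mynode == &ssp->node[3]
 * (leaf index 37 / 16 == 2 within ->level[1]) and ->grpmask == 1 << 5
 * (CPU 37 within a leaf whose ->grplo is 32).
 */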
161
162/*
163 * Initialize non-compile-time initialized fields, including the
164 * associated srcu_node and srcu_data structures. The is_static
165 * parameter is passed through to init_srcu_struct_nodes(), and
166 * also tells us that ->sda has already been wired up to srcu_data.
167 */
168static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
169{
170 mutex_init(&ssp->srcu_cb_mutex);
171 mutex_init(&ssp->srcu_gp_mutex);
172 ssp->srcu_idx = 0;
173 ssp->srcu_gp_seq = 0;
174 ssp->srcu_barrier_seq = 0;
175 mutex_init(&ssp->srcu_barrier_mutex);
176 atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
177 INIT_DELAYED_WORK(&ssp->work, process_srcu);
178 if (!is_static)
179 ssp->sda = alloc_percpu(struct srcu_data);
180 init_srcu_struct_nodes(ssp, is_static);
181 ssp->srcu_gp_seq_needed_exp = 0;
182 ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
183 smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
184 return ssp->sda ? 0 : -ENOMEM;
185}
186
187#ifdef CONFIG_DEBUG_LOCK_ALLOC
188
189int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
190 struct lock_class_key *key)
191{
192 /* Don't re-initialize a lock while it is held. */
193 debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
194 lockdep_init_map(&ssp->dep_map, name, key, 0);
195 spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
196 return init_srcu_struct_fields(ssp, false);
197}
198EXPORT_SYMBOL_GPL(__init_srcu_struct);
199
200#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
201
202/**
203 * init_srcu_struct - initialize a sleep-RCU structure
204 * @ssp: structure to initialize.
205 *
206 * Must invoke this on a given srcu_struct before passing that srcu_struct
207 * to any other function. Each srcu_struct represents a separate domain
208 * of SRCU protection.
209 */
210int init_srcu_struct(struct srcu_struct *ssp)
211{
212 spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
213 return init_srcu_struct_fields(ssp, false);
214}
215EXPORT_SYMBOL_GPL(init_srcu_struct);
216
217#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
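/*
 * Usage sketch (illustrative only; "my_srcu" is a hypothetical
 * caller-defined domain): an SRCU domain is either defined statically or
 * initialized at runtime, but not both:
 *
 *	DEFINE_SRCU(my_srcu);			// static definition
 *
 * or:
 *
 *	static struct srcu_struct my_srcu;
 *
 *	if (init_srcu_struct(&my_srcu))
 *		return -ENOMEM;			// allocation failed
 *	...
 *	cleanup_srcu_struct(&my_srcu);		// when finished with it
 */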
218
219/*
220 * First-use initialization of statically allocated srcu_struct
221 * structure. Wiring up the combining tree is more than can be
222 * done with compile-time initialization, so this check is added
223 * to each update-side SRCU primitive. Use ssp->lock, which -is-
224 * compile-time initialized, to resolve races involving multiple
225 * CPUs trying to garner first-use privileges.
226 */
227static void check_init_srcu_struct(struct srcu_struct *ssp)
228{
229 unsigned long flags;
230
231 /* The smp_load_acquire() pairs with the smp_store_release(). */
232 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
233 return; /* Already initialized. */
234 spin_lock_irqsave_rcu_node(ssp, flags);
235 if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
236 spin_unlock_irqrestore_rcu_node(ssp, flags);
237 return;
238 }
239 init_srcu_struct_fields(ssp, true);
240 spin_unlock_irqrestore_rcu_node(ssp, flags);
241}
242
243/*
244 * Returns approximate total of the readers' ->srcu_lock_count[] values
245 * for the rank of per-CPU counters specified by idx.
246 */
247static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
248{
249 int cpu;
250 unsigned long sum = 0;
251
252 for_each_possible_cpu(cpu) {
253 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
254
255 sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
256 }
257 return sum;
258}
259
260/*
261 * Returns approximate total of the readers' ->srcu_unlock_count[] values
262 * for the rank of per-CPU counters specified by idx.
263 */
264static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
265{
266 int cpu;
267 unsigned long sum = 0;
268
269 for_each_possible_cpu(cpu) {
270 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
271
272 sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
273 }
274 return sum;
275}
276
277/*
278 * Return true if the number of pre-existing readers is determined to
279 * be zero.
280 */
281static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
282{
283 unsigned long unlocks;
284
285 unlocks = srcu_readers_unlock_idx(ssp, idx);
286
287 /*
288 * Make sure that a lock is always counted if the corresponding
289 * unlock is counted. Needs to be a smp_mb() as the read side may
290 * contain a read from a variable that is written to before the
291 * synchronize_srcu() in the write side. In this case smp_mb()s
292 * A and B act like the store buffering pattern.
293 *
294 * This smp_mb() also pairs with smp_mb() C to prevent accesses
295 * after the synchronize_srcu() from being executed before the
296 * grace period ends.
297 */
298 smp_mb(); /* A */
299
300 /*
301 * If the locks are the same as the unlocks, then there must have
302 * been no readers on this index at some time in between. This does
303 * not mean that there are no more readers, as one could have read
304 * the current index but not have incremented the lock counter yet.
305 *
306 * So suppose that the updater is preempted here for so long
307 * that more than ULONG_MAX non-nested readers come and go in
308 * the meantime. It turns out that this cannot result in overflow
309 * because if a reader modifies its unlock count after we read it
310 * above, then that reader's next load of ->srcu_idx is guaranteed
311 * to get the new value, which will cause it to operate on the
312 * other bank of counters, where it cannot contribute to the
313 * overflow of these counters. This means that there is a maximum
314 * of 2*NR_CPUS increments, which cannot overflow given current
315 * systems, especially not on 64-bit systems.
316 *
317 * OK, how about nesting? This does impose a limit on nesting
318 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
319 * especially on 64-bit systems.
320 */
321 return srcu_readers_lock_idx(ssp, idx) == unlocks;
322}
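/*
 * Illustrative scenario (not code): a reader might invoke
 * __srcu_read_lock() on CPU 0, sleep, migrate, and later invoke
 * __srcu_read_unlock() on CPU 5.  Neither CPU's per-CPU counter pair
 * balances on its own, but the cross-CPU sums computed above still match
 * once that reader completes, which is all this check requires.
 */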
323
324/**
325 * srcu_readers_active - returns true if there are readers, and false
326 * otherwise
327 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
328 *
329 * Note that this is not an atomic primitive, and can therefore suffer
330 * severe errors when invoked on an active srcu_struct. That said, it
331 * can be useful as an error check at cleanup time.
332 */
333static bool srcu_readers_active(struct srcu_struct *ssp)
334{
335 int cpu;
336 unsigned long sum = 0;
337
338 for_each_possible_cpu(cpu) {
339 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
340
341 sum += READ_ONCE(cpuc->srcu_lock_count[0]);
342 sum += READ_ONCE(cpuc->srcu_lock_count[1]);
343 sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
344 sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
345 }
346 return sum;
347}
348
349#define SRCU_INTERVAL 1
350
351/*
352 * Return grace-period delay, zero if there are expedited grace
353 * periods pending, SRCU_INTERVAL otherwise.
354 */
355static unsigned long srcu_get_delay(struct srcu_struct *ssp)
356{
357 if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
358 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
359 return 0;
360 return SRCU_INTERVAL;
361}
362
363/**
364 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
365 * @ssp: structure to clean up.
366 *
367 * Must invoke this after you are finished using a given srcu_struct that
368 * was initialized via init_srcu_struct(), else you leak memory.
369 */
370void cleanup_srcu_struct(struct srcu_struct *ssp)
371{
372 int cpu;
373
374 if (WARN_ON(!srcu_get_delay(ssp)))
375 return; /* Just leak it! */
376 if (WARN_ON(srcu_readers_active(ssp)))
377 return; /* Just leak it! */
378 flush_delayed_work(&ssp->work);
379 for_each_possible_cpu(cpu) {
380 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
381
382 del_timer_sync(&sdp->delay_work);
383 flush_work(&sdp->work);
384 if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
385 return; /* Forgot srcu_barrier(), so just leak it! */
386 }
387 if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
388 WARN_ON(srcu_readers_active(ssp))) {
389 pr_info("%s: Active srcu_struct %p state: %d\n",
390 __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
391 return; /* Caller forgot to stop doing call_srcu()? */
392 }
393 free_percpu(ssp->sda);
394 ssp->sda = NULL;
395}
396EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
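/*
 * Teardown sketch (illustrative only; "my_srcu" is a hypothetical
 * caller-defined domain): callers that used call_srcu() must wait for
 * their callbacks before tearing the domain down:
 *
 *	srcu_barrier(&my_srcu);		// wait for in-flight callbacks
 *	cleanup_srcu_struct(&my_srcu);	// then release per-CPU state
 *
 * Skipping the srcu_barrier() step trips the rcu_segcblist_n_cbs()
 * warning above and leaks the domain.
 */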
397
398/*
399 * Counts the new reader in the appropriate per-CPU element of the
400 * srcu_struct.
401 * Returns an index that must be passed to the matching srcu_read_unlock().
402 */
403int __srcu_read_lock(struct srcu_struct *ssp)
404{
405 int idx;
406
407 idx = READ_ONCE(ssp->srcu_idx) & 0x1;
408 this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
409 smp_mb(); /* B */ /* Avoid leaking the critical section. */
410 return idx;
411}
412EXPORT_SYMBOL_GPL(__srcu_read_lock);
413
414/*
415 * Removes the count for the old reader from the appropriate per-CPU
416 * element of the srcu_struct. Note that this may well be a different
417 * CPU than that which was incremented by the corresponding srcu_read_lock().
418 */
419void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
420{
421 smp_mb(); /* C */ /* Avoid leaking the critical section. */
422 this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
423}
424EXPORT_SYMBOL_GPL(__srcu_read_unlock);
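/*
 * Reader-side sketch (illustrative only; "my_srcu", "gp", struct foo, and
 * do_something_with() are hypothetical): callers normally use the
 * srcu_read_lock() and srcu_read_unlock() wrappers rather than these
 * __-prefixed functions:
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);	// may block, unlike plain RCU
 *	srcu_read_unlock(&my_srcu, idx);
 */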
425
426/*
427 * We use an adaptive strategy for synchronize_srcu() and especially for
428 * synchronize_srcu_expedited(). We spin for a fixed time period
429 * (defined below) to allow SRCU readers to exit their read-side critical
430 * sections. If there are still some readers after a few microseconds,
431 * we repeatedly block for 1-millisecond time periods.
432 */
433#define SRCU_RETRY_CHECK_DELAY 5
434
435/*
436 * Start an SRCU grace period.
437 */
438static void srcu_gp_start(struct srcu_struct *ssp)
439{
440 struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
441 int state;
442
443 lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
444 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
445 spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
446 rcu_segcblist_advance(&sdp->srcu_cblist,
447 rcu_seq_current(&ssp->srcu_gp_seq));
448 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
449 rcu_seq_snap(&ssp->srcu_gp_seq));
450 spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
451 smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
452 rcu_seq_start(&ssp->srcu_gp_seq);
453 state = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
454 WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
455}
456
457
458static void srcu_delay_timer(struct timer_list *t)
459{
460 struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
461
462 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
463}
464
465static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
466 unsigned long delay)
467{
468 if (!delay) {
469 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
470 return;
471 }
472
473 timer_reduce(&sdp->delay_work, jiffies + delay);
474}
475
476/*
477 * Schedule callback invocation for the specified srcu_data structure,
478 * if possible, on the corresponding CPU.
479 */
480static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
481{
482 srcu_queue_delayed_work_on(sdp, delay);
483}
484
485/*
486 * Schedule callback invocation for all srcu_data structures associated
487 * with the specified srcu_node structure that have callbacks for the
488 * just-completed grace period, the one corresponding to idx. If possible,
489 * schedule this invocation on the corresponding CPUs.
490 */
491static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
492 unsigned long mask, unsigned long delay)
493{
494 int cpu;
495
496 for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
497 if (!(mask & (1 << (cpu - snp->grplo))))
498 continue;
499 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
500 }
501}
502
503/*
504 * Note the end of an SRCU grace period. Initiates callback invocation
505 * and starts a new grace period if needed.
506 *
507 * The ->srcu_cb_mutex acquisition does not protect any data, but
508 * instead prevents more than one grace period from starting while we
509 * are initiating callback invocation. This allows the ->srcu_have_cbs[]
510 * array to have a finite number of elements.
511 */
512static void srcu_gp_end(struct srcu_struct *ssp)
513{
514 unsigned long cbdelay;
515 bool cbs;
516 bool last_lvl;
517 int cpu;
518 unsigned long flags;
519 unsigned long gpseq;
520 int idx;
521 unsigned long mask;
522 struct srcu_data *sdp;
523 struct srcu_node *snp;
524
525 /* Prevent more than one additional grace period. */
526 mutex_lock(&ssp->srcu_cb_mutex);
527
528 /* End the current grace period. */
529 spin_lock_irq_rcu_node(ssp);
530 idx = rcu_seq_state(ssp->srcu_gp_seq);
531 WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
532 cbdelay = srcu_get_delay(ssp);
533 ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
534 rcu_seq_end(&ssp->srcu_gp_seq);
535 gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
536 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
537 ssp->srcu_gp_seq_needed_exp = gpseq;
538 spin_unlock_irq_rcu_node(ssp);
539 mutex_unlock(&ssp->srcu_gp_mutex);
540 /* A new grace period can start at this point. But only one. */
541
542 /* Initiate callback invocation as needed. */
543 idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
544 srcu_for_each_node_breadth_first(ssp, snp) {
545 spin_lock_irq_rcu_node(snp);
546 cbs = false;
547 last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
548 if (last_lvl)
549 cbs = snp->srcu_have_cbs[idx] == gpseq;
550 snp->srcu_have_cbs[idx] = gpseq;
551 rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
552 if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
553 snp->srcu_gp_seq_needed_exp = gpseq;
554 mask = snp->srcu_data_have_cbs[idx];
555 snp->srcu_data_have_cbs[idx] = 0;
556 spin_unlock_irq_rcu_node(snp);
557 if (cbs)
558 srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
559
560 /* Occasionally prevent srcu_data counter wrap. */
561 if (!(gpseq & counter_wrap_check) && last_lvl)
562 for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
563 sdp = per_cpu_ptr(ssp->sda, cpu);
564 spin_lock_irqsave_rcu_node(sdp, flags);
565 if (ULONG_CMP_GE(gpseq,
566 sdp->srcu_gp_seq_needed + 100))
567 sdp->srcu_gp_seq_needed = gpseq;
568 if (ULONG_CMP_GE(gpseq,
569 sdp->srcu_gp_seq_needed_exp + 100))
570 sdp->srcu_gp_seq_needed_exp = gpseq;
571 spin_unlock_irqrestore_rcu_node(sdp, flags);
572 }
573 }
574
575 /* Callback initiation done, allow grace periods after next. */
576 mutex_unlock(&ssp->srcu_cb_mutex);
577
578 /* Start a new grace period if needed. */
579 spin_lock_irq_rcu_node(ssp);
580 gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
581 if (!rcu_seq_state(gpseq) &&
582 ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
583 srcu_gp_start(ssp);
584 spin_unlock_irq_rcu_node(ssp);
585 srcu_reschedule(ssp, 0);
586 } else {
587 spin_unlock_irq_rcu_node(ssp);
588 }
589}
590
591/*
592 * Funnel-locking scheme to scalably mediate many concurrent expedited
593 * grace-period requests. This function is invoked for the first known
594 * expedited request for a grace period that has already been requested,
595 * but without expediting. To start a completely new grace period,
596 * whether expedited or not, use srcu_funnel_gp_start() instead.
597 */
598static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
599 unsigned long s)
600{
601 unsigned long flags;
602
603 for (; snp != NULL; snp = snp->srcu_parent) {
604 if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
605 ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
606 return;
607 spin_lock_irqsave_rcu_node(snp, flags);
608 if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
609 spin_unlock_irqrestore_rcu_node(snp, flags);
610 return;
611 }
612 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
613 spin_unlock_irqrestore_rcu_node(snp, flags);
614 }
615 spin_lock_irqsave_rcu_node(ssp, flags);
616 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
617 ssp->srcu_gp_seq_needed_exp = s;
618 spin_unlock_irqrestore_rcu_node(ssp, flags);
619}
620
621/*
622 * Funnel-locking scheme to scalably mediate many concurrent grace-period
623 * requests. The winner has to do the work of actually starting grace
624 * period s. Losers must either ensure that their desired grace-period
625 * number is recorded on at least their leaf srcu_node structure, or they
626 * must take steps to invoke their own callbacks.
627 *
628 * Note that this function also does the work of srcu_funnel_exp_start(),
629 * in some cases by directly invoking it.
630 */
631static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
632 unsigned long s, bool do_norm)
633{
634 unsigned long flags;
635 int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
636 struct srcu_node *snp = sdp->mynode;
637 unsigned long snp_seq;
638
639 /* Each pass through the loop does one level of the srcu_node tree. */
640 for (; snp != NULL; snp = snp->srcu_parent) {
641 if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
642 return; /* GP already done and CBs recorded. */
643 spin_lock_irqsave_rcu_node(snp, flags);
644 if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
645 snp_seq = snp->srcu_have_cbs[idx];
646 if (snp == sdp->mynode && snp_seq == s)
647 snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
648 spin_unlock_irqrestore_rcu_node(snp, flags);
649 if (snp == sdp->mynode && snp_seq != s) {
650 srcu_schedule_cbs_sdp(sdp, do_norm
651 ? SRCU_INTERVAL
652 : 0);
653 return;
654 }
655 if (!do_norm)
656 srcu_funnel_exp_start(ssp, snp, s);
657 return;
658 }
659 snp->srcu_have_cbs[idx] = s;
660 if (snp == sdp->mynode)
661 snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
662 if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
663 snp->srcu_gp_seq_needed_exp = s;
664 spin_unlock_irqrestore_rcu_node(snp, flags);
665 }
666
667 /* Top of tree, must ensure the grace period will be started. */
668 spin_lock_irqsave_rcu_node(ssp, flags);
669 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
670 /*
671 * Record need for grace period s. Pair with load
672 * acquire setting up for initialization.
673 */
674 smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
675 }
676 if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
677 ssp->srcu_gp_seq_needed_exp = s;
678
679 /* If grace period not already done and none in progress, start it. */
680 if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
681 rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
682 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
683 srcu_gp_start(ssp);
684 if (likely(srcu_init_done))
685 queue_delayed_work(rcu_gp_wq, &ssp->work,
686 srcu_get_delay(ssp));
687 else if (list_empty(&ssp->work.work.entry))
688 list_add(&ssp->work.work.entry, &srcu_boot_list);
689 }
690 spin_unlock_irqrestore_rcu_node(ssp, flags);
691}
692
693/*
694 * Wait until all readers counted by array index idx complete, but
695 * loop an additional time if there is an expedited grace period pending.
696 * The caller must ensure that ->srcu_idx is not changed while checking.
697 */
698static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
699{
700 for (;;) {
701 if (srcu_readers_active_idx_check(ssp, idx))
702 return true;
703 if (--trycount + !srcu_get_delay(ssp) <= 0)
704 return false;
705 udelay(SRCU_RETRY_CHECK_DELAY);
706 }
707}
708
709/*
710 * Increment the ->srcu_idx counter so that future SRCU readers will
711 * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
712 * us to wait for pre-existing readers in a starvation-free manner.
713 */
714static void srcu_flip(struct srcu_struct *ssp)
715{
716 /*
717 * Ensure that if this updater saw a given reader's increment
718 * from __srcu_read_lock(), that reader was using an old value
719 * of ->srcu_idx. Also ensure that if a given reader sees the
720 * new value of ->srcu_idx, this updater's earlier scans cannot
721 * have seen that reader's increments (which is OK, because this
722 * grace period need not wait on that reader).
723 */
724 smp_mb(); /* E */ /* Pairs with B and C. */
725
726 WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
727
728 /*
729 * Ensure that if the updater misses an __srcu_read_unlock()
730 * increment, that task's next __srcu_read_lock() will see the
731 * above counter update. Note that both this memory barrier
732 * and the one in srcu_readers_active_idx_check() provide the
733 * guarantee for __srcu_read_lock().
734 */
735 smp_mb(); /* D */ /* Pairs with C. */
736}
737
738/*
739 * If SRCU is likely idle, return true, otherwise return false.
740 *
741 * Note that it is OK for several concurrent from-idle requests for a new
742 * grace period to specify expediting because they will all end
743 * up requesting the same grace period anyhow. So no loss.
744 *
745 * Note also that if any CPU (including the current one) is still invoking
746 * callbacks, this function will nevertheless say "idle". This is not
747 * ideal, but the overhead of checking all CPUs' callback lists is even
748 * less ideal, especially on large systems. Furthermore, the wakeup
749 * can happen before the callback is fully removed, so we have no choice
750 * but to accept this type of error.
751 *
752 * This function is also subject to counter-wrap errors, but let's face
753 * it, if this function was preempted for enough time for the counters
754 * to wrap, it really doesn't matter whether or not we expedite the grace
755 * period. The extra overhead of a needlessly expedited grace period is
756 * negligible when amortized over that time period, and the extra latency
757 * of a needlessly non-expedited grace period is similarly negligible.
758 */
759static bool srcu_might_be_idle(struct srcu_struct *ssp)
760{
761 unsigned long curseq;
762 unsigned long flags;
763 struct srcu_data *sdp;
764 unsigned long t;
765
766 /* If the local srcu_data structure has callbacks, not idle. */
767 local_irq_save(flags);
768 sdp = this_cpu_ptr(ssp->sda);
769 if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
770 local_irq_restore(flags);
771 return false; /* Callbacks already present, so not idle. */
772 }
773 local_irq_restore(flags);
774
775 /*
776	 * No local callbacks, so probabilistically probe global state.
777	 * Exact information would require acquiring locks, which would
778	 * kill scalability, hence the probabilistic nature of the probe.
779 */
780
781 /* First, see if enough time has passed since the last GP. */
782 t = ktime_get_mono_fast_ns();
783 if (exp_holdoff == 0 ||
784 time_in_range_open(t, ssp->srcu_last_gp_end,
785 ssp->srcu_last_gp_end + exp_holdoff))
786 return false; /* Too soon after last GP. */
787
788 /* Next, check for probable idleness. */
789 curseq = rcu_seq_current(&ssp->srcu_gp_seq);
790 smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
791 if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
792 return false; /* Grace period in progress, so not idle. */
793 smp_mb(); /* Order ->srcu_gp_seq with prior access. */
794 if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
795 return false; /* GP # changed, so not idle. */
796 return true; /* With reasonable probability, idle! */
797}
798
799/*
800 * SRCU callback function to leak a callback.
801 */
802static void srcu_leak_callback(struct rcu_head *rhp)
803{
804}
805
806/*
807 * Enqueue an SRCU callback on the srcu_data structure associated with
808 * the current CPU and the specified srcu_struct structure, initiating
809 * grace-period processing if it is not already running.
810 *
811 * Note that all CPUs must agree that the grace period extended beyond
812 * all pre-existing SRCU read-side critical sections. On systems with
813 * more than one CPU, this means that when "func()" is invoked, each CPU
814 * is guaranteed to have executed a full memory barrier since the end of
815 * its last corresponding SRCU read-side critical section whose beginning
816 * preceded the call to call_srcu(). It also means that each CPU executing
817 * an SRCU read-side critical section that continues beyond the start of
818 * "func()" must have executed a memory barrier after the call_srcu()
819 * but before the beginning of that SRCU read-side critical section.
820 * Note that these guarantees include CPUs that are offline, idle, or
821 * executing in user mode, as well as CPUs that are executing in the kernel.
822 *
823 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
824 * resulting SRCU callback function "func()", then both CPU A and CPU
825 * B are guaranteed to execute a full memory barrier during the time
826 * interval between the call to call_srcu() and the invocation of "func()".
827 * This guarantee applies even if CPU A and CPU B are the same CPU (but
828 * again only if the system has more than one CPU).
829 *
830 * Of course, these guarantees apply only for invocations of call_srcu(),
831 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
832 * srcu_struct structure.
833 */
834static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
835 rcu_callback_t func, bool do_norm)
836{
837 unsigned long flags;
838 int idx;
839 bool needexp = false;
840 bool needgp = false;
841 unsigned long s;
842 struct srcu_data *sdp;
843
844 check_init_srcu_struct(ssp);
845 if (debug_rcu_head_queue(rhp)) {
846 /* Probable double call_srcu(), so leak the callback. */
847 WRITE_ONCE(rhp->func, srcu_leak_callback);
848 WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
849 return;
850 }
851 rhp->func = func;
852 idx = srcu_read_lock(ssp);
853 local_irq_save(flags);
854 sdp = this_cpu_ptr(ssp->sda);
855 spin_lock_rcu_node(sdp);
856 rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
857 rcu_segcblist_advance(&sdp->srcu_cblist,
858 rcu_seq_current(&ssp->srcu_gp_seq));
859 s = rcu_seq_snap(&ssp->srcu_gp_seq);
860 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
861 if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
862 sdp->srcu_gp_seq_needed = s;
863 needgp = true;
864 }
865 if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
866 sdp->srcu_gp_seq_needed_exp = s;
867 needexp = true;
868 }
869 spin_unlock_irqrestore_rcu_node(sdp, flags);
870 if (needgp)
871 srcu_funnel_gp_start(ssp, sdp, s, do_norm);
872 else if (needexp)
873 srcu_funnel_exp_start(ssp, sdp->mynode, s);
874 srcu_read_unlock(ssp, idx);
875}
876
877/**
878 * call_srcu() - Queue a callback for invocation after an SRCU grace period
879 * @ssp: srcu_struct on which to queue the callback
880 * @rhp: structure to be used for queueing the SRCU callback.
881 * @func: function to be invoked after the SRCU grace period
882 *
883 * The callback function will be invoked some time after a full SRCU
884 * grace period elapses, in other words after all pre-existing SRCU
885 * read-side critical sections have completed. However, the callback
886 * function might well execute concurrently with other SRCU read-side
887 * critical sections that started after call_srcu() was invoked. SRCU
888 * read-side critical sections are delimited by srcu_read_lock() and
889 * srcu_read_unlock(), and may be nested.
890 *
891 * The callback will be invoked from process context, but must nevertheless
892 * be fast and must not block.
893 */
894void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
895 rcu_callback_t func)
896{
897 __call_srcu(ssp, rhp, func, true);
898}
899EXPORT_SYMBOL_GPL(call_srcu);
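/*
 * Usage sketch (illustrative only; struct foo, "gp", "my_lock", and
 * "my_srcu" are hypothetical): the callback typically recovers its
 * enclosing structure with container_of() and frees it:
 *
 *	static void free_foo_cb(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rh);
 *
 *		kfree(fp);
 *	}
 *	...
 *	old_fp = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new_fp);
 *	call_srcu(&my_srcu, &old_fp->rh, free_foo_cb);
 *
 * free_foo_cb() then runs from workqueue context once all readers that
 * might still hold a reference to old_fp have completed.
 */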
900
901/*
902 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
903 */
904static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
905{
906 struct rcu_synchronize rcu;
907
908 RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
909 lock_is_held(&rcu_bh_lock_map) ||
910 lock_is_held(&rcu_lock_map) ||
911 lock_is_held(&rcu_sched_lock_map),
912 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
913
914 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
915 return;
916 might_sleep();
917 check_init_srcu_struct(ssp);
918 init_completion(&rcu.completion);
919 init_rcu_head_on_stack(&rcu.head);
920 __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
921 wait_for_completion(&rcu.completion);
922 destroy_rcu_head_on_stack(&rcu.head);
923
924 /*
925 * Make sure that later code is ordered after the SRCU grace
926 * period. This pairs with the spin_lock_irq_rcu_node()
927 * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed
928 * because the current CPU might have been totally uninvolved with
929 * (and thus unordered against) that grace period.
930 */
931 smp_mb();
932}
933
934/**
935 * synchronize_srcu_expedited - Brute-force SRCU grace period
936 * @ssp: srcu_struct with which to synchronize.
937 *
938 * Wait for an SRCU grace period to elapse, but be more aggressive about
939 * spinning rather than blocking when waiting.
940 *
941 * Note that synchronize_srcu_expedited() has the same deadlock and
942 * memory-ordering properties as does synchronize_srcu().
943 */
944void synchronize_srcu_expedited(struct srcu_struct *ssp)
945{
946 __synchronize_srcu(ssp, rcu_gp_is_normal());
947}
948EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
949
950/**
951 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
952 * @ssp: srcu_struct with which to synchronize.
953 *
954 * Wait for the counts of both index ranks to drain to zero. To avoid
955 * possible starvation of synchronize_srcu(), first wait for the count of
956 * index ((->srcu_idx & 1) ^ 1) to drain to zero, then flip ->srcu_idx
957 * and wait for the count of the other index to drain.
958 *
959 * Can block; must be called from process context.
960 *
961 * Note that it is illegal to call synchronize_srcu() from the corresponding
962 * SRCU read-side critical section; doing so will result in deadlock.
963 * However, it is perfectly legal to call synchronize_srcu() on one
964 * srcu_struct from some other srcu_struct's read-side critical section,
965 * as long as the resulting graph of srcu_structs is acyclic.
966 *
967 * There are memory-ordering constraints implied by synchronize_srcu().
968 * On systems with more than one CPU, when synchronize_srcu() returns,
969 * each CPU is guaranteed to have executed a full memory barrier since
970 * the end of its last corresponding SRCU read-side critical section
971 * whose beginning preceded the call to synchronize_srcu(). In addition,
972 * each CPU having an SRCU read-side critical section that extends beyond
973 * the return from synchronize_srcu() is guaranteed to have executed a
974 * full memory barrier after the beginning of synchronize_srcu() and before
975 * the beginning of that SRCU read-side critical section. Note that these
976 * guarantees include CPUs that are offline, idle, or executing in user mode,
977 * as well as CPUs that are executing in the kernel.
978 *
979 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
980 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
981 * to have executed a full memory barrier during the execution of
982 * synchronize_srcu(). This guarantee applies even if CPU A and CPU B
983 * are the same CPU, but again only if the system has more than one CPU.
984 *
985 * Of course, these memory-ordering guarantees apply only when
986 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
987 * passed the same srcu_struct structure.
988 *
989 * If SRCU is likely idle, expedite the first request. This semantic
990 * was provided by Classic SRCU, and is relied upon by its users, so TREE
991 * SRCU must also provide it. Note that detecting idleness is heuristic
992 * and subject to both false positives and negatives.
993 */
994void synchronize_srcu(struct srcu_struct *ssp)
995{
996 if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
997 synchronize_srcu_expedited(ssp);
998 else
999 __synchronize_srcu(ssp, true);
1000}
1001EXPORT_SYMBOL_GPL(synchronize_srcu);
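/*
 * Updater-side sketch (illustrative only; "gp", "my_lock", and "my_srcu"
 * are hypothetical, as in the call_srcu() example above):
 *
 *	spin_lock(&my_lock);
 *	old_fp = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new_fp);
 *	spin_unlock(&my_lock);
 *
 *	synchronize_srcu(&my_srcu);	// wait out pre-existing readers
 *	kfree(old_fp);			// no reader can still hold old_fp
 */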
1002
1003/*
1004 * Callback function for srcu_barrier() use.
1005 */
1006static void srcu_barrier_cb(struct rcu_head *rhp)
1007{
1008 struct srcu_data *sdp;
1009 struct srcu_struct *ssp;
1010
1011 sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1012 ssp = sdp->ssp;
1013 if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1014 complete(&ssp->srcu_barrier_completion);
1015}
1016
1017/**
1018 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1019 * @ssp: srcu_struct on which to wait for in-flight callbacks.
1020 */
1021void srcu_barrier(struct srcu_struct *ssp)
1022{
1023 int cpu;
1024 struct srcu_data *sdp;
1025 unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
1026
1027 check_init_srcu_struct(ssp);
1028 mutex_lock(&ssp->srcu_barrier_mutex);
1029 if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
1030 smp_mb(); /* Force ordering following return. */
1031 mutex_unlock(&ssp->srcu_barrier_mutex);
1032 return; /* Someone else did our work for us. */
1033 }
1034 rcu_seq_start(&ssp->srcu_barrier_seq);
1035 init_completion(&ssp->srcu_barrier_completion);
1036
1037 /* Initial count prevents reaching zero until all CBs are posted. */
1038 atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
1039
1040 /*
1041 * Each pass through this loop enqueues a callback, but only
1042 * on CPUs already having callbacks enqueued. Note that if
1043	 * a CPU already has callbacks enqueued, it must have already
1044 * registered the need for a future grace period, so all we
1045 * need do is enqueue a callback that will use the same
1046 * grace period as the last callback already in the queue.
1047 */
1048 for_each_possible_cpu(cpu) {
1049 sdp = per_cpu_ptr(ssp->sda, cpu);
1050 spin_lock_irq_rcu_node(sdp);
1051 atomic_inc(&ssp->srcu_barrier_cpu_cnt);
1052 sdp->srcu_barrier_head.func = srcu_barrier_cb;
1053 debug_rcu_head_queue(&sdp->srcu_barrier_head);
1054 if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1055 &sdp->srcu_barrier_head, 0)) {
1056 debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1057 atomic_dec(&ssp->srcu_barrier_cpu_cnt);
1058 }
1059 spin_unlock_irq_rcu_node(sdp);
1060 }
1061
1062 /* Remove the initial count, at which point reaching zero can happen. */
1063 if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1064 complete(&ssp->srcu_barrier_completion);
1065 wait_for_completion(&ssp->srcu_barrier_completion);
1066
1067 rcu_seq_end(&ssp->srcu_barrier_seq);
1068 mutex_unlock(&ssp->srcu_barrier_mutex);
1069}
1070EXPORT_SYMBOL_GPL(srcu_barrier);
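/*
 * Contrast with synchronize_srcu() (illustrative only): srcu_barrier()
 * waits for previously queued call_srcu() callbacks to finish executing,
 * not for readers.  A module whose callback functions live in module text
 * therefore needs something like the following before unloading, where
 * my_stop_queueing_callbacks() is a hypothetical helper that prevents
 * further call_srcu() invocations:
 *
 *	my_stop_queueing_callbacks();
 *	srcu_barrier(&my_srcu);		// all queued callbacks have run
 *	cleanup_srcu_struct(&my_srcu);
 */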
1071
1072/**
1073 * srcu_batches_completed - return batches completed.
1074 * @ssp: srcu_struct on which to report batch completion.
1075 *
1076 * Report the number of batches, correlated with, but not necessarily
1077 * precisely the same as, the number of grace periods that have elapsed.
1078 */
1079unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1080{
1081 return ssp->srcu_idx;
1082}
1083EXPORT_SYMBOL_GPL(srcu_batches_completed);
1084
1085/*
1086 * Core SRCU state machine. Push state bits of ->srcu_gp_seq
1087 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
1088 * completed in that state.
1089 */
1090static void srcu_advance_state(struct srcu_struct *ssp)
1091{
1092 int idx;
1093
1094 mutex_lock(&ssp->srcu_gp_mutex);
1095
1096 /*
1097 * Because readers might be delayed for an extended period after
1098 * fetching ->srcu_idx for their index, at any point in time there
1099 * might well be readers using both idx=0 and idx=1. We therefore
1100 * need to wait for readers to clear from both index values before
1101 * invoking a callback.
1102 *
1103 * The load-acquire ensures that we see the accesses performed
1104 * by the prior grace period.
1105 */
1106 idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
1107 if (idx == SRCU_STATE_IDLE) {
1108 spin_lock_irq_rcu_node(ssp);
1109 if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1110 WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
1111 spin_unlock_irq_rcu_node(ssp);
1112 mutex_unlock(&ssp->srcu_gp_mutex);
1113 return;
1114 }
1115 idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
1116 if (idx == SRCU_STATE_IDLE)
1117 srcu_gp_start(ssp);
1118 spin_unlock_irq_rcu_node(ssp);
1119 if (idx != SRCU_STATE_IDLE) {
1120 mutex_unlock(&ssp->srcu_gp_mutex);
1121 return; /* Someone else started the grace period. */
1122 }
1123 }
1124
1125 if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1126 idx = 1 ^ (ssp->srcu_idx & 1);
1127 if (!try_check_zero(ssp, idx, 1)) {
1128 mutex_unlock(&ssp->srcu_gp_mutex);
1129 return; /* readers present, retry later. */
1130 }
1131 srcu_flip(ssp);
1132 rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
1133 }
1134
1135 if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1136
1137 /*
1138 * SRCU read-side critical sections are normally short,
1139 * so check at least twice in quick succession after a flip.
1140 */
1141 idx = 1 ^ (ssp->srcu_idx & 1);
1142 if (!try_check_zero(ssp, idx, 2)) {
1143 mutex_unlock(&ssp->srcu_gp_mutex);
1144 return; /* readers present, retry later. */
1145 }
1146 srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
1147 }
1148}
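/*
 * Illustrative timeline (not code): a grace period thus moves
 * ->srcu_gp_seq from SRCU_STATE_IDLE to SRCU_STATE_SCAN1 (drain readers
 * from the currently inactive index), calls srcu_flip(), moves to
 * SRCU_STATE_SCAN2 (drain readers from the previously active index), and
 * finally reaches srcu_gp_end(), which may immediately start another
 * grace period if further requests arrived in the meantime.
 */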
1149
1150/*
1151 * Invoke a limited number of SRCU callbacks that have passed through
1152 * their grace period. If there are more to do, SRCU will reschedule
1153 * the workqueue. Note that needed memory barriers have been executed
1154 * in this task's context by srcu_readers_active_idx_check().
1155 */
1156static void srcu_invoke_callbacks(struct work_struct *work)
1157{
1158 bool more;
1159 struct rcu_cblist ready_cbs;
1160 struct rcu_head *rhp;
1161 struct srcu_data *sdp;
1162 struct srcu_struct *ssp;
1163
1164 sdp = container_of(work, struct srcu_data, work);
1165
1166 ssp = sdp->ssp;
1167 rcu_cblist_init(&ready_cbs);
1168 spin_lock_irq_rcu_node(sdp);
1169 rcu_segcblist_advance(&sdp->srcu_cblist,
1170 rcu_seq_current(&ssp->srcu_gp_seq));
1171 if (sdp->srcu_cblist_invoking ||
1172 !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1173 spin_unlock_irq_rcu_node(sdp);
1174 return; /* Someone else on the job or nothing to do. */
1175 }
1176
1177 /* We are on the job! Extract and invoke ready callbacks. */
1178 sdp->srcu_cblist_invoking = true;
1179 rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1180 spin_unlock_irq_rcu_node(sdp);
1181 rhp = rcu_cblist_dequeue(&ready_cbs);
1182 for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1183 debug_rcu_head_unqueue(rhp);
1184 local_bh_disable();
1185 rhp->func(rhp);
1186 local_bh_enable();
1187 }
1188
1189 /*
1190 * Update counts, accelerate new callbacks, and if needed,
1191 * schedule another round of callback invocation.
1192 */
1193 spin_lock_irq_rcu_node(sdp);
1194 rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
1195 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
1196 rcu_seq_snap(&ssp->srcu_gp_seq));
1197 sdp->srcu_cblist_invoking = false;
1198 more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1199 spin_unlock_irq_rcu_node(sdp);
1200 if (more)
1201 srcu_schedule_cbs_sdp(sdp, 0);
1202}
1203
1204/*
1205 * Finished one round of SRCU grace period. Start another if there are
1206 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
1207 */
1208static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1209{
1210 bool pushgp = true;
1211
1212 spin_lock_irq_rcu_node(ssp);
1213 if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1214 if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
1215 /* All requests fulfilled, time to go idle. */
1216 pushgp = false;
1217 }
1218 } else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
1219 /* Outstanding request and no GP. Start one. */
1220 srcu_gp_start(ssp);
1221 }
1222 spin_unlock_irq_rcu_node(ssp);
1223
1224 if (pushgp)
1225 queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
1226}
1227
1228/*
1229 * This is the work-queue function that handles SRCU grace periods.
1230 */
1231static void process_srcu(struct work_struct *work)
1232{
1233 struct srcu_struct *ssp;
1234
1235 ssp = container_of(work, struct srcu_struct, work.work);
1236
1237 srcu_advance_state(ssp);
1238 srcu_reschedule(ssp, srcu_get_delay(ssp));
1239}
1240
1241void srcutorture_get_gp_data(enum rcutorture_type test_type,
1242 struct srcu_struct *ssp, int *flags,
1243 unsigned long *gp_seq)
1244{
1245 if (test_type != SRCU_FLAVOR)
1246 return;
1247 *flags = 0;
1248 *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
1249}
1250EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
1251
1252void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1253{
1254 int cpu;
1255 int idx;
1256 unsigned long s0 = 0, s1 = 0;
1257
1258 idx = ssp->srcu_idx & 0x1;
1259 pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
1260 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
1261 for_each_possible_cpu(cpu) {
1262 unsigned long l0, l1;
1263 unsigned long u0, u1;
1264 long c0, c1;
1265 struct srcu_data *sdp;
1266
1267 sdp = per_cpu_ptr(ssp->sda, cpu);
1268 u0 = sdp->srcu_unlock_count[!idx];
1269 u1 = sdp->srcu_unlock_count[idx];
1270
1271 /*
1272 * Make sure that a lock is always counted if the corresponding
1273 * unlock is counted.
1274 */
1275 smp_rmb();
1276
1277 l0 = sdp->srcu_lock_count[!idx];
1278 l1 = sdp->srcu_lock_count[idx];
1279
1280 c0 = l0 - u0;
1281 c1 = l1 - u1;
1282 pr_cont(" %d(%ld,%ld %c)",
1283 cpu, c0, c1,
1284 "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
1285 s0 += c0;
1286 s1 += c1;
1287 }
1288 pr_cont(" T(%ld,%ld)\n", s0, s1);
1289}
1290EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1291
1292static int __init srcu_bootup_announce(void)
1293{
1294 pr_info("Hierarchical SRCU implementation.\n");
1295 if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
1296 pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
1297 return 0;
1298}
1299early_initcall(srcu_bootup_announce);
1300
1301void __init srcu_init(void)
1302{
1303 struct srcu_struct *ssp;
1304
1305 srcu_init_done = true;
1306 while (!list_empty(&srcu_boot_list)) {
1307 ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
1308 work.work.entry);
1309 check_init_srcu_struct(ssp);
1310 list_del_init(&ssp->work.work.entry);
1311 queue_work(rcu_gp_wq, &ssp->work.work);
1312 }
1313}
1314
1315#ifdef CONFIG_MODULES
1316
1317/* Initialize any global-scope srcu_struct structures used by this module. */
1318static int srcu_module_coming(struct module *mod)
1319{
1320 int i;
1321 struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1322 int ret;
1323
1324 for (i = 0; i < mod->num_srcu_structs; i++) {
1325 ret = init_srcu_struct(*(sspp++));
1326 if (WARN_ON_ONCE(ret))
1327 return ret;
1328 }
1329 return 0;
1330}
1331
1332/* Clean up any global-scope srcu_struct structures used by this module. */
1333static void srcu_module_going(struct module *mod)
1334{
1335 int i;
1336 struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1337
1338 for (i = 0; i < mod->num_srcu_structs; i++)
1339 cleanup_srcu_struct(*(sspp++));
1340}
1341
1342/* Handle one module, either coming or going. */
1343static int srcu_module_notify(struct notifier_block *self,
1344 unsigned long val, void *data)
1345{
1346 struct module *mod = data;
1347 int ret = 0;
1348
1349 switch (val) {
1350 case MODULE_STATE_COMING:
1351 ret = srcu_module_coming(mod);
1352 break;
1353 case MODULE_STATE_GOING:
1354 srcu_module_going(mod);
1355 break;
1356 default:
1357 break;
1358 }
1359 return ret;
1360}
1361
1362static struct notifier_block srcu_module_nb = {
1363 .notifier_call = srcu_module_notify,
1364 .priority = 0,
1365};
1366
1367static __init int init_srcu_module_notifier(void)
1368{
1369 int ret;
1370
1371 ret = register_module_notifier(&srcu_module_nb);
1372 if (ret)
1373 pr_warn("Failed to register srcu module notifier\n");
1374 return ret;
1375}
1376late_initcall(init_srcu_module_notifier);
1377
1378#endif /* #ifdef CONFIG_MODULES */
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Sleepable Read-Copy Update mechanism for mutual exclusion.
4 *
5 * Copyright (C) IBM Corporation, 2006
6 * Copyright (C) Fujitsu, 2012
7 *
8 * Authors: Paul McKenney <paulmck@linux.ibm.com>
9 * Lai Jiangshan <laijs@cn.fujitsu.com>
10 *
11 * For detailed explanation of Read-Copy Update mechanism see -
12 * Documentation/RCU/ *.txt
13 *
14 */
15
16#define pr_fmt(fmt) "rcu: " fmt
17
18#include <linux/export.h>
19#include <linux/mutex.h>
20#include <linux/percpu.h>
21#include <linux/preempt.h>
22#include <linux/rcupdate_wait.h>
23#include <linux/sched.h>
24#include <linux/smp.h>
25#include <linux/delay.h>
26#include <linux/module.h>
27#include <linux/srcu.h>
28
29#include "rcu.h"
30#include "rcu_segcblist.h"
31
32#ifndef data_race
33#define data_race(expr) \
34 ({ \
35 expr; \
36 })
37#endif
38#ifndef ASSERT_EXCLUSIVE_WRITER
39#define ASSERT_EXCLUSIVE_WRITER(var) do { } while (0)
40#endif
41#ifndef ASSERT_EXCLUSIVE_ACCESS
42#define ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
43#endif
44
45/* Holdoff in nanoseconds for auto-expediting. */
46#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
47static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
48module_param(exp_holdoff, ulong, 0444);
49
50/* Overflow-check frequency. N bits roughly says every 2**N grace periods. */
51static ulong counter_wrap_check = (ULONG_MAX >> 2);
52module_param(counter_wrap_check, ulong, 0444);
53
54/* Early-boot callback-management, so early that no lock is required! */
55static LIST_HEAD(srcu_boot_list);
56static bool __read_mostly srcu_init_done;
57
58static void srcu_invoke_callbacks(struct work_struct *work);
59static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
60static void process_srcu(struct work_struct *work);
61static void srcu_delay_timer(struct timer_list *t);
62
63/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
64#define spin_lock_rcu_node(p) \
65do { \
66 spin_lock(&ACCESS_PRIVATE(p, lock)); \
67 smp_mb__after_unlock_lock(); \
68} while (0)
69
70#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
71
72#define spin_lock_irq_rcu_node(p) \
73do { \
74 spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
75 smp_mb__after_unlock_lock(); \
76} while (0)
77
78#define spin_unlock_irq_rcu_node(p) \
79 spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
80
81#define spin_lock_irqsave_rcu_node(p, flags) \
82do { \
83 spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
84 smp_mb__after_unlock_lock(); \
85} while (0)
86
87#define spin_unlock_irqrestore_rcu_node(p, flags) \
88 spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags) \
89
90/*
91 * Initialize SRCU combining tree. Note that statically allocated
92 * srcu_struct structures might already have srcu_read_lock() and
93 * srcu_read_unlock() running against them. So if the is_static parameter
94 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
95 */
96static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
97{
98 int cpu;
99 int i;
100 int level = 0;
101 int levelspread[RCU_NUM_LVLS];
102 struct srcu_data *sdp;
103 struct srcu_node *snp;
104 struct srcu_node *snp_first;
105
106 /* Work out the overall tree geometry. */
107 ssp->level[0] = &ssp->node[0];
108 for (i = 1; i < rcu_num_lvls; i++)
109 ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
110 rcu_init_levelspread(levelspread, num_rcu_lvl);
111
112 /* Each pass through this loop initializes one srcu_node structure. */
113 srcu_for_each_node_breadth_first(ssp, snp) {
114 spin_lock_init(&ACCESS_PRIVATE(snp, lock));
115 WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
116 ARRAY_SIZE(snp->srcu_data_have_cbs));
117 for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
118 snp->srcu_have_cbs[i] = 0;
119 snp->srcu_data_have_cbs[i] = 0;
120 }
121 snp->srcu_gp_seq_needed_exp = 0;
122 snp->grplo = -1;
123 snp->grphi = -1;
124 if (snp == &ssp->node[0]) {
125 /* Root node, special case. */
126 snp->srcu_parent = NULL;
127 continue;
128 }
129
130 /* Non-root node. */
131 if (snp == ssp->level[level + 1])
132 level++;
133 snp->srcu_parent = ssp->level[level - 1] +
134 (snp - ssp->level[level]) /
135 levelspread[level - 1];
136 }
137
138 /*
139 * Initialize the per-CPU srcu_data array, which feeds into the
140 * leaves of the srcu_node tree.
141 */
142 WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
143 ARRAY_SIZE(sdp->srcu_unlock_count));
144 level = rcu_num_lvls - 1;
145 snp_first = ssp->level[level];
146 for_each_possible_cpu(cpu) {
147 sdp = per_cpu_ptr(ssp->sda, cpu);
148 spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
149 rcu_segcblist_init(&sdp->srcu_cblist);
150 sdp->srcu_cblist_invoking = false;
151 sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
152 sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
153 sdp->mynode = &snp_first[cpu / levelspread[level]];
154 for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
155 if (snp->grplo < 0)
156 snp->grplo = cpu;
157 snp->grphi = cpu;
158 }
159 sdp->cpu = cpu;
160 INIT_WORK(&sdp->work, srcu_invoke_callbacks);
161 timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
162 sdp->ssp = ssp;
163 sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
164 if (is_static)
165 continue;
166
167 /* Dynamically allocated, better be no srcu_read_locks()! */
168 for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
169 sdp->srcu_lock_count[i] = 0;
170 sdp->srcu_unlock_count[i] = 0;
171 }
172 }
173}
174
175/*
176 * Initialize non-compile-time initialized fields, including the
177 * associated srcu_node and srcu_data structures. The is_static
178 * parameter is passed through to init_srcu_struct_nodes(), and
179 * also tells us that ->sda has already been wired up to srcu_data.
180 */
181static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
182{
183 mutex_init(&ssp->srcu_cb_mutex);
184 mutex_init(&ssp->srcu_gp_mutex);
185 ssp->srcu_idx = 0;
186 ssp->srcu_gp_seq = 0;
187 ssp->srcu_barrier_seq = 0;
188 mutex_init(&ssp->srcu_barrier_mutex);
189 atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
190 INIT_DELAYED_WORK(&ssp->work, process_srcu);
191 if (!is_static)
192 ssp->sda = alloc_percpu(struct srcu_data);
193 init_srcu_struct_nodes(ssp, is_static);
194 ssp->srcu_gp_seq_needed_exp = 0;
195 ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
196 smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
197 return ssp->sda ? 0 : -ENOMEM;
198}
199
200#ifdef CONFIG_DEBUG_LOCK_ALLOC
201
202int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
203 struct lock_class_key *key)
204{
205 /* Don't re-initialize a lock while it is held. */
206 debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
207 lockdep_init_map(&ssp->dep_map, name, key, 0);
208 spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
209 return init_srcu_struct_fields(ssp, false);
210}
211EXPORT_SYMBOL_GPL(__init_srcu_struct);
212
213#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
214
215/**
216 * init_srcu_struct - initialize a sleep-RCU structure
217 * @ssp: structure to initialize.
218 *
219 * Must invoke this on a given srcu_struct before passing that srcu_struct
220 * to any other function. Each srcu_struct represents a separate domain
221 * of SRCU protection.
222 */
223int init_srcu_struct(struct srcu_struct *ssp)
224{
225 spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
226 return init_srcu_struct_fields(ssp, false);
227}
228EXPORT_SYMBOL_GPL(init_srcu_struct);
229
230#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
231
232/*
233 * First-use initialization of statically allocated srcu_struct
234 * structure. Wiring up the combining tree is more than can be
235 * done with compile-time initialization, so this check is added
236 * to each update-side SRCU primitive. Use ssp->lock, which -is-
237 * compile-time initialized, to resolve races involving multiple
238 * CPUs trying to garner first-use privileges.
239 */
240static void check_init_srcu_struct(struct srcu_struct *ssp)
241{
242 unsigned long flags;
243
244 /* The smp_load_acquire() pairs with the smp_store_release(). */
245 if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
246 return; /* Already initialized. */
247 spin_lock_irqsave_rcu_node(ssp, flags);
248 if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
249 spin_unlock_irqrestore_rcu_node(ssp, flags);
250 return;
251 }
252 init_srcu_struct_fields(ssp, true);
253 spin_unlock_irqrestore_rcu_node(ssp, flags);
254}
255
256/*
257 * Returns approximate total of the readers' ->srcu_lock_count[] values
258 * for the rank of per-CPU counters specified by idx.
259 */
260static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
261{
262 int cpu;
263 unsigned long sum = 0;
264
265 for_each_possible_cpu(cpu) {
266 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
267
268 sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
269 }
270 return sum;
271}
272
273/*
274 * Returns approximate total of the readers' ->srcu_unlock_count[] values
275 * for the rank of per-CPU counters specified by idx.
276 */
277static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
278{
279 int cpu;
280 unsigned long sum = 0;
281
282 for_each_possible_cpu(cpu) {
283 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
284
285 sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
286 }
287 return sum;
288}
289
290/*
291 * Return true if the number of pre-existing readers is determined to
292 * be zero.
293 */
294static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
295{
296 unsigned long unlocks;
297
298 unlocks = srcu_readers_unlock_idx(ssp, idx);
299
300 /*
301 * Make sure that a lock is always counted if the corresponding
302 * unlock is counted. Needs to be a smp_mb() as the read side may
303 * contain a read from a variable that is written to before the
304 * synchronize_srcu() in the write side. In this case smp_mb()s
305 * A and B act like the store buffering pattern.
306 *
307 * This smp_mb() also pairs with smp_mb() C to prevent accesses
308 * after the synchronize_srcu() from being executed before the
309 * grace period ends.
310 */
311 smp_mb(); /* A */
312
313 /*
314 * If the locks are the same as the unlocks, then there must have
315 * been no readers on this index at some time in between. This does
316 * not mean that there are no more readers, as one could have read
317 * the current index but not have incremented the lock counter yet.
318 *
319 * So suppose that the updater is preempted here for so long
320 * that more than ULONG_MAX non-nested readers come and go in
321 * the meantime. It turns out that this cannot result in overflow
322 * because if a reader modifies its unlock count after we read it
323 * above, then that reader's next load of ->srcu_idx is guaranteed
324 * to get the new value, which will cause it to operate on the
325 * other bank of counters, where it cannot contribute to the
326 * overflow of these counters. This means that there is a maximum
327 * of 2*NR_CPUS increments, which cannot overflow given current
328 * systems, especially not on 64-bit systems.
329 *
330 * OK, how about nesting? This does impose a limit on nesting
331 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
332 * especially on 64-bit systems.
333 */
334 return srcu_readers_lock_idx(ssp, idx) == unlocks;
335}
336
337/**
338 * srcu_readers_active - returns true if there are readers, and false
339 * otherwise
340 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
341 *
342 * Note that this is not an atomic primitive, and can therefore suffer
343 * severe errors when invoked on an active srcu_struct. That said, it
344 * can be useful as an error check at cleanup time.
345 */
346static bool srcu_readers_active(struct srcu_struct *ssp)
347{
348 int cpu;
349 unsigned long sum = 0;
350
351 for_each_possible_cpu(cpu) {
352 struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
353
354 sum += READ_ONCE(cpuc->srcu_lock_count[0]);
355 sum += READ_ONCE(cpuc->srcu_lock_count[1]);
356 sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
357 sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
358 }
359 return sum;
360}
361
362#define SRCU_INTERVAL 1
363
364/*
365 * Return grace-period delay, zero if there are expedited grace
366 * periods pending, SRCU_INTERVAL otherwise.
367 */
368static unsigned long srcu_get_delay(struct srcu_struct *ssp)
369{
370 if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
371 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
372 return 0;
373 return SRCU_INTERVAL;
374}
375
376/**
377 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
378 * @ssp: structure to clean up.
379 *
380 * Must invoke this after you are finished using a given srcu_struct that
381 * was initialized via init_srcu_struct(), else you leak memory.
382 */
383void cleanup_srcu_struct(struct srcu_struct *ssp)
384{
385 int cpu;
386
387 if (WARN_ON(!srcu_get_delay(ssp)))
388 return; /* Just leak it! */
389 if (WARN_ON(srcu_readers_active(ssp)))
390 return; /* Just leak it! */
391 flush_delayed_work(&ssp->work);
392 for_each_possible_cpu(cpu) {
393 struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
394
395 del_timer_sync(&sdp->delay_work);
396 flush_work(&sdp->work);
397 if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
398 return; /* Forgot srcu_barrier(), so just leak it! */
399 }
400 if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
401 WARN_ON(srcu_readers_active(ssp))) {
402 pr_info("%s: Active srcu_struct %p state: %d\n",
403 __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
404 return; /* Caller forgot to stop doing call_srcu()? */
405 }
406 free_percpu(ssp->sda);
407 ssp->sda = NULL;
408}
409EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
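
/*
 * Teardown sketch (illustrative only; "my_srcu" and "my_driver_exit" are
 * hypothetical): before cleanup_srcu_struct() is called, all readers must
 * have finished, no further call_srcu() invocations may occur, and any
 * previously queued callbacks must have been invoked, for example via
 * srcu_barrier():
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		// No srcu_read_lock() or call_srcu() on my_srcu past this point.
 *		srcu_barrier(&my_srcu);		// Wait for queued callbacks.
 *		cleanup_srcu_struct(&my_srcu);	// Free per-CPU and node state.
 *	}
 */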
410
411/*
412 * Counts the new reader in the appropriate per-CPU element of the
413 * srcu_struct.
414 * Returns an index that must be passed to the matching srcu_read_unlock().
415 */
416int __srcu_read_lock(struct srcu_struct *ssp)
417{
418 int idx;
419
420 idx = READ_ONCE(ssp->srcu_idx) & 0x1;
421 this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
422 smp_mb(); /* B */ /* Avoid leaking the critical section. */
423 return idx;
424}
425EXPORT_SYMBOL_GPL(__srcu_read_lock);
426
427/*
428 * Removes the count for the old reader from the appropriate per-CPU
429 * element of the srcu_struct. Note that this may well be a different
430 * CPU than that which was incremented by the corresponding srcu_read_lock().
431 */
432void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
433{
434 smp_mb(); /* C */ /* Avoid leaking the critical section. */
435 this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
436}
437EXPORT_SYMBOL_GPL(__srcu_read_unlock);
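
/*
 * Reader-side sketch (illustrative only; "my_srcu", "my_gp", "my_data", and
 * do_something_with() are hypothetical): users call srcu_read_lock() and
 * srcu_read_unlock(), which wrap the two functions above, and fetch
 * protected pointers with srcu_dereference().  Unlike vanilla RCU, the
 * SRCU read-side critical section is allowed to block.
 *
 *	static struct my_data __rcu *my_gp;
 *
 *	static void my_reader(void)
 *	{
 *		struct my_data *p;
 *		int idx;
 *
 *		idx = srcu_read_lock(&my_srcu);
 *		p = srcu_dereference(my_gp, &my_srcu);
 *		if (p)
 *			do_something_with(p);	// May sleep.
 *		srcu_read_unlock(&my_srcu, idx);
 *	}
 */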
438
439/*
440 * We use an adaptive strategy for synchronize_srcu() and especially for
441 * synchronize_srcu_expedited(). We spin for a fixed time period
442 * (defined below) to allow SRCU readers to exit their read-side critical
443 * sections. If there are still some readers after a few microseconds,
444 * we repeatedly block for 1-millisecond time periods.
445 */
446#define SRCU_RETRY_CHECK_DELAY 5
447
448/*
449 * Start an SRCU grace period.
450 */
451static void srcu_gp_start(struct srcu_struct *ssp)
452{
453 struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
454 int state;
455
456 lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
457 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
458 spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
459 rcu_segcblist_advance(&sdp->srcu_cblist,
460 rcu_seq_current(&ssp->srcu_gp_seq));
461 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
462 rcu_seq_snap(&ssp->srcu_gp_seq));
463 spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
464 smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
465 rcu_seq_start(&ssp->srcu_gp_seq);
466 state = rcu_seq_state(ssp->srcu_gp_seq);
467 WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
468}
469
470
471static void srcu_delay_timer(struct timer_list *t)
472{
473 struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
474
475 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
476}
477
478static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
479 unsigned long delay)
480{
481 if (!delay) {
482 queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
483 return;
484 }
485
486 timer_reduce(&sdp->delay_work, jiffies + delay);
487}
488
489/*
490 * Schedule callback invocation for the specified srcu_data structure,
491 * if possible, on the corresponding CPU.
492 */
493static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
494{
495 srcu_queue_delayed_work_on(sdp, delay);
496}
497
498/*
499 * Schedule callback invocation for all srcu_data structures associated
500 * with the specified srcu_node structure that have callbacks for the
501 * just-completed grace period, the one corresponding to idx. If possible,
502 * schedule this invocation on the corresponding CPUs.
503 */
504static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
505 unsigned long mask, unsigned long delay)
506{
507 int cpu;
508
509 for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
510		if (!(mask & (1UL << (cpu - snp->grplo))))
511 continue;
512 srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
513 }
514}
515
516/*
517 * Note the end of an SRCU grace period. Initiates callback invocation
518 * and starts a new grace period if needed.
519 *
520 * The ->srcu_cb_mutex acquisition does not protect any data, but
521 * instead prevents more than one grace period from starting while we
522 * are initiating callback invocation. This allows the ->srcu_have_cbs[]
523 * array to have a finite number of elements.
524 */
525static void srcu_gp_end(struct srcu_struct *ssp)
526{
527 unsigned long cbdelay;
528 bool cbs;
529 bool last_lvl;
530 int cpu;
531 unsigned long flags;
532 unsigned long gpseq;
533 int idx;
534 unsigned long mask;
535 struct srcu_data *sdp;
536 struct srcu_node *snp;
537
538 /* Prevent more than one additional grace period. */
539 mutex_lock(&ssp->srcu_cb_mutex);
540
541 /* End the current grace period. */
542 spin_lock_irq_rcu_node(ssp);
543 idx = rcu_seq_state(ssp->srcu_gp_seq);
544 WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
545 cbdelay = srcu_get_delay(ssp);
546 WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
547 rcu_seq_end(&ssp->srcu_gp_seq);
548 gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
549 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
550 WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
551 spin_unlock_irq_rcu_node(ssp);
552 mutex_unlock(&ssp->srcu_gp_mutex);
553 /* A new grace period can start at this point. But only one. */
554
555 /* Initiate callback invocation as needed. */
556 idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
557 srcu_for_each_node_breadth_first(ssp, snp) {
558 spin_lock_irq_rcu_node(snp);
559 cbs = false;
560 last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
561 if (last_lvl)
562 cbs = snp->srcu_have_cbs[idx] == gpseq;
563 snp->srcu_have_cbs[idx] = gpseq;
564 rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
565 if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
566 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
567 mask = snp->srcu_data_have_cbs[idx];
568 snp->srcu_data_have_cbs[idx] = 0;
569 spin_unlock_irq_rcu_node(snp);
570 if (cbs)
571 srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
572
573 /* Occasionally prevent srcu_data counter wrap. */
574 if (!(gpseq & counter_wrap_check) && last_lvl)
575 for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
576 sdp = per_cpu_ptr(ssp->sda, cpu);
577 spin_lock_irqsave_rcu_node(sdp, flags);
578 if (ULONG_CMP_GE(gpseq,
579 sdp->srcu_gp_seq_needed + 100))
580 sdp->srcu_gp_seq_needed = gpseq;
581 if (ULONG_CMP_GE(gpseq,
582 sdp->srcu_gp_seq_needed_exp + 100))
583 sdp->srcu_gp_seq_needed_exp = gpseq;
584 spin_unlock_irqrestore_rcu_node(sdp, flags);
585 }
586 }
587
588 /* Callback initiation done, allow grace periods after next. */
589 mutex_unlock(&ssp->srcu_cb_mutex);
590
591 /* Start a new grace period if needed. */
592 spin_lock_irq_rcu_node(ssp);
593 gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
594 if (!rcu_seq_state(gpseq) &&
595 ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
596 srcu_gp_start(ssp);
597 spin_unlock_irq_rcu_node(ssp);
598 srcu_reschedule(ssp, 0);
599 } else {
600 spin_unlock_irq_rcu_node(ssp);
601 }
602}
603
604/*
605 * Funnel-locking scheme to scalably mediate many concurrent expedited
606 * grace-period requests. This function is invoked for the first known
607 * expedited request for a grace period that has already been requested,
608 * but without expediting. To start a completely new grace period,
609 * whether expedited or not, use srcu_funnel_gp_start() instead.
610 */
611static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
612 unsigned long s)
613{
614 unsigned long flags;
615
616 for (; snp != NULL; snp = snp->srcu_parent) {
617 if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
618 ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
619 return;
620 spin_lock_irqsave_rcu_node(snp, flags);
621 if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
622 spin_unlock_irqrestore_rcu_node(snp, flags);
623 return;
624 }
625 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
626 spin_unlock_irqrestore_rcu_node(snp, flags);
627 }
628 spin_lock_irqsave_rcu_node(ssp, flags);
629 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
630 WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
631 spin_unlock_irqrestore_rcu_node(ssp, flags);
632}
633
634/*
635 * Funnel-locking scheme to scalably mediate many concurrent grace-period
636 * requests. The winner has to do the work of actually starting grace
637 * period s. Losers must either ensure that their desired grace-period
638 * number is recorded on at least their leaf srcu_node structure, or they
639 * must take steps to invoke their own callbacks.
640 *
641 * Note that this function also does the work of srcu_funnel_exp_start(),
642 * in some cases by directly invoking it.
643 */
644static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
645 unsigned long s, bool do_norm)
646{
647 unsigned long flags;
648 int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
649 struct srcu_node *snp = sdp->mynode;
650 unsigned long snp_seq;
651
652 /* Each pass through the loop does one level of the srcu_node tree. */
653 for (; snp != NULL; snp = snp->srcu_parent) {
654 if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
655 return; /* GP already done and CBs recorded. */
656 spin_lock_irqsave_rcu_node(snp, flags);
657 if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
658 snp_seq = snp->srcu_have_cbs[idx];
659 if (snp == sdp->mynode && snp_seq == s)
660 snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
661 spin_unlock_irqrestore_rcu_node(snp, flags);
662 if (snp == sdp->mynode && snp_seq != s) {
663 srcu_schedule_cbs_sdp(sdp, do_norm
664 ? SRCU_INTERVAL
665 : 0);
666 return;
667 }
668 if (!do_norm)
669 srcu_funnel_exp_start(ssp, snp, s);
670 return;
671 }
672 snp->srcu_have_cbs[idx] = s;
673 if (snp == sdp->mynode)
674 snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
675 if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
676 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
677 spin_unlock_irqrestore_rcu_node(snp, flags);
678 }
679
680 /* Top of tree, must ensure the grace period will be started. */
681 spin_lock_irqsave_rcu_node(ssp, flags);
682 if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
683 /*
684 * Record need for grace period s. Pair with load
685 * acquire setting up for initialization.
686 */
687 smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
688 }
689 if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
690 WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
691
692 /* If grace period not already done and none in progress, start it. */
693 if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
694 rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
695 WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
696 srcu_gp_start(ssp);
697 if (likely(srcu_init_done))
698 queue_delayed_work(rcu_gp_wq, &ssp->work,
699 srcu_get_delay(ssp));
700 else if (list_empty(&ssp->work.work.entry))
701 list_add(&ssp->work.work.entry, &srcu_boot_list);
702 }
703 spin_unlock_irqrestore_rcu_node(ssp, flags);
704}
705
706/*
707 * Wait until all readers counted by array index idx complete, but
708 * loop an additional time if there is an expedited grace period pending.
709 * The caller must ensure that ->srcu_idx is not changed while checking.
710 */
711static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
712{
713 for (;;) {
714 if (srcu_readers_active_idx_check(ssp, idx))
715 return true;
716 if (--trycount + !srcu_get_delay(ssp) <= 0)
717 return false;
718 udelay(SRCU_RETRY_CHECK_DELAY);
719 }
720}
721
722/*
723 * Increment the ->srcu_idx counter so that future SRCU readers will
724 * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
725 * us to wait for pre-existing readers in a starvation-free manner.
726 */
727static void srcu_flip(struct srcu_struct *ssp)
728{
729 /*
730 * Ensure that if this updater saw a given reader's increment
731 * from __srcu_read_lock(), that reader was using an old value
732 * of ->srcu_idx. Also ensure that if a given reader sees the
733 * new value of ->srcu_idx, this updater's earlier scans cannot
734 * have seen that reader's increments (which is OK, because this
735 * grace period need not wait on that reader).
736 */
737 smp_mb(); /* E */ /* Pairs with B and C. */
738
739 WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
740
741 /*
742 * Ensure that if the updater misses an __srcu_read_unlock()
743 * increment, that task's next __srcu_read_lock() will see the
744 * above counter update. Note that both this memory barrier
745 * and the one in srcu_readers_active_idx_check() provide the
746 * guarantee for __srcu_read_lock().
747 */
748 smp_mb(); /* D */ /* Pairs with C. */
749}
750
751/*
752 * If SRCU is likely idle, return true, otherwise return false.
753 *
754 * Note that it is OK for several concurrent from-idle requests for a new
755 * grace period to specify expediting, because they will all end up
756 * requesting the same grace period anyhow. So no loss.
757 *
758 * Note also that if any CPU (including the current one) is still invoking
759 * callbacks, this function will nevertheless say "idle". This is not
760 * ideal, but the overhead of checking all CPUs' callback lists is even
761 * less ideal, especially on large systems. Furthermore, the wakeup
762 * can happen before the callback is fully removed, so we have no choice
763 * but to accept this type of error.
764 *
765 * This function is also subject to counter-wrap errors, but let's face
766 * it, if this function was preempted for enough time for the counters
767 * to wrap, it really doesn't matter whether or not we expedite the grace
768 * period. The extra overhead of a needlessly expedited grace period is
769 * negligible when amortized over that time period, and the extra latency
770 * of a needlessly non-expedited grace period is similarly negligible.
771 */
772static bool srcu_might_be_idle(struct srcu_struct *ssp)
773{
774 unsigned long curseq;
775 unsigned long flags;
776 struct srcu_data *sdp;
777 unsigned long t;
778 unsigned long tlast;
779
780 check_init_srcu_struct(ssp);
781 /* If the local srcu_data structure has callbacks, not idle. */
782 sdp = raw_cpu_ptr(ssp->sda);
783 spin_lock_irqsave_rcu_node(sdp, flags);
784 if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
785 spin_unlock_irqrestore_rcu_node(sdp, flags);
786 return false; /* Callbacks already present, so not idle. */
787 }
788 spin_unlock_irqrestore_rcu_node(sdp, flags);
789
790 /*
791	 * No local callbacks, so probabilistically probe global state.
792	 * Exact information would require acquiring locks, which would
793	 * kill scalability, hence the probabilistic nature of the probe.
794 */
795
796 /* First, see if enough time has passed since the last GP. */
797 t = ktime_get_mono_fast_ns();
798 tlast = READ_ONCE(ssp->srcu_last_gp_end);
799 if (exp_holdoff == 0 ||
800 time_in_range_open(t, tlast, tlast + exp_holdoff))
801 return false; /* Too soon after last GP. */
802
803 /* Next, check for probable idleness. */
804 curseq = rcu_seq_current(&ssp->srcu_gp_seq);
805 smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
806 if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
807 return false; /* Grace period in progress, so not idle. */
808 smp_mb(); /* Order ->srcu_gp_seq with prior access. */
809 if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
810 return false; /* GP # changed, so not idle. */
811 return true; /* With reasonable probability, idle! */
812}
813
814/*
815 * SRCU callback function to leak a callback.
816 */
817static void srcu_leak_callback(struct rcu_head *rhp)
818{
819}
820
821/*
822 * Enqueue an SRCU callback on the srcu_data structure associated with
823 * the current CPU and the specified srcu_struct structure, initiating
824 * grace-period processing if it is not already running.
825 *
826 * Note that all CPUs must agree that the grace period extended beyond
827 * all pre-existing SRCU read-side critical sections. On systems with
828 * more than one CPU, this means that when "func()" is invoked, each CPU
829 * is guaranteed to have executed a full memory barrier since the end of
830 * its last corresponding SRCU read-side critical section whose beginning
831 * preceded the call to call_srcu(). It also means that each CPU executing
832 * an SRCU read-side critical section that continues beyond the start of
833 * "func()" must have executed a memory barrier after the call_srcu()
834 * but before the beginning of that SRCU read-side critical section.
835 * Note that these guarantees include CPUs that are offline, idle, or
836 * executing in user mode, as well as CPUs that are executing in the kernel.
837 *
838 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
839 * resulting SRCU callback function "func()", then both CPU A and CPU
840 * B are guaranteed to execute a full memory barrier during the time
841 * interval between the call to call_srcu() and the invocation of "func()".
842 * This guarantee applies even if CPU A and CPU B are the same CPU (but
843 * again only if the system has more than one CPU).
844 *
845 * Of course, these guarantees apply only for invocations of call_srcu(),
846 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
847 * srcu_struct structure.
848 */
849static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
850 rcu_callback_t func, bool do_norm)
851{
852 unsigned long flags;
853 int idx;
854 bool needexp = false;
855 bool needgp = false;
856 unsigned long s;
857 struct srcu_data *sdp;
858
859 check_init_srcu_struct(ssp);
860 if (debug_rcu_head_queue(rhp)) {
861 /* Probable double call_srcu(), so leak the callback. */
862 WRITE_ONCE(rhp->func, srcu_leak_callback);
863 WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
864 return;
865 }
866 rhp->func = func;
867 idx = srcu_read_lock(ssp);
868 sdp = raw_cpu_ptr(ssp->sda);
869 spin_lock_irqsave_rcu_node(sdp, flags);
870 rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
871 rcu_segcblist_advance(&sdp->srcu_cblist,
872 rcu_seq_current(&ssp->srcu_gp_seq));
873 s = rcu_seq_snap(&ssp->srcu_gp_seq);
874 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
875 if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
876 sdp->srcu_gp_seq_needed = s;
877 needgp = true;
878 }
879 if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
880 sdp->srcu_gp_seq_needed_exp = s;
881 needexp = true;
882 }
883 spin_unlock_irqrestore_rcu_node(sdp, flags);
884 if (needgp)
885 srcu_funnel_gp_start(ssp, sdp, s, do_norm);
886 else if (needexp)
887 srcu_funnel_exp_start(ssp, sdp->mynode, s);
888 srcu_read_unlock(ssp, idx);
889}
890
891/**
892 * call_srcu() - Queue a callback for invocation after an SRCU grace period
893 * @ssp: srcu_struct on which to queue the callback
894 * @rhp: structure to be used for queueing the SRCU callback.
895 * @func: function to be invoked after the SRCU grace period
896 *
897 * The callback function will be invoked some time after a full SRCU
898 * grace period elapses, in other words after all pre-existing SRCU
899 * read-side critical sections have completed. However, the callback
900 * function might well execute concurrently with other SRCU read-side
901 * critical sections that started after call_srcu() was invoked. SRCU
902 * read-side critical sections are delimited by srcu_read_lock() and
903 * srcu_read_unlock(), and may be nested.
904 *
905 * The callback will be invoked from process context, but must nevertheless
906 * be fast and must not block.
907 */
908void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
909 rcu_callback_t func)
910{
911 __call_srcu(ssp, rhp, func, true);
912}
913EXPORT_SYMBOL_GPL(call_srcu);
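
/*
 * Callback sketch (illustrative only; "my_data", "my_free_cb", "my_retire",
 * and "my_srcu" are hypothetical): the rcu_head is embedded in the protected
 * structure, and the callback typically recovers the enclosing structure
 * with container_of() and frees it.
 *
 *	struct my_data {
 *		struct rcu_head rh;
 *		int value;
 *	};
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_data, rh));
 *	}
 *
 *	static void my_retire(struct my_data *p)
 *	{
 *		call_srcu(&my_srcu, &p->rh, my_free_cb);
 *	}
 */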
914
915/*
916 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
917 */
918static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
919{
920 struct rcu_synchronize rcu;
921
922 RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
923 lock_is_held(&rcu_bh_lock_map) ||
924 lock_is_held(&rcu_lock_map) ||
925 lock_is_held(&rcu_sched_lock_map),
926 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
927
928 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
929 return;
930 might_sleep();
931 check_init_srcu_struct(ssp);
932 init_completion(&rcu.completion);
933 init_rcu_head_on_stack(&rcu.head);
934 __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
935 wait_for_completion(&rcu.completion);
936 destroy_rcu_head_on_stack(&rcu.head);
937
938 /*
939 * Make sure that later code is ordered after the SRCU grace
940 * period. This pairs with the spin_lock_irq_rcu_node()
941 * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed
942 * because the current CPU might have been totally uninvolved with
943 * (and thus unordered against) that grace period.
944 */
945 smp_mb();
946}
947
948/**
949 * synchronize_srcu_expedited - Brute-force SRCU grace period
950 * @ssp: srcu_struct with which to synchronize.
951 *
952 * Wait for an SRCU grace period to elapse, but be more aggressive about
953 * spinning rather than blocking when waiting.
954 *
955 * Note that synchronize_srcu_expedited() has the same deadlock and
956 * memory-ordering properties as does synchronize_srcu().
957 */
958void synchronize_srcu_expedited(struct srcu_struct *ssp)
959{
960 __synchronize_srcu(ssp, rcu_gp_is_normal());
961}
962EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
963
964/**
965 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
966 * @ssp: srcu_struct with which to synchronize.
967 *
968 * Wait for the counts of both index ranks to drain to zero. To avoid
969 * possible starvation of synchronize_srcu(), it first waits for the
970 * count of the index=((->srcu_idx & 1) ^ 1) to drain to zero,
971 * and then flips ->srcu_idx and waits for the count of the other index.
972 *
973 * Can block; must be called from process context.
974 *
975 * Note that it is illegal to call synchronize_srcu() from the corresponding
976 * SRCU read-side critical section; doing so will result in deadlock.
977 * However, it is perfectly legal to call synchronize_srcu() on one
978 * srcu_struct from some other srcu_struct's read-side critical section,
979 * as long as the resulting graph of srcu_structs is acyclic.
980 *
981 * There are memory-ordering constraints implied by synchronize_srcu().
982 * On systems with more than one CPU, when synchronize_srcu() returns,
983 * each CPU is guaranteed to have executed a full memory barrier since
984 * the end of its last corresponding SRCU read-side critical section
985 * whose beginning preceded the call to synchronize_srcu(). In addition,
986 * each CPU having an SRCU read-side critical section that extends beyond
987 * the return from synchronize_srcu() is guaranteed to have executed a
988 * full memory barrier after the beginning of synchronize_srcu() and before
989 * the beginning of that SRCU read-side critical section. Note that these
990 * guarantees include CPUs that are offline, idle, or executing in user mode,
991 * as well as CPUs that are executing in the kernel.
992 *
993 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
994 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
995 * to have executed a full memory barrier during the execution of
996 * synchronize_srcu(). This guarantee applies even if CPU A and CPU B
997 * are the same CPU, but again only if the system has more than one CPU.
998 *
999 * Of course, these memory-ordering guarantees apply only when
1000 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
1001 * passed the same srcu_struct structure.
1002 *
1003 * If SRCU is likely idle, expedite the first request. This semantic
1004 * was provided by Classic SRCU, and is relied upon by its users, so TREE
1005 * SRCU must also provide it. Note that detecting idleness is heuristic
1006 * and subject to both false positives and negatives.
1007 */
1008void synchronize_srcu(struct srcu_struct *ssp)
1009{
1010 if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
1011 synchronize_srcu_expedited(ssp);
1012 else
1013 __synchronize_srcu(ssp, true);
1014}
1015EXPORT_SYMBOL_GPL(synchronize_srcu);
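
/*
 * Updater sketch (illustrative only; "my_gp", "my_lock", and "my_srcu" are
 * hypothetical): publish the replacement, drop any spinlock (because
 * synchronize_srcu() may block), wait for pre-existing readers of the old
 * version, then free it.
 *
 *	static void my_update(struct my_data *newp)
 *	{
 *		struct my_data *oldp;
 *
 *		spin_lock(&my_lock);
 *		oldp = rcu_dereference_protected(my_gp, lockdep_is_held(&my_lock));
 *		rcu_assign_pointer(my_gp, newp);
 *		spin_unlock(&my_lock);
 *
 *		synchronize_srcu(&my_srcu);	// Wait out pre-existing readers.
 *		kfree(oldp);
 *	}
 */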
1016
1017/*
1018 * Callback function for srcu_barrier() use.
1019 */
1020static void srcu_barrier_cb(struct rcu_head *rhp)
1021{
1022 struct srcu_data *sdp;
1023 struct srcu_struct *ssp;
1024
1025 sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1026 ssp = sdp->ssp;
1027 if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1028 complete(&ssp->srcu_barrier_completion);
1029}
1030
1031/**
1032 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1033 * @ssp: srcu_struct on which to wait for in-flight callbacks.
1034 */
1035void srcu_barrier(struct srcu_struct *ssp)
1036{
1037 int cpu;
1038 struct srcu_data *sdp;
1039 unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
1040
1041 check_init_srcu_struct(ssp);
1042 mutex_lock(&ssp->srcu_barrier_mutex);
1043 if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
1044 smp_mb(); /* Force ordering following return. */
1045 mutex_unlock(&ssp->srcu_barrier_mutex);
1046 return; /* Someone else did our work for us. */
1047 }
1048 rcu_seq_start(&ssp->srcu_barrier_seq);
1049 init_completion(&ssp->srcu_barrier_completion);
1050
1051 /* Initial count prevents reaching zero until all CBs are posted. */
1052 atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
1053
1054 /*
1055 * Each pass through this loop enqueues a callback, but only
1056 * on CPUs already having callbacks enqueued. Note that if
1057	 * a CPU already has callbacks enqueued, it must have already
1058 * registered the need for a future grace period, so all we
1059 * need do is enqueue a callback that will use the same
1060 * grace period as the last callback already in the queue.
1061 */
1062 for_each_possible_cpu(cpu) {
1063 sdp = per_cpu_ptr(ssp->sda, cpu);
1064 spin_lock_irq_rcu_node(sdp);
1065 atomic_inc(&ssp->srcu_barrier_cpu_cnt);
1066 sdp->srcu_barrier_head.func = srcu_barrier_cb;
1067 debug_rcu_head_queue(&sdp->srcu_barrier_head);
1068 if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1069 &sdp->srcu_barrier_head)) {
1070 debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1071 atomic_dec(&ssp->srcu_barrier_cpu_cnt);
1072 }
1073 spin_unlock_irq_rcu_node(sdp);
1074 }
1075
1076 /* Remove the initial count, at which point reaching zero can happen. */
1077 if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1078 complete(&ssp->srcu_barrier_completion);
1079 wait_for_completion(&ssp->srcu_barrier_completion);
1080
1081 rcu_seq_end(&ssp->srcu_barrier_seq);
1082 mutex_unlock(&ssp->srcu_barrier_mutex);
1083}
1084EXPORT_SYMBOL_GPL(srcu_barrier);
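
/*
 * Usage note (a sketch-level observation, not an additional API guarantee):
 * srcu_barrier() waits only for callbacks already queued by call_srcu() to
 * be invoked.  It is not a substitute for synchronize_srcu(), and it does
 * not prevent new callbacks from being queued concurrently, so callers
 * typically stop posting new callbacks first, as in the teardown sketch
 * following cleanup_srcu_struct() above.
 */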
1085
1086/**
1087 * srcu_batches_completed - return batches completed.
1088 * @ssp: srcu_struct on which to report batch completion.
1089 *
1090 * Report the number of batches, correlated with, but not necessarily
1091 * precisely the same as, the number of grace periods that have elapsed.
1092 */
1093unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1094{
1095 return READ_ONCE(ssp->srcu_idx);
1096}
1097EXPORT_SYMBOL_GPL(srcu_batches_completed);
1098
1099/*
1100 * Core SRCU state machine. Push state bits of ->srcu_gp_seq
1101 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
1102 * completed in that state.
1103 */
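/*
 * In terms of the ->srcu_gp_seq state bits, each grace period moves through
 * SRCU_STATE_IDLE -> SCAN1 -> SCAN2 -> IDLE: SCAN1 waits for readers still
 * using the currently inactive rank of counters, srcu_flip() then directs
 * new readers onto that rank, and SCAN2 waits for readers on the rank that
 * was active when the grace period started.
 */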
1104static void srcu_advance_state(struct srcu_struct *ssp)
1105{
1106 int idx;
1107
1108 mutex_lock(&ssp->srcu_gp_mutex);
1109
1110 /*
1111 * Because readers might be delayed for an extended period after
1112 * fetching ->srcu_idx for their index, at any point in time there
1113 * might well be readers using both idx=0 and idx=1. We therefore
1114 * need to wait for readers to clear from both index values before
1115 * invoking a callback.
1116 *
1117 * The load-acquire ensures that we see the accesses performed
1118 * by the prior grace period.
1119 */
1120 idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
1121 if (idx == SRCU_STATE_IDLE) {
1122 spin_lock_irq_rcu_node(ssp);
1123 if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1124 WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
1125 spin_unlock_irq_rcu_node(ssp);
1126 mutex_unlock(&ssp->srcu_gp_mutex);
1127 return;
1128 }
1129 idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
1130 if (idx == SRCU_STATE_IDLE)
1131 srcu_gp_start(ssp);
1132 spin_unlock_irq_rcu_node(ssp);
1133 if (idx != SRCU_STATE_IDLE) {
1134 mutex_unlock(&ssp->srcu_gp_mutex);
1135 return; /* Someone else started the grace period. */
1136 }
1137 }
1138
1139 if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1140 idx = 1 ^ (ssp->srcu_idx & 1);
1141 if (!try_check_zero(ssp, idx, 1)) {
1142 mutex_unlock(&ssp->srcu_gp_mutex);
1143 return; /* readers present, retry later. */
1144 }
1145 srcu_flip(ssp);
1146 spin_lock_irq_rcu_node(ssp);
1147 rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
1148 spin_unlock_irq_rcu_node(ssp);
1149 }
1150
1151 if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1152
1153 /*
1154 * SRCU read-side critical sections are normally short,
1155 * so check at least twice in quick succession after a flip.
1156 */
1157 idx = 1 ^ (ssp->srcu_idx & 1);
1158 if (!try_check_zero(ssp, idx, 2)) {
1159 mutex_unlock(&ssp->srcu_gp_mutex);
1160 return; /* readers present, retry later. */
1161 }
1162 srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
1163 }
1164}
1165
1166/*
1167 * Invoke a limited number of SRCU callbacks that have passed through
1168 * their grace period. If there are more to do, SRCU will reschedule
1169 * the workqueue. Note that needed memory barriers have been executed
1170 * in this task's context by srcu_readers_active_idx_check().
1171 */
1172static void srcu_invoke_callbacks(struct work_struct *work)
1173{
1174 bool more;
1175 struct rcu_cblist ready_cbs;
1176 struct rcu_head *rhp;
1177 struct srcu_data *sdp;
1178 struct srcu_struct *ssp;
1179
1180 sdp = container_of(work, struct srcu_data, work);
1181
1182 ssp = sdp->ssp;
1183 rcu_cblist_init(&ready_cbs);
1184 spin_lock_irq_rcu_node(sdp);
1185 rcu_segcblist_advance(&sdp->srcu_cblist,
1186 rcu_seq_current(&ssp->srcu_gp_seq));
1187 if (sdp->srcu_cblist_invoking ||
1188 !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1189 spin_unlock_irq_rcu_node(sdp);
1190 return; /* Someone else on the job or nothing to do. */
1191 }
1192
1193 /* We are on the job! Extract and invoke ready callbacks. */
1194 sdp->srcu_cblist_invoking = true;
1195 rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1196 spin_unlock_irq_rcu_node(sdp);
1197 rhp = rcu_cblist_dequeue(&ready_cbs);
1198 for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1199 debug_rcu_head_unqueue(rhp);
1200 local_bh_disable();
1201 rhp->func(rhp);
1202 local_bh_enable();
1203 }
1204
1205 /*
1206 * Update counts, accelerate new callbacks, and if needed,
1207 * schedule another round of callback invocation.
1208 */
1209 spin_lock_irq_rcu_node(sdp);
1210 rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
1211 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
1212 rcu_seq_snap(&ssp->srcu_gp_seq));
1213 sdp->srcu_cblist_invoking = false;
1214 more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1215 spin_unlock_irq_rcu_node(sdp);
1216 if (more)
1217 srcu_schedule_cbs_sdp(sdp, 0);
1218}
1219
1220/*
1221 * Finished one round of SRCU grace period. Start another if there are
1222 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
1223 */
1224static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1225{
1226 bool pushgp = true;
1227
1228 spin_lock_irq_rcu_node(ssp);
1229 if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1230 if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
1231 /* All requests fulfilled, time to go idle. */
1232 pushgp = false;
1233 }
1234 } else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
1235 /* Outstanding request and no GP. Start one. */
1236 srcu_gp_start(ssp);
1237 }
1238 spin_unlock_irq_rcu_node(ssp);
1239
1240 if (pushgp)
1241 queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
1242}
1243
1244/*
1245 * This is the work-queue function that handles SRCU grace periods.
1246 */
1247static void process_srcu(struct work_struct *work)
1248{
1249 struct srcu_struct *ssp;
1250
1251 ssp = container_of(work, struct srcu_struct, work.work);
1252
1253 srcu_advance_state(ssp);
1254 srcu_reschedule(ssp, srcu_get_delay(ssp));
1255}
1256
1257void srcutorture_get_gp_data(enum rcutorture_type test_type,
1258 struct srcu_struct *ssp, int *flags,
1259 unsigned long *gp_seq)
1260{
1261 if (test_type != SRCU_FLAVOR)
1262 return;
1263 *flags = 0;
1264 *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
1265}
1266EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
1267
1268void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1269{
1270 int cpu;
1271 int idx;
1272 unsigned long s0 = 0, s1 = 0;
1273
1274 idx = ssp->srcu_idx & 0x1;
1275 pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
1276 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
1277 for_each_possible_cpu(cpu) {
1278 unsigned long l0, l1;
1279 unsigned long u0, u1;
1280 long c0, c1;
1281 struct srcu_data *sdp;
1282
1283 sdp = per_cpu_ptr(ssp->sda, cpu);
1284 u0 = data_race(sdp->srcu_unlock_count[!idx]);
1285 u1 = data_race(sdp->srcu_unlock_count[idx]);
1286
1287 /*
1288 * Make sure that a lock is always counted if the corresponding
1289 * unlock is counted.
1290 */
1291 smp_rmb();
1292
1293 l0 = data_race(sdp->srcu_lock_count[!idx]);
1294 l1 = data_race(sdp->srcu_lock_count[idx]);
1295
1296 c0 = l0 - u0;
1297 c1 = l1 - u1;
1298 pr_cont(" %d(%ld,%ld %c)",
1299 cpu, c0, c1,
1300 "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
1301 s0 += c0;
1302 s1 += c1;
1303 }
1304 pr_cont(" T(%ld,%ld)\n", s0, s1);
1305}
1306EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1307
1308static int __init srcu_bootup_announce(void)
1309{
1310 pr_info("Hierarchical SRCU implementation.\n");
1311 if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
1312 pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
1313 return 0;
1314}
1315early_initcall(srcu_bootup_announce);
1316
1317void __init srcu_init(void)
1318{
1319 struct srcu_struct *ssp;
1320
1321 srcu_init_done = true;
1322 while (!list_empty(&srcu_boot_list)) {
1323 ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
1324 work.work.entry);
1325 check_init_srcu_struct(ssp);
1326 list_del_init(&ssp->work.work.entry);
1327 queue_work(rcu_gp_wq, &ssp->work.work);
1328 }
1329}
1330
1331#ifdef CONFIG_MODULES
1332
1333/* Initialize any global-scope srcu_struct structures used by this module. */
1334static int srcu_module_coming(struct module *mod)
1335{
1336 int i;
1337 struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1338 int ret;
1339
1340 for (i = 0; i < mod->num_srcu_structs; i++) {
1341 ret = init_srcu_struct(*(sspp++));
1342 if (WARN_ON_ONCE(ret))
1343 return ret;
1344 }
1345 return 0;
1346}
1347
1348/* Clean up any global-scope srcu_struct structures used by this module. */
1349static void srcu_module_going(struct module *mod)
1350{
1351 int i;
1352 struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1353
1354 for (i = 0; i < mod->num_srcu_structs; i++)
1355 cleanup_srcu_struct(*(sspp++));
1356}
1357
1358/* Handle one module, either coming or going. */
1359static int srcu_module_notify(struct notifier_block *self,
1360 unsigned long val, void *data)
1361{
1362 struct module *mod = data;
1363 int ret = 0;
1364
1365 switch (val) {
1366 case MODULE_STATE_COMING:
1367 ret = srcu_module_coming(mod);
1368 break;
1369 case MODULE_STATE_GOING:
1370 srcu_module_going(mod);
1371 break;
1372 default:
1373 break;
1374 }
1375 return ret;
1376}
1377
1378static struct notifier_block srcu_module_nb = {
1379 .notifier_call = srcu_module_notify,
1380 .priority = 0,
1381};
1382
1383static __init int init_srcu_module_notifier(void)
1384{
1385 int ret;
1386
1387 ret = register_module_notifier(&srcu_module_nb);
1388 if (ret)
1389 pr_warn("Failed to register srcu module notifier\n");
1390 return ret;
1391}
1392late_initcall(init_srcu_module_notifier);
1393
1394#endif /* #ifdef CONFIG_MODULES */