/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);
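
/*
 * Because this file is normally built into the kernel, these module
 * parameters are set on the boot command line with an "srcutree."
 * prefix, for example (the values below are illustrative only):
 *
 *	srcutree.exp_holdoff=50000 srcutree.counter_wrap_check=255
 *
 * Both are created with mode 0444, so they can be inspected under
 * /sys/module/srcutree/parameters/ but not changed at runtime.
 */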

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
static void process_srcu(struct work_struct *work);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)					\
do {								\
	spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)				\
do {								\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));		\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_irq_rcu_node(p)				\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)			\
do {								\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)		\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &sp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->sp = sp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
{
	mutex_init(&sp->srcu_cb_mutex);
	mutex_init(&sp->srcu_gp_mutex);
	sp->srcu_idx = 0;
	sp->srcu_gp_seq = 0;
	sp->srcu_barrier_seq = 0;
	mutex_init(&sp->srcu_barrier_mutex);
	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	if (!is_static)
		sp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(sp, is_static);
	sp->srcu_gp_seq_needed_exp = 0;
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
	return sp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
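
/*
 * Illustrative sketch (not part of this file): a dynamically
 * initialized SRCU domain; the names "my_srcu" and "my_init" are
 * hypothetical.  init_srcu_struct() returns 0 on success and -ENOMEM
 * on allocation failure.  DEFINE_SRCU() and DEFINE_STATIC_SRCU()
 * provide the statically allocated alternative, which needs no
 * explicit initialization call.
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 */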

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use sp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *sp)
{
	unsigned long flags;

	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(sp, flags);
	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(sp, flags);
		return;
	}
	init_srcu_struct_fields(sp, true);
	spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *sp)
{
	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(sp)))
		return; /* Leakage unless caller handles error. */
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	flush_delayed_work(&sp->work);
	for_each_possible_cpu(cpu)
		flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(sp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n", __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(sp->sda);
	sp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->srcu_idx) & 0x1;
	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
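
/*
 * Illustrative sketch (not part of this file): readers normally use
 * the srcu_read_lock() and srcu_read_unlock() wrappers from
 * <linux/srcu.h>, which are built on the two functions above.  Unlike
 * vanilla RCU readers, the critical section may block.  The names
 * "my_srcu", "gp", "struct foo", and "do_something_with()" are
 * hypothetical.
 *
 *	struct foo *p;
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);
 *	srcu_read_unlock(&my_srcu, idx);
 */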

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}

/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&sp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(sp);
	idx = rcu_seq_state(sp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(sp);
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&sp->srcu_gp_seq);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
		sp->srcu_gp_seq_needed_exp = gpseq;
	spin_unlock_irq_rcu_node(sp);
	mutex_unlock(&sp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	rcu_for_each_node_breadth_first(sp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(sp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(sp);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
		srcu_gp_start(sp);
		spin_unlock_irq_rcu_node(sp);
		srcu_reschedule(sp, 0);
	} else {
		spin_unlock_irq_rcu_node(sp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 */
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(sp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
		srcu_gp_start(sp);
		queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp));
	}
	spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !srcu_get_delay(sp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If the local srcu_data structure has callbacks, not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, sp->srcu_last_gp_end,
			       sp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&sp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(sp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	spin_lock_rcu_node(sdp);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	s = rcu_seq_snap(&sp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(sp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(sp, sdp->mynode, s);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct in which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(sp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
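
/*
 * Illustrative sketch (not part of this file): the usual call_srcu()
 * pattern embeds the rcu_head in the protected structure and uses
 * container_of() in the callback to recover the enclosing object.
 * The names "struct foo", "foo_reclaim", "my_srcu", and "fp" are
 * hypothetical.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rh);
 *
 *		kfree(fp);
 *	}
 *
 *	call_srcu(&my_srcu, &fp->rh, foo_reclaim);
 */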

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(sp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the count of both indexes to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the
 * count of the index=((->srcu_idx & 1) ^ 1) to drain to zero, then
 * flips ->srcu_idx and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU-sched read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(sp);
	else
		__synchronize_srcu(sp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
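
/*
 * Illustrative sketch (not part of this file): a typical updater
 * unpublishes an element, waits for pre-existing readers, and only
 * then frees the element.  The names "my_srcu", "my_lock", "gp", and
 * "new" are hypothetical; "my_lock" is assumed to serialize updaters.
 *
 *	struct foo *old;
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 */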

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	sp = sdp->sp;
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);

	check_init_srcu_struct(sp);
	mutex_lock(&sp->srcu_barrier_mutex);
	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&sp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&sp->srcu_barrier_seq);
	init_completion(&sp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&sp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&sp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
	wait_for_completion(&sp->srcu_barrier_completion);

	rcu_seq_end(&sp->srcu_barrier_seq);
	mutex_unlock(&sp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
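
/*
 * Illustrative sketch (not part of this file): typical shutdown
 * ordering for an SRCU domain that uses call_srcu().  Posting of new
 * callbacks is assumed to have been stopped before this point, and
 * "my_srcu" is hypothetical.  Calling cleanup_srcu_struct() without
 * first waiting for in-flight callbacks could trip its WARN_ON()s.
 *
 *	srcu_barrier(&my_srcu);
 *	cleanup_srcu_struct(&my_srcu);
 */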

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *sp)
{
	int idx;

	mutex_lock(&sp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(sp);
		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(sp);
			mutex_unlock(&sp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(sp);
		spin_unlock_irq_rcu_node(sp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 1)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(sp);
		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 2)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(work, struct srcu_data, work.work);
	sp = sdp->sp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(sp);
	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(sp);
	}
	spin_unlock_irq_rcu_node(sp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &sp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(sp);
	srcu_reschedule(sp, srcu_get_delay(sp));
}

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gpnum, unsigned long *completed)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*completed = rcu_seq_ctr(sp->srcu_gp_seq);
	*gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = sp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *counts;

		counts = per_cpu_ptr(sp->sda, cpu);
		u0 = counts->srcu_unlock_count[!idx];
		u1 = counts->srcu_unlock_count[idx];

		/*
		 * Make sure that a lock is always counted if the corresponding
		 * unlock is counted.
		 */
		smp_rmb();

		l0 = counts->srcu_lock_count[!idx];
		l1 = counts->srcu_lock_count[idx];

		c0 = l0 - u0;
		c1 = l1 - u1;
		pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);