// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
};
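
/*
 * Both tail pointers referencing ->rcucblist itself denotes an empty
 * list, which is how the control block below starts out.
 */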

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
};

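/*
 * Wait until all of the callbacks queued by earlier calls to call_rcu()
 * have been invoked. Here wait_rcu_gp() simply posts a callback through
 * call_rcu() and blocks until that callback runs.
 */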
void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL(rcu_barrier);

/* Record an rcu quiescent state. */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
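		/* All callbacks queued so far have now passed a grace period. */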
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it. This function must
 * be called from hardirq context. It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
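		/*
		 * Callbacks are pending but no quiescent state has been
		 * seen: force a reschedule so that the resulting context
		 * switch can supply one.
		 */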
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}

/* Invoke the RCU callbacks whose grace period has elapsed. */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
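	/* If everything was ready, ->curtail points into the detached list. */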
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim("", list);
		local_bh_enable();
		list = next;
	}
}

/*
 * Wait for a grace period to elapse. But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent
 * state, and so on a UP system, synchronize_rcu() need do nothing.
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh? (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
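
/*
 * Example of the update-side pattern this supports (a sketch: "gp" is a
 * hypothetical RCU-protected pointer and "mylock" a hypothetical
 * update-side lock, neither defined in this file):
 *
 *	spin_lock(&mylock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&mylock));
 *	rcu_assign_pointer(gp, NULL);
 *	spin_unlock(&mylock);
 *	synchronize_rcu();
 *	kfree(p);
 */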

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period. But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
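
/*
 * Example use of call_rcu() (a sketch: "struct foo", "foo_reclaim", and
 * "old_fp" are hypothetical and not defined in this file):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	...
 *	call_rcu(&old_fp->rcu, foo_reclaim);
 */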
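/*
 * Boot-time setup: register the softirq handler that invokes ready
 * callbacks, then run any early boot self tests and let SRCU complete
 * its own early initialization.
 */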
void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
	srcu_init();
}