// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	unsigned long gp_seq;		/* Grace-period counter. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
	.gp_seq		= 0 - 300UL,
};
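
/*
 * Explanatory note (not from the original file): the callback list is
 * singly linked, with ->donetail marking the end of the segment whose
 * grace period has already elapsed and ->curtail marking the end of the
 * whole list, so donetail == curtail means that no callbacks are still
 * waiting for a grace period.  Starting ->gp_seq at 0 - 300UL makes the
 * counter wrap after 150 grace periods, presumably so that bugs in code
 * comparing grace-period cookies across a wrap surface shortly after boot.
 */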

void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu_hurry);
}
EXPORT_SYMBOL(rcu_barrier);
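
/*
 * Tiny RCU runs on exactly one CPU and invokes callbacks in posting
 * order, so rcu_barrier() need only post a single callback through
 * wait_rcu_gp() and wait for it: all earlier callbacks must have been
 * invoked by the time that callback runs.
 */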

/* Record an rcu quiescent state. */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
	local_irq_restore(flags);
}
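
/*
 * The increment of ->gp_seq by two above keeps the counter even, which
 * appears intentional: cookies handed out by get_state_synchronize_rcu()
 * then never collide with the RCU_GET_STATE_COMPLETED value that
 * poll_state_synchronize_rcu() below treats as always-completed.
 */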

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}
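
/*
 * An interrupt taken from usermode implies that the CPU was outside any
 * RCU read-side critical section, hence the immediate rcu_qs() above.
 * Otherwise, if callbacks are waiting for a quiescent state, a
 * reschedule is requested so that the resulting context switch can
 * supply one.
 */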

/*
 * Reclaim the specified callback, either by invoking it for non-kfree cases or
 * freeing it directly (for kfree).  Return true if kfreeing, false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kvfree_rcu_offset(offset)) {
		trace_rcu_invoke_kvfree_callback("", head, offset);
		kvfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	}

	trace_rcu_invoke_callback("", head);
	f = head->func;
	debug_rcu_head_callback(head);
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}
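
/*
 * Illustrative sketch ("struct foo" and "fp" below are hypothetical, not
 * from this file): kfree_rcu() and friends pass no function pointer at
 * all.  Instead they encode the offset of the rcu_head within the
 * enclosing object as the ->func value, which is what
 * __is_kvfree_rcu_offset() detects above:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	kfree_rcu(fp, rh);	// ->func becomes offsetof(struct foo, rh)
 *
 * rcu_reclaim_tiny() then recovers the base of the enclosing object as
 * (void *)head - offset and hands it to kvfree().
 */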

/* Invoke the RCU callbacks whose grace period has elapsed. */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		rcu_reclaim_tiny(list);
		local_bh_enable();
		list = next;
	}
}
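
/*
 * A note on the splice above: detaching the "done" segment leaves
 * ->rcucblist pointing at the first not-yet-ready callback, so
 * ->donetail is re-aimed at ->rcucblist unconditionally, and ->curtail
 * is re-aimed only if the whole list was drained.  Callbacks are then
 * invoked with softirqs disabled across each invocation but with
 * interrupts enabled, so new callbacks can be posted concurrently.
 */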

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent state,
 * and so on a UP system, synchronize_rcu() need do nothing, other than
 * let the polled APIs know that another grace period elapsed.
 *
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 2);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
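
/*
 * Illustrative updater sketch ("gp", "gp_lock", and "old" below are
 * hypothetical, not from this file): a typical synchronize_rcu() caller
 * unpublishes an object and waits for a grace period before freeing it:
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, NULL);
 *	synchronize_rcu();	// all pre-existing readers have finished
 *	kfree(old);
 */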

static void tiny_rcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	static atomic_t doublefrees;
	unsigned long flags;

	if (debug_rcu_head_queue(head)) {
		if (atomic_inc_return(&doublefrees) < 4) {
			pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
			mem_dump_obj(head);
		}

		if (!__is_kvfree_rcu_offset((unsigned long)head->func))
			WRITE_ONCE(head->func, tiny_rcu_leak_callback);
		return;
	}

	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
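
/*
 * Illustrative call_rcu() sketch ("struct foo", "foo_reclaim", and "fp"
 * below are hypothetical, not from this file): callers embed an
 * rcu_head in their own structure and recover the enclosing object in
 * the callback:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_rcu(&fp->rh, foo_reclaim);
 */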

/*
 * Store a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = RCU_GET_STATE_COMPLETED;
}
EXPORT_SYMBOL_GPL(get_completed_synchronize_rcu_full);

/*
 * Return a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
unsigned long get_state_synchronize_rcu(void)
{
	return READ_ONCE(rcu_ctrlblk.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/*
 * Return a grace-period-counter "cookie" and ensure that a future grace
 * period completes.  For more information, see the Tree RCU header comment.
 */
unsigned long start_poll_synchronize_rcu(void)
{
	unsigned long gp_seq = get_state_synchronize_rcu();

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
	return gp_seq;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);

/*
 * Return true if the grace period corresponding to oldstate has completed
 * and false otherwise.  For more information, see the Tree RCU header
 * comment.
 */
bool poll_state_synchronize_rcu(unsigned long oldstate)
{
	return oldstate == RCU_GET_STATE_COMPLETED || READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
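
/*
 * Illustrative polled-API sketch ("cookie" and "old" below are
 * hypothetical, not from this file): a caller that cannot block can
 * snapshot the grace-period counter and check it later instead of
 * calling synchronize_rcu():
 *
 *	unsigned long cookie = start_poll_synchronize_rcu();
 *	...
 *	if (poll_state_synchronize_rcu(cookie))
 *		kfree(old);	// a full grace period has elapsed
 *	else
 *		...		// retry later, or fall back to call_rcu()
 */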

#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	if (head)
		kasan_record_aux_stack_noalloc(ptr);

	__kvfree_call_rcu(head, ptr);
}
EXPORT_SYMBOL_GPL(kvfree_call_rcu);
#endif
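
/*
 * The kasan_record_aux_stack_noalloc() call above records the current
 * stack trace in the object's KASAN metadata, so that a later
 * use-after-free report can show where the kvfree_rcu() was queued,
 * not just where the object was eventually freed.
 */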

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
}