// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };
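
/*
 * Sketch of the gp_state machine implemented below ("GP elapses" means
 * the grace period behind the queued callback, or behind the
 * synchronize_rcu() in rcu_sync_enter(), has completed):
 *
 *	GP_IDLE   -> GP_ENTER	rcu_sync_enter() on an idle structure
 *	GP_ENTER  -> GP_PASSED	GP elapses; all readers are on the slow path
 *	GP_PASSED -> GP_EXIT	last rcu_sync_exit(); callback queued
 *	GP_EXIT   -> GP_IDLE	GP elapses with no new enter/exit activity
 *	GP_EXIT   -> GP_REPLAY	another enter/exit pair before the GP ends
 *	GP_REPLAY -> GP_EXIT	callback requeued to wait for a later GP
 *	GP_EXIT/GP_REPLAY -> GP_PASSED	GP elapses while a new
 *				rcu_sync_enter() holds gp_count
 */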

#define rss_lock gp_wait.lock

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 */
void rcu_sync_init(struct rcu_sync *rsp)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
}

/**
 * rcu_sync_enter_start() - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
}
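
/*
 * A minimal boot-time usage sketch for rcu_sync_enter_start(); the
 * my_subsys_init() and my_rss names are hypothetical:
 *
 *	static struct rcu_sync my_rss;
 *
 *	void __init my_subsys_init(void)
 *	{
 *		rcu_sync_init(&my_rss);
 *		rcu_sync_enter_start(&my_rss);
 *	}
 *
 * Readers then start out on their slow paths, and intervening
 * rcu_sync_{enter,exit}() pairs are NO-OPs until a final
 * rcu_sync_exit() lets readers back onto the fast path.
 */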

static void rcu_sync_func(struct rcu_head *rhp);

static void rcu_sync_call(struct rcu_sync *rsp)
{
	call_rcu_hurry(&rsp->cb_head, rcu_sync_func);
}

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to call_rcu() by rcu_sync_enter() and
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of enter/exit.
 *
 * If it is called by rcu_sync_enter() it signals that all the readers were
 * switched onto slow path.
 *
 * If it is called by rcu_sync_exit() it takes action based on events that
 * have taken place in the meantime, so that closely spaced rcu_sync_enter()
 * and rcu_sync_exit() pairs need not wait for a grace period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period). If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
		 */
		WRITE_ONCE(rsp->gp_state, GP_PASSED);
		wake_up_locked(&rsp->gp_wait);
	} else if (rsp->gp_state == GP_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback to
		 * catch a later GP.
		 */
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
		rcu_sync_call(rsp);
	} else {
		/*
		 * We're at least a GP after the last rcu_sync_exit(); everybody
		 * will now have observed the write side critical section.
		 * Let 'em rip!
		 */
		WRITE_ONCE(rsp->gp_state, GP_IDLE);
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update. After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths. A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period; however, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	int gp_state;

	spin_lock_irq(&rsp->rss_lock);
	gp_state = rsp->gp_state;
	if (gp_state == GP_IDLE) {
		WRITE_ONCE(rsp->gp_state, GP_ENTER);
		WARN_ON_ONCE(rsp->gp_count);
		/*
		 * Note that we could simply do rcu_sync_call(rsp) here and
		 * avoid the "if (gp_state == GP_IDLE)" block below.
		 *
		 * However, synchronize_rcu() can be faster if rcu_expedited
		 * or rcu_blocking_is_gp() is true.
		 *
		 * Another reason is that we can't wait for an RCU callback
		 * if we are called at early boot time, but this shouldn't
		 * happen.
		 */
	}
	rsp->gp_count++;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state == GP_IDLE) {
		/*
		 * See the comment above; this simply does the "synchronous"
		 * call_rcu(rcu_sync_func) which does GP_ENTER -> GP_PASSED.
		 */
		synchronize_rcu();
		rcu_sync_func(&rsp->cb_head);
		/* Not really needed, wait_event() would see GP_PASSED. */
		return;
	}

	wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
}
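
/*
 * Illustrative note: if two updaters call rcu_sync_enter() at about the
 * same time, only the first (which finds GP_IDLE) pays for the
 * synchronize_rcu(); the second sees GP_ENTER, sleeps in wait_event(),
 * and both return once gp_state reaches GP_PASSED.
 */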

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed. After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);

	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->gp_state == GP_PASSED) {
			WRITE_ONCE(rsp->gp_state, GP_EXIT);
			rcu_sync_call(rsp);
		} else if (rsp->gp_state == GP_EXIT) {
			WRITE_ONCE(rsp->gp_state, GP_REPLAY);
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int gp_state;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->gp_state == GP_REPLAY)
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
	gp_state = rsp->gp_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state != GP_IDLE) {
		rcu_barrier();
		WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
	}
}
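
/*
 * Teardown sketch (hypothetical caller): by the time rcu_sync_dtor()
 * runs, every rcu_sync_enter() must have been matched by an
 * rcu_sync_exit(); the destructor then waits out any callback still
 * queued by the final exit:
 *
 *	void my_subsys_teardown(void)
 *	{
 *		rcu_sync_exit(&my_rss);
 *		rcu_sync_dtor(&my_rss);
 *	}
 */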