v6.9.4
// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };
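
/*
 * Informal summary (not from the original file) of the state transitions
 * driven by rcu_sync_enter(), rcu_sync_exit() and the rcu_sync_func()
 * callback defined below:
 *
 *	GP_IDLE   --enter()-------------------> GP_ENTER   (GP started)
 *	GP_ENTER  --callback------------------> GP_PASSED  (readers on slowpath)
 *	GP_PASSED --last exit()---------------> GP_EXIT    (GP started)
 *	GP_EXIT   --exit() while pending------> GP_REPLAY  (need one more GP)
 *	GP_REPLAY --callback------------------> GP_EXIT    (callback requeued)
 *	GP_EXIT   --callback------------------> GP_IDLE    (fastpath restored)
 *	GP_EXIT   --callback, enter() raced---> GP_PASSED
 */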

#define	rss_lock	gp_wait.lock

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 */
void rcu_sync_init(struct rcu_sync *rsp)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
}
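
/*
 * Reader-side usage sketch (not from the original file; the my_* names
 * are hypothetical).  Readers check rcu_sync_is_idle() under RCU to
 * choose between a cheap fastpath and a slowpath, in the style of
 * percpu-rwsem:
 *
 *	static DEFINE_PER_CPU(int, my_cnt);
 *	static struct rcu_sync my_rss;		// set up via rcu_sync_init()
 *
 *	rcu_read_lock();
 *	if (rcu_sync_is_idle(&my_rss))
 *		this_cpu_inc(my_cnt);		// fastpath: no updater active
 *	else
 *		my_read_lock_slow();		// hypothetical slowpath helper
 *	rcu_read_unlock();
 */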

static void rcu_sync_func(struct rcu_head *rhp);

/* Queue rcu_sync_func() to be invoked after a full grace period elapses. */
static void rcu_sync_call(struct rcu_sync *rsp)
{
	call_rcu_hurry(&rsp->cb_head, rcu_sync_func);
}

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to call_rcu() by rcu_sync_enter() and
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of enter/exit.
 *
 * If it is called by rcu_sync_enter() it signals that all the readers were
 * switched onto slow path.
 *
 * If it is called by rcu_sync_exit() it takes action based on events that
 * have taken place in the meantime, so that closely spaced rcu_sync_enter()
 * and rcu_sync_exit() pairs need not wait for a grace period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
		 */
		WRITE_ONCE(rsp->gp_state, GP_PASSED);
		wake_up_locked(&rsp->gp_wait);
	} else if (rsp->gp_state == GP_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback to
		 * catch a later GP.
		 */
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
		rcu_sync_call(rsp);
	} else {
		/*
		 * We're at least a GP after the last rcu_sync_exit(); everybody
		 * will now have observed the write side critical section.
		 * Let 'em rip!
		 */
		WRITE_ONCE(rsp->gp_state, GP_IDLE);
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}
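
/*
 * Illustrative timeline (not from the original file) of the GP_REPLAY
 * case handled above:
 *
 *	rcu_sync_exit()		GP_PASSED -> GP_EXIT, callback queued
 *	rcu_sync_enter()	returns at once, gp_state >= GP_PASSED
 *	rcu_sync_exit()		GP_EXIT -> GP_REPLAY, callback still pending
 *	callback fires		GP_REPLAY -> GP_EXIT, callback requeued
 *	callback fires		GP_EXIT -> GP_IDLE, fastpath restored
 *
 * The requeue guarantees a full grace period after the second
 * rcu_sync_exit() before readers regain their fastpaths.
 */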

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period; however, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	int gp_state;

	spin_lock_irq(&rsp->rss_lock);
	gp_state = rsp->gp_state;
	if (gp_state == GP_IDLE) {
		WRITE_ONCE(rsp->gp_state, GP_ENTER);
		WARN_ON_ONCE(rsp->gp_count);
		/*
		 * Note that we could simply do rcu_sync_call(rsp) here and
		 * avoid the "if (gp_state == GP_IDLE)" block below.
		 *
		 * However, synchronize_rcu() can be faster if rcu_expedited
		 * or rcu_blocking_is_gp() is true.
		 *
		 * Another reason is that we can't wait for an rcu callback
		 * if we are called at early boot time, but this shouldn't
		 * happen.
		 */
	}
	rsp->gp_count++;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state == GP_IDLE) {
		/*
		 * See the comment above; this simply does the "synchronous"
		 * call_rcu(rcu_sync_func), which does GP_ENTER -> GP_PASSED.
		 */
		synchronize_rcu();
		rcu_sync_func(&rsp->cb_head);
		/* Not really needed, wait_event() would see GP_PASSED. */
		return;
	}

	wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
}

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);

	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->gp_state == GP_PASSED) {
			WRITE_ONCE(rsp->gp_state, GP_EXIT);
			rcu_sync_call(rsp);
		} else if (rsp->gp_state == GP_EXIT) {
			WRITE_ONCE(rsp->gp_state, GP_REPLAY);
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}
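
/*
 * Writer-side usage sketch (not from the original file; my_* names are
 * hypothetical).  rcu_sync_enter() forces readers onto the slowpath for
 * the duration of the update:
 *
 *	struct rcu_sync my_rss;
 *
 *	rcu_sync_init(&my_rss);
 *	...
 *	rcu_sync_enter(&my_rss);	// wait for readers to see !idle
 *	do_my_update();			// hypothetical write-side work
 *	rcu_sync_exit(&my_rss);		// fastpath restored after a GP
 */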

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int gp_state;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->gp_state == GP_REPLAY)
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
	gp_state = rsp->gp_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state != GP_IDLE) {
		rcu_barrier();
		WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
	}
}
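
/*
 * Teardown sketch (not from the original file; obj is hypothetical).
 * rcu_sync_dtor() must run after the last rcu_sync_exit() and before
 * the structure is freed, so that a still-pending rcu_sync_func()
 * callback cannot touch freed memory:
 *
 *	rcu_sync_exit(&obj->rss);	// last updater done
 *	rcu_sync_dtor(&obj->rss);	// rcu_barrier()s any pending callback
 *	kfree(obj);			// now safe to free
 */
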
v4.17
 
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

#ifdef CONFIG_PROVE_RCU
#define __INIT_HELD(func)	.held = func,
#else
#define __INIT_HELD(func)
#endif

static const struct {
	void (*sync)(void);
	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
	void (*wait)(void);
#ifdef CONFIG_PROVE_RCU
	int  (*held)(void);
#endif
} gp_ops[] = {
	[RCU_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_held)
	},
	[RCU_SCHED_SYNC] = {
		.sync = synchronize_sched,
		.call = call_rcu_sched,
		.wait = rcu_barrier_sched,
		__INIT_HELD(rcu_read_lock_sched_held)
	},
	[RCU_BH_SYNC] = {
		.sync = synchronize_rcu_bh,
		.call = call_rcu_bh,
		.wait = rcu_barrier_bh,
		__INIT_HELD(rcu_read_lock_bh_held)
	},
};
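
/*
 * Usage sketch (not from the original file; my_rss is hypothetical): in
 * this version the RCU flavor is chosen at init time by indexing
 * gp_ops[], e.g. sched-RCU for readers that rely on
 * rcu_read_lock_sched():
 *
 *	struct rcu_sync my_rss;
 *
 *	rcu_sync_init(&my_rss, RCU_SCHED_SYNC);
 */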

enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };

#define	rss_lock	gp_wait.lock

#ifdef CONFIG_PROVE_RCU
void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
{
	RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
			 "suspicious rcu_sync_is_idle() usage");
}

EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
#endif

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 * @type: Flavor of RCU with which to synchronize rcu_sync structure
 */
void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
	rsp->gp_type = type;
}

/**
 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
}
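
/*
 * Usage sketch (not from the original file; my_rss is hypothetical).
 * rcu_sync_enter_start() lets init-time code begin with readers already
 * on the slowpath, without waiting for a grace period; a later unpaired
 * rcu_sync_exit() re-enables the fastpath:
 *
 *	rcu_sync_init(&my_rss, RCU_SCHED_SYNC);
 *	rcu_sync_enter_start(&my_rss);	// gp_count = 1, GP_PASSED
 *	...				// enter/exit pairs are now NO-OPs
 *	rcu_sync_exit(&my_rss);		// fastpath enabled after a GP
 */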

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period; however, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	bool need_wait, need_sync;

	spin_lock_irq(&rsp->rss_lock);
	need_wait = rsp->gp_count++;
	need_sync = rsp->gp_state == GP_IDLE;
	if (need_sync)
		rsp->gp_state = GP_PENDING;
	spin_unlock_irq(&rsp->rss_lock);

	BUG_ON(need_wait && need_sync);

	if (need_sync) {
		gp_ops[rsp->gp_type].sync();
		rsp->gp_state = GP_PASSED;
		wake_up_all(&rsp->gp_wait);
	} else if (need_wait) {
		wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
	} else {
		/*
		 * Possible when there's a pending CB from an rcu_sync_exit().
		 * Nobody has yet been allowed the 'fast' path and thus we can
		 * avoid doing any sync(). The callback will get 'dropped'.
		 */
		BUG_ON(rsp->gp_state != GP_PASSED);
	}
}

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to one of the call_rcu() functions by
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of rcu_sync_exit().  It takes action based on events
 * that have taken place in the meantime, so that closely spaced
 * rcu_sync_enter() and rcu_sync_exit() pairs need not wait for a grace
 * period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	BUG_ON(rsp->gp_state != GP_PASSED);
	BUG_ON(rsp->cb_state == CB_IDLE);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * A new rcu_sync_enter() has happened; drop the callback.
		 */
		rsp->cb_state = CB_IDLE;
	} else if (rsp->cb_state == CB_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback
		 * to catch a later GP.
		 */
		rsp->cb_state = CB_PENDING;
		gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
	} else {
		/*
		 * We're at least a GP after rcu_sync_exit(); everybody will
		 * now have observed the write side critical section.
		 * Let 'em rip!
		 */
		rsp->cb_state = CB_IDLE;
		rsp->gp_state = GP_IDLE;
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->cb_state == CB_IDLE) {
			rsp->cb_state = CB_PENDING;
			gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
		} else if (rsp->cb_state == CB_PENDING) {
			rsp->cb_state = CB_REPLAY;
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int cb_state;

	BUG_ON(rsp->gp_count);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->cb_state == CB_REPLAY)
		rsp->cb_state = CB_PENDING;
	cb_state = rsp->cb_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (cb_state != CB_IDLE) {
		gp_ops[rsp->gp_type].wait();
		BUG_ON(rsp->cb_state != CB_IDLE);
	}
}