/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

#ifdef CONFIG_PROVE_RCU
#define __INIT_HELD(func)	.held = func,
#else
#define __INIT_HELD(func)
#endif

static const struct {
	void (*sync)(void);
	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
	void (*wait)(void);
#ifdef CONFIG_PROVE_RCU
	int  (*held)(void);
#endif
} gp_ops[] = {
	[RCU_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_held)
	},
	[RCU_SCHED_SYNC] = {
		.sync = synchronize_sched,
		.call = call_rcu_sched,
		.wait = rcu_barrier_sched,
		__INIT_HELD(rcu_read_lock_sched_held)
	},
	[RCU_BH_SYNC] = {
		.sync = synchronize_rcu_bh,
		.call = call_rcu_bh,
		.wait = rcu_barrier_bh,
		__INIT_HELD(rcu_read_lock_bh_held)
	},
};

enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
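
/*
 * Informal sketch of the two state machines implemented below:
 *
 *	gp_state: GP_IDLE --rcu_sync_enter()--> GP_PENDING
 *	          GP_PENDING --grace period elapses--> GP_PASSED
 *	          GP_PASSED --rcu_sync_func() finds no writers--> GP_IDLE
 *
 *	cb_state: CB_IDLE --last rcu_sync_exit()--> CB_PENDING
 *	          CB_PENDING --enter/exit pair before the GP ends--> CB_REPLAY
 *	          CB_REPLAY --rcu_sync_func()--> CB_PENDING (requeued)
 *	          CB_PENDING --rcu_sync_func()--> CB_IDLE
 *	          (rcu_sync_func() drops straight to CB_IDLE if a new
 *	           rcu_sync_enter() arrived in the meantime)
 */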

#define	rss_lock	gp_wait.lock

#ifdef CONFIG_PROVE_RCU
void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
{
	RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
			 "suspicious rcu_sync_is_idle() usage");
}
#endif

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 * @type: Flavor of RCU with which to synchronize rcu_sync structure
 */
void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
	rsp->gp_type = type;
}
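
/*
 * Illustrative usage sketch: an updater-side object embeds a struct
 * rcu_sync and chooses the RCU flavor its readers use.  The my_gate
 * type and functions here are hypothetical; percpu-rwsem is a real
 * in-tree user, and it picks RCU_SCHED_SYNC:
 *
 *	struct my_gate {
 *		struct rcu_sync rss;
 *	};
 *
 *	static void my_gate_init(struct my_gate *gate)
 *	{
 *		rcu_sync_init(&gate->rss, RCU_SCHED_SYNC);
 *	}
 */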

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period.  However, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	bool need_wait, need_sync;

	spin_lock_irq(&rsp->rss_lock);
	need_wait = rsp->gp_count++;
	need_sync = rsp->gp_state == GP_IDLE;
	if (need_sync)
		rsp->gp_state = GP_PENDING;
	spin_unlock_irq(&rsp->rss_lock);

	BUG_ON(need_wait && need_sync);

	if (need_sync) {
		gp_ops[rsp->gp_type].sync();
		rsp->gp_state = GP_PASSED;
		wake_up_all(&rsp->gp_wait);
	} else if (need_wait) {
		wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
	} else {
		/*
		 * Possible when there's a pending CB from an rcu_sync_exit().
		 * Nobody has yet been allowed the 'fast' path and thus we can
		 * avoid doing any sync().  The callback will get 'dropped'.
		 */
		BUG_ON(rsp->gp_state != GP_PASSED);
	}
}
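
/*
 * Illustrative writer-side sketch (hypothetical my_gate type continued
 * from above): bracket the update with rcu_sync_enter()/rcu_sync_exit()
 * so that rcu_sync_is_idle() returns false, and readers therefore take
 * their slowpath, for the duration of the update:
 *
 *	static void my_gate_update(struct my_gate *gate)
 *	{
 *		rcu_sync_enter(&gate->rss);	(may block for a grace period)
 *		... perform the update under the slowpath locking that
 *		    readers fall back on ...
 *		rcu_sync_exit(&gate->rss);	(fastpath restored after a GP)
 *	}
 */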

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is passed to one of the call_rcu() functions by
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of rcu_sync_exit().  It takes action based on events
 * that have taken place in the meantime, so that closely spaced
 * rcu_sync_enter() and rcu_sync_exit() pairs need not wait for a grace
 * period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ends, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ends, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rcu)
{
	struct rcu_sync *rsp = container_of(rcu, struct rcu_sync, cb_head);
	unsigned long flags;

	BUG_ON(rsp->gp_state != GP_PASSED);
	BUG_ON(rsp->cb_state == CB_IDLE);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * A new rcu_sync_enter() has happened; drop the callback.
		 */
		rsp->cb_state = CB_IDLE;
	} else if (rsp->cb_state == CB_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback
		 * to catch a later GP.
		 */
		rsp->cb_state = CB_PENDING;
		gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
	} else {
		/*
		 * We're at least a GP after rcu_sync_exit(); everybody will
		 * now have observed the write-side critical section.  Let
		 * 'em rip!
		 */
		rsp->cb_state = CB_IDLE;
		rsp->gp_state = GP_IDLE;
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->cb_state == CB_IDLE) {
			rsp->cb_state = CB_PENDING;
			gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
		} else if (rsp->cb_state == CB_PENDING) {
			rsp->cb_state = CB_REPLAY;
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}
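
/*
 * Illustrative reader-side sketch (hypothetical names): the reader
 * samples rcu_sync_is_idle() inside an RCU read-side critical section
 * matching the flavor passed to rcu_sync_init().  The grace periods in
 * rcu_sync_enter() and rcu_sync_func() are what make this test safe:
 *
 *	static void my_gate_read(struct my_gate *gate)
 *	{
 *		rcu_read_lock_sched();
 *		if (rcu_sync_is_idle(&gate->rss))
 *			my_fastpath(gate);
 *		else
 *			my_slowpath(gate);
 *		rcu_read_unlock_sched();
 *	}
 */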

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int cb_state;

	BUG_ON(rsp->gp_count);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->cb_state == CB_REPLAY)
		rsp->cb_state = CB_PENDING;
	cb_state = rsp->cb_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (cb_state != CB_IDLE) {
		gp_ops[rsp->gp_type].wait();
		BUG_ON(rsp->cb_state != CB_IDLE);
	}
}
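
/*
 * Illustrative teardown sketch (hypothetical names): rcu_sync_dtor()
 * must run after the final rcu_sync_exit() and before the enclosing
 * object is freed, so that a still-pending rcu_sync_func() callback
 * cannot run against freed memory:
 *
 *	static void my_gate_destroy(struct my_gate *gate)
 *	{
 *		rcu_sync_dtor(&gate->rss);
 *		kfree(gate);
 *	}
 */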