v4.17: kernel/locking/percpu-rwsem.c
 
#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
			const char *name, struct lock_class_key *rwsem_key)
{
	sem->read_count = alloc_percpu(int);
	if (unlikely(!sem->read_count))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
	__init_rwsem(&sem->rw_sem, name, rwsem_key);
	rcuwait_init(&sem->writer);
	sem->readers_block = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);

void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!sem->read_count)
		return;

	rcu_sync_dtor(&sem->rss);
	free_percpu(sem->read_count);
	sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);

int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
{
	/*
	 * Due to having preemption disabled, the decrement happens on
	 * the same CPU as the increment, avoiding the
	 * increment-on-one-CPU-and-decrement-on-another problem.
	 *
	 * If the reader misses the writer's assignment of readers_block, then
	 * the writer is guaranteed to see the reader's increment.
	 *
	 * Conversely, any readers that increment their sem->read_count after
	 * the writer looks are guaranteed to see the readers_block value,
	 * which in turn means that they are guaranteed to immediately
	 * decrement their sem->read_count, so that it doesn't matter that the
	 * writer missed them.
	 */

	smp_mb(); /* A matches D */

	/*
	 * If !readers_block the critical section starts here, matched by the
	 * release in percpu_up_write().
	 */
	if (likely(!smp_load_acquire(&sem->readers_block)))
		return 1;

	/*
	 * Per the above comment; we still have preemption disabled and
	 * will thus decrement on the same CPU as we incremented.
	 */
	__percpu_up_read(sem);

	if (try)
		return 0;

	/*
	 * We either call schedule() in the wait, or we'll fall through
	 * and reschedule on the preempt_enable() in percpu_down_read().
	 */
	preempt_enable_no_resched();

	/*
	 * Avoid lockdep for the down/up_read(); we already have them.
	 */
	__down_read(&sem->rw_sem);
	this_cpu_inc(*sem->read_count);
	__up_read(&sem->rw_sem);

	preempt_disable();
	return 1;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);

void __percpu_up_read(struct percpu_rw_semaphore *sem)
{
	smp_mb(); /* B matches C */
	/*
	 * In other words, if they see our decrement (presumably to aggregate
	 * zero, as that is the only time it matters) they will also see our
	 * critical section.
	 */
	__this_cpu_dec(*sem->read_count);

	/* Prod writer to recheck readers_active */
	rcuwait_wake_up(&sem->writer);
}
EXPORT_SYMBOL_GPL(__percpu_up_read);

#define per_cpu_sum(var)						\
({									\
	typeof(var) __sum = 0;						\
	int cpu;							\
	compiletime_assert_atomic_type(__sum);				\
	for_each_possible_cpu(cpu)					\
		__sum += per_cpu(var, cpu);				\
	__sum;								\
})

/*
 * Return true if the modular sum of the sem->read_count per-CPU variable is
 * zero.  If this sum is zero, then it is stable due to the fact that if any
 * newly arriving readers increment a given counter, they will immediately
 * decrement that same counter.
 */
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
	if (per_cpu_sum(*sem->read_count) != 0)
		return false;

	/*
	 * If we observed the decrement, ensure we see the entire critical
	 * section.
	 */

	smp_mb(); /* C matches B */

	return true;
}

void percpu_down_write(struct percpu_rw_semaphore *sem)
{
	/* Notify readers to take the slow path. */
	rcu_sync_enter(&sem->rss);

	down_write(&sem->rw_sem);

	/*
	 * Notify new readers to block; up until now, and thus throughout the
	 * longish rcu_sync_enter() above, new readers could still come in.
	 */
	WRITE_ONCE(sem->readers_block, 1);

	smp_mb(); /* D matches A */

	/*
	 * If they don't see our write of readers_block, then we are
	 * guaranteed to see their sem->read_count increment, and therefore
	 * will wait for them.
	 */

	/* Wait for all now active readers to complete. */
	rcuwait_wait_event(&sem->writer, readers_active_check(sem));
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *sem)
{
	/*
	 * Signal the writer is done; no fast path yet.
	 *
	 * One reason that we cannot just immediately flip to readers_fast is
	 * that new readers might fail to see the results of this writer's
	 * critical section.
	 *
	 * Therefore we force it through the slow path which guarantees an
	 * acquire and thereby guarantees the critical section's consistency.
	 */
	smp_store_release(&sem->readers_block, 0);

	/*
	 * Release the write lock; this will allow readers back in the game.
	 */
	up_write(&sem->rw_sem);

	/*
	 * Once this completes (at least one RCU-sched grace period hence) the
	 * reader fast path will be available again. Safe to use outside the
	 * exclusive write lock because it's counting.
	 */
	rcu_sync_exit(&sem->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
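
The listing above exports only the slow-path halves of the API; callers use the wrappers declared in include/linux/percpu-rwsem.h (percpu_init_rwsem(), percpu_down_read()/percpu_up_read(), percpu_down_write()/percpu_up_write(), percpu_free_rwsem()). A minimal caller-side sketch follows; struct my_dev and the my_dev_*() functions are hypothetical names, used only to show the read-mostly pattern this primitive targets: readers pay a per-CPU increment on the fast path, while the writer pays for rcu_sync_enter() and for waiting out every active reader.

#include <linux/percpu-rwsem.h>

struct my_dev {
	struct percpu_rw_semaphore sem;
	int config;
};

static int my_dev_init(struct my_dev *dev)
{
	/* May fail with -ENOMEM from alloc_percpu() in __percpu_init_rwsem(). */
	return percpu_init_rwsem(&dev->sem);
}

static int my_dev_read_config(struct my_dev *dev)
{
	int val;

	percpu_down_read(&dev->sem);	/* fast path: per-CPU increment */
	val = dev->config;
	percpu_up_read(&dev->sem);

	return val;
}

static void my_dev_set_config(struct my_dev *dev, int val)
{
	percpu_down_write(&dev->sem);	/* slow: forces readers onto the slow path */
	dev->config = val;
	percpu_up_write(&dev->sem);
}

static void my_dev_exit(struct my_dev *dev)
{
	percpu_free_rwsem(&dev->sem);
}
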
v5.9: kernel/locking/percpu-rwsem.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
			const char *name, struct lock_class_key *key)
{
	sem->read_count = alloc_percpu(int);
	if (unlikely(!sem->read_count))
		return -ENOMEM;

	rcu_sync_init(&sem->rss);
	rcuwait_init(&sem->writer);
	init_waitqueue_head(&sem->waiters);
	atomic_set(&sem->block, 0);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);

void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!sem->read_count)
		return;

	rcu_sync_dtor(&sem->rss);
	free_percpu(sem->read_count);
	sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);

static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	this_cpu_inc(*sem->read_count);

	/*
	 * Due to having preemption disabled, the decrement happens on
	 * the same CPU as the increment, avoiding the
	 * increment-on-one-CPU-and-decrement-on-another problem.
	 *
	 * If the reader misses the writer's assignment of sem->block, then the
	 * writer is guaranteed to see the reader's increment.
	 *
	 * Conversely, any readers that increment their sem->read_count after
	 * the writer looks are guaranteed to see the sem->block value, which
	 * in turn means that they are guaranteed to immediately decrement
	 * their sem->read_count, so that it doesn't matter that the writer
	 * missed them.
	 */

	smp_mb(); /* A matches D */

	/*
	 * If !sem->block the critical section starts here, matched by the
	 * release in percpu_up_write().
	 */
	if (likely(!atomic_read_acquire(&sem->block)))
		return true;

	this_cpu_dec(*sem->read_count);

	/* Prod writer to re-evaluate readers_active_check() */
	rcuwait_wake_up(&sem->writer);

	return false;
}

static inline bool __percpu_down_write_trylock(struct percpu_rw_semaphore *sem)
{
	if (atomic_read(&sem->block))
		return false;

	return atomic_xchg(&sem->block, 1) == 0;
}

static bool __percpu_rwsem_trylock(struct percpu_rw_semaphore *sem, bool reader)
{
	if (reader) {
		bool ret;

		preempt_disable();
		ret = __percpu_down_read_trylock(sem);
		preempt_enable();

		return ret;
	}
	return __percpu_down_write_trylock(sem);
}

/*
 * The return value of wait_queue_entry::func means:
 *
 *  <0 - error, wakeup is terminated and the error is returned
 *   0 - no wakeup, a next waiter is tried
 *  >0 - woken, if EXCLUSIVE, counted towards @nr_exclusive.
 *
 * We use EXCLUSIVE for both readers and writers to preserve FIFO order,
 * and play games with the return value to allow waking multiple readers.
 *
 * Specifically, we wake readers until we've woken a single writer, or until a
 * trylock fails.
 */
static int percpu_rwsem_wake_function(struct wait_queue_entry *wq_entry,
				      unsigned int mode, int wake_flags,
				      void *key)
{
	bool reader = wq_entry->flags & WQ_FLAG_CUSTOM;
	struct percpu_rw_semaphore *sem = key;
	struct task_struct *p;

	/* concurrent against percpu_down_write(), can get stolen */
	if (!__percpu_rwsem_trylock(sem, reader))
		return 1;

	p = get_task_struct(wq_entry->private);
	list_del_init(&wq_entry->entry);
	smp_store_release(&wq_entry->private, NULL);

	wake_up_process(p);
	put_task_struct(p);

	return !reader; /* wake (readers until) 1 writer */
}

static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
{
	DEFINE_WAIT_FUNC(wq_entry, percpu_rwsem_wake_function);
	bool wait;

	spin_lock_irq(&sem->waiters.lock);
	/*
	 * Serialize against the wakeup in percpu_up_write(); if we fail
	 * the trylock, the wakeup must see us on the list.
	 */
	wait = !__percpu_rwsem_trylock(sem, reader);
	if (wait) {
		wq_entry.flags |= WQ_FLAG_EXCLUSIVE | reader * WQ_FLAG_CUSTOM;
		__add_wait_queue_entry_tail(&sem->waiters, &wq_entry);
	}
	spin_unlock_irq(&sem->waiters.lock);

	while (wait) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!smp_load_acquire(&wq_entry.private))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
{
	if (__percpu_down_read_trylock(sem))
		return true;

	if (try)
		return false;

	preempt_enable();
	percpu_rwsem_wait(sem, /* .reader = */ true);
	preempt_disable();

	return true;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);

#define per_cpu_sum(var)						\
({									\
	typeof(var) __sum = 0;						\
	int cpu;							\
	compiletime_assert_atomic_type(__sum);				\
	for_each_possible_cpu(cpu)					\
		__sum += per_cpu(var, cpu);				\
	__sum;								\
})

/*
 * Return true if the modular sum of the sem->read_count per-CPU variable is
 * zero.  If this sum is zero, then it is stable due to the fact that if any
 * newly arriving readers increment a given counter, they will immediately
 * decrement that same counter.
 *
 * Assumes sem->block is set.
 */
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
	if (per_cpu_sum(*sem->read_count) != 0)
		return false;

	/*
	 * If we observed the decrement, ensure we see the entire critical
	 * section.
	 */

	smp_mb(); /* C matches B */

	return true;
}

void percpu_down_write(struct percpu_rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);

	/* Notify readers to take the slow path. */
	rcu_sync_enter(&sem->rss);

	/*
	 * Try to set sem->block; this provides writer-writer exclusion.
	 * Having sem->block set makes new readers block.
	 */
	if (!__percpu_down_write_trylock(sem))
		percpu_rwsem_wait(sem, /* .reader = */ false);

	/* smp_mb() implied by __percpu_down_write_trylock() on success -- D matches A */

	/*
	 * If they don't see our store of sem->block, then we are guaranteed to
	 * see their sem->read_count increment, and therefore will wait for
	 * them.
	 */

	/* Wait for all active readers to complete. */
	rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *sem)
{
	rwsem_release(&sem->dep_map, _RET_IP_);

	/*
	 * Signal the writer is done; no fast path yet.
	 *
	 * One reason that we cannot just immediately flip to readers_fast is
	 * that new readers might fail to see the results of this writer's
	 * critical section.
	 *
	 * Therefore we force it through the slow path which guarantees an
	 * acquire and thereby guarantees the critical section's consistency.
	 */
	atomic_set_release(&sem->block, 0);

	/*
	 * Prod any pending reader/writer to make progress.
	 */
	__wake_up(&sem->waiters, TASK_NORMAL, 1, sem);

	/*
	 * Once this completes (at least one RCU-sched grace period hence) the
	 * reader fast path will be available again. Safe to use outside the
	 * exclusive write lock because it's counting.
	 */
	rcu_sync_exit(&sem->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
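
Both versions rely on the per_cpu_sum() argument above: a reader may increment read_count on one CPU and, after sleeping or migrating, decrement it on another, so an individual per-CPU counter can stay non-zero (even negative) while the semaphore is idle; only the sum over all possible CPUs is meaningful, and wrap-around cancels because every increment is eventually matched by exactly one decrement. A small userspace model of that argument (plain C, not kernel code; NR_CPUS and read_count[] are stand-ins for the per-CPU data):

#include <stdio.h>

#define NR_CPUS 4

static int read_count[NR_CPUS];	/* models the per-CPU reader count */

static int per_cpu_sum(void)
{
	int cpu, sum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += read_count[cpu];
	return sum;
}

int main(void)
{
	read_count[0]++;	/* reader enters on CPU 0 */
	printf("reader active:  sum=%d\n", per_cpu_sum());	/* 1 -> writer must wait */

	read_count[1]--;	/* the same reader exits on CPU 1 after migrating */
	printf("semaphore idle: cpu0=%d cpu1=%d sum=%d\n",
	       read_count[0], read_count[1], per_cpu_sum());	/* 1 -1 0 */

	return 0;
}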