v3.15
#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
			const char *name, struct lock_class_key *rwsem_key)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	__init_rwsem(&brw->rw_sem, name, rwsem_key);
	atomic_set(&brw->write_ctr, 0);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}

void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use after free bugs */
}

/*
 * This is the fast-path for down_read/up_read, it only needs to ensure
 * there is no pending writer (atomic_read(write_ctr) == 0) and inc/dec the
 * fast per-cpu counter. The writer uses synchronize_sched_expedited() to
 * serialize with the preempt-disabled section below.
 *
 * The nontrivial part is that we should guarantee acquire/release semantics
 * in case when
 *
 *	R_W: down_write() comes after up_read(), the writer should see all
 *	     changes done by the reader
 * or
 *	W_R: down_read() comes after up_write(), the reader should see all
 *	     changes done by the writer
 *
 * If this helper fails the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in this case we have the necessary barriers.
 *
 * But if it succeeds we do not have any barriers, atomic_read(write_ctr) or
 * __this_cpu_add() below can be reordered with any LOAD/STORE done by the
 * reader inside the critical section. See the comments in down_write and
 * up_write below.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success = false;

	preempt_disable();
	if (likely(!atomic_read(&brw->write_ctr))) {
		__this_cpu_add(*brw->fast_read_ctr, val);
		success = true;
	}
	preempt_enable();

	return success;
}

/*
 * Like the normal down_read() this is not recursive, the writer can
 * come after the first percpu_down_read() and create the deadlock.
 *
 * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * percpu_up_read() does rwsem_release(). This pairs with the usage
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();
	if (likely(update_fast_ctr(brw, +1))) {
		rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
		return;
	}

	down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	/* avoid up_read()->rwsem_release() */
	__up_read(&brw->rw_sem);
}

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	if (likely(update_fast_ctr(brw, -1)))
		return;

	/* false-positive is possible but harmless */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}

static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}

	return sum;
}

/*
 * A writer increments ->write_ctr to force the readers to switch to the
 * slow mode, note the atomic_read() check in update_fast_ctr().
 *
 * After that the readers can only inc/dec the slow ->slow_read_ctr counter,
 * ->fast_read_ctr is stable. Once the writer moves its sum into the slow
 * counter it represents the number of active readers.
 *
 * Finally the writer takes ->rw_sem for writing and blocks the new readers,
 * then waits until the slow counter becomes zero.
 */
void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/* tell update_fast_ctr() there is a pending writer */
	atomic_inc(&brw->write_ctr);
	/*
	 * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
	 *    so that update_fast_ctr() can't succeed.
	 *
	 * 2. Ensures we see the result of every previous this_cpu_add() in
	 *    update_fast_ctr().
	 *
	 * 3. Ensures that if any reader has exited its critical section via
	 *    fast-path, it executes a full memory barrier before we return.
	 *    See R_W case in the comment above update_fast_ctr().
	 */
	synchronize_sched_expedited();

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers can't use the fast-path */
	up_write(&brw->rw_sem);
	/*
	 * Insert the barrier before the next fast-path in down_read,
	 * see W_R case in the comment above update_fast_ctr().
	 */
	synchronize_sched_expedited();
	/* the last writer unblocks update_fast_ctr() */
	atomic_dec(&brw->write_ctr);
}
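
For orientation, the sketch below shows how a caller typically pairs these primitives: readers bracket their critical section with percpu_down_read()/percpu_up_read() (normally just a preempt-disabled per-cpu increment/decrement), while a writer uses percpu_down_write()/percpu_up_write() and pays the synchronize_sched_expedited() cost. This is a minimal illustration, not code from the kernel tree: the my_* names are invented, and it assumes the percpu_init_rwsem() wrapper that the percpu-rwsem header provides around __percpu_init_rwsem().

/* Hypothetical usage sketch -- my_lock, my_data and the my_* helpers are illustrative only. */
#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore my_lock;
static int my_data;

static int __init my_init(void)
{
	/* percpu_init_rwsem() is assumed to wrap __percpu_init_rwsem() with a lock_class_key */
	return percpu_init_rwsem(&my_lock);	/* -ENOMEM if alloc_percpu() fails */
}

static int my_read(void)
{
	int val;

	percpu_down_read(&my_lock);		/* fast-path: per-cpu increment, no barriers */
	val = my_data;
	percpu_up_read(&my_lock);
	return val;
}

static void my_update(int val)
{
	percpu_down_write(&my_lock);		/* forces readers to the slow path, waits for them */
	my_data = val;
	percpu_up_write(&my_lock);
}
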
v4.6
#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
			const char *name, struct lock_class_key *rwsem_key)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	__init_rwsem(&brw->rw_sem, name, rwsem_key);
	rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);

void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!brw->fast_read_ctr)
		return;

	rcu_sync_dtor(&brw->rss);
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use after free bugs */
}

/*
 * This is the fast-path for down_read/up_read. If it succeeds we rely
 * on the barriers provided by rcu_sync_enter/exit; see the comments in
 * percpu_down_write() and percpu_up_write().
 *
 * If this helper fails the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in this case we have the necessary barriers.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success;

	preempt_disable();
	success = rcu_sync_is_idle(&brw->rss);
	if (likely(success))
		__this_cpu_add(*brw->fast_read_ctr, val);
	preempt_enable();

	return success;
}

/*
 * Like the normal down_read() this is not recursive, the writer can
 * come after the first percpu_down_read() and create the deadlock.
 *
 * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * percpu_up_read() does rwsem_release(). This pairs with the usage
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();
	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);

	if (likely(update_fast_ctr(brw, +1)))
		return;

	/* Avoid rwsem_acquire_read() and rwsem_release() */
	__down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	__up_read(&brw->rw_sem);
}
EXPORT_SYMBOL_GPL(percpu_down_read);

int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
{
	if (unlikely(!update_fast_ctr(brw, +1))) {
		if (!__down_read_trylock(&brw->rw_sem))
			return 0;
		atomic_inc(&brw->slow_read_ctr);
		__up_read(&brw->rw_sem);
	}

	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
	return 1;
}

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	if (likely(update_fast_ctr(brw, -1)))
		return;

	/* false-positive is possible but harmless */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}
EXPORT_SYMBOL_GPL(percpu_up_read);

static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}

	return sum;
}

void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/*
	 * Make rcu_sync_is_idle() == F and thus disable the fast-path in
	 * percpu_down_read() and percpu_up_read(), and wait for gp pass.
	 *
	 * The latter synchronises us with the preceding readers which used
	 * the fast-path, so we cannot miss the result of __this_cpu_add()
	 * or anything else inside their critical sections.
	 */
	rcu_sync_enter(&brw->rss);

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers can't use the fast-path */
	up_write(&brw->rw_sem);
	/*
	 * Enable the fast-path in percpu_down_read() and percpu_up_read()
	 * but only after another gp pass; this adds the necessary barrier
	 * to ensure the reader can't miss the changes done by us.
	 */
	rcu_sync_exit(&brw->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
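
The v4.6 version replaces the writer's explicit write_ctr/synchronize_sched_expedited() handshake with an rcu_sync object and also exports percpu_down_read_trylock(). As a hedged sketch (reusing the hypothetical my_lock/my_data from the earlier example), a caller that must not block behind a pending writer could use the trylock variant like this:

/* Hypothetical sketch only -- my_lock and my_data come from the example above. */
static bool my_try_read(int *val)
{
	if (!percpu_down_read_trylock(&my_lock))
		return false;		/* a writer holds, or is taking, the lock */

	*val = my_data;
	percpu_up_read(&my_lock);
	return true;
}
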