#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
			const char *name, struct lock_class_key *rwsem_key)
{
	sem->read_count = alloc_percpu(int);
	if (unlikely(!sem->read_count))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
	__init_rwsem(&sem->rw_sem, name, rwsem_key);
	rcuwait_init(&sem->writer);
	sem->readers_block = 0;
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);
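
/*
 * Illustrative note (a sketch, not part of this file): callers normally do
 * not invoke __percpu_init_rwsem() directly but use the percpu_init_rwsem()
 * wrapper from <linux/percpu-rwsem.h>, which supplies the name and the
 * lock_class_key.  Its shape is roughly as follows (treat the details as an
 * assumption; the exact macro lives in the header):
 *
 *	#define percpu_init_rwsem(sem)				\
 *	({							\
 *		static struct lock_class_key rwsem_key;		\
 *		__percpu_init_rwsem(sem, #sem, &rwsem_key);	\
 *	})
 */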

void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!sem->read_count)
		return;

	rcu_sync_dtor(&sem->rss);
	free_percpu(sem->read_count);
	sem->read_count = NULL; /* catch use after free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);

int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
{
	/*
	 * Due to having preemption disabled the decrement happens on
	 * the same CPU as the increment, avoiding the
	 * increment-on-one-CPU-and-decrement-on-another problem.
	 *
	 * If the reader misses the writer's assignment of readers_block, then
	 * the writer is guaranteed to see the reader's increment.
	 *
	 * Conversely, any readers that increment their sem->read_count after
	 * the writer looks are guaranteed to see the readers_block value,
	 * which in turn means that they are guaranteed to immediately
	 * decrement their sem->read_count, so that it doesn't matter that the
	 * writer missed them.
	 */

	smp_mb(); /* A matches D */

	/*
	 * If !readers_block the critical section starts here, matched by the
	 * release in percpu_up_write().
	 */
	if (likely(!smp_load_acquire(&sem->readers_block)))
		return 1;

	/*
	 * Per the above comment; we still have preemption disabled and
	 * will thus decrement on the same CPU as we incremented.
	 */
	__percpu_up_read(sem);

	if (try)
		return 0;

	/*
	 * We either call schedule() in the wait, or we'll fall through
	 * and reschedule on the preempt_enable() in percpu_down_read().
	 */
	preempt_enable_no_resched();

	/*
	 * Avoid lockdep for the down/up_read(); we already have the
	 * annotation from percpu_down_read().
	 */
	__down_read(&sem->rw_sem);
	this_cpu_inc(*sem->read_count);
	__up_read(&sem->rw_sem);

	preempt_disable();
	return 1;
}
EXPORT_SYMBOL_GPL(__percpu_down_read);
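
/*
 * For reference (a sketch, not part of this file): __percpu_down_read() is
 * only the slow path.  The common case lives in the percpu_down_read()
 * inline in <linux/percpu-rwsem.h>, which roughly (details may differ
 * between kernel versions) does:
 *
 *	preempt_disable();
 *	__this_cpu_inc(*sem->read_count);
 *	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
 *		__percpu_down_read(sem, false);	(implies a full barrier)
 *	preempt_enable();
 *
 * i.e. the per-CPU increment has already happened by the time this slow
 * path runs, and we arrive with preemption still disabled.
 */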

void __percpu_up_read(struct percpu_rw_semaphore *sem)
{
	smp_mb(); /* B matches C */
	/*
	 * In other words, if they see our decrement (presumably to aggregate
	 * zero, as that is the only time it matters) they will also see our
	 * critical section.
	 */
	__this_cpu_dec(*sem->read_count);

	/* Prod writer to recheck readers_active */
	rcuwait_wake_up(&sem->writer);
}
EXPORT_SYMBOL_GPL(__percpu_up_read);

#define per_cpu_sum(var)					\
({								\
	typeof(var) __sum = 0;					\
	int cpu;						\
	compiletime_assert_atomic_type(__sum);			\
	for_each_possible_cpu(cpu)				\
		__sum += per_cpu(var, cpu);			\
	__sum;							\
})

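/*
 * A reader which takes the rwsem slow path in __percpu_down_read() can
 * increment read_count on one CPU and later decrement it on another, so an
 * individual CPU's counter may be negative; only the wrapping sum over all
 * CPUs (the "modular sum" referred to below) is meaningful.
 */
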
/*
 * Return true if the modular sum of the sem->read_count per-CPU variable is
 * zero.  If this sum is zero, then it is stable due to the fact that if any
 * newly arriving readers increment a given counter, they will immediately
 * decrement that same counter.
 */
static bool readers_active_check(struct percpu_rw_semaphore *sem)
{
	if (per_cpu_sum(*sem->read_count) != 0)
		return false;

	/*
	 * If we observed the decrement, ensure we see the entire critical
	 * section.
	 */

	smp_mb(); /* C matches B */

	return true;
}

void percpu_down_write(struct percpu_rw_semaphore *sem)
{
	/* Notify readers to take the slow path. */
	rcu_sync_enter(&sem->rss);

	down_write(&sem->rw_sem);

	/*
	 * Notify new readers to block; up until now, and thus throughout the
	 * longish rcu_sync_enter() above, new readers could still come in.
	 */
	WRITE_ONCE(sem->readers_block, 1);

	smp_mb(); /* D matches A */

	/*
	 * If they don't see our write of readers_block, then we are
	 * guaranteed to see their sem->read_count increment, and therefore
	 * will wait for them.
	 */

	/* Wait for all now active readers to complete. */
	rcuwait_wait_event(&sem->writer, readers_active_check(sem));
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *sem)
{
	/*
	 * Signal that the writer is done; the reader fast path is not
	 * re-enabled yet.
	 *
	 * One reason that we cannot just immediately re-enable the fast path
	 * is that new readers might fail to see the results of this writer's
	 * critical section.
	 *
	 * Therefore we force it through the slow path which guarantees an
	 * acquire and thereby guarantees the critical section's consistency.
	 */
	smp_store_release(&sem->readers_block, 0);

	/*
	 * Release the write lock; this will allow readers back in the game.
	 */
	up_write(&sem->rw_sem);

	/*
	 * Once this completes (at least one RCU-sched grace period hence) the
	 * reader fast path will be available again.  Safe to use outside the
	 * exclusive write lock because rcu_sync_exit() is counting.
	 */
	rcu_sync_exit(&sem->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
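
/*
 * Usage sketch (illustrative only; "example_sem", "example_value" and the
 * example_*() functions below are hypothetical, not part of the original
 * file).  Readers pay a per-CPU increment/decrement in the common case,
 * while the writer pays for rcu_sync_enter() and for draining all active
 * readers.
 */
DEFINE_STATIC_PERCPU_RWSEM(example_sem);

static unsigned long example_value;

static unsigned long __maybe_unused example_read(void)
{
	unsigned long val;

	percpu_down_read(&example_sem);		/* fast path: per-CPU inc */
	val = example_value;
	percpu_up_read(&example_sem);		/* fast path: per-CPU dec */

	return val;
}

static void __maybe_unused example_write(unsigned long val)
{
	percpu_down_write(&example_sem);	/* excludes writers, drains readers */
	example_value = val;
	percpu_up_write(&example_sem);		/* fast path returns after a GP */
}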
#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
			const char *name, struct lock_class_key *rwsem_key)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	__init_rwsem(&brw->rw_sem, name, rwsem_key);
	rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);

void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!brw->fast_read_ctr)
		return;

	rcu_sync_dtor(&brw->rss);
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use after free bugs */
}

/*
 * This is the fast-path for down_read/up_read. If it succeeds we rely
 * on the barriers provided by rcu_sync_enter/exit; see the comments in
 * percpu_down_write() and percpu_up_write().
 *
 * If this helper fails the callers rely on the normal rw_semaphore and
 * atomic_dec_and_test(), so in this case we have the necessary barriers.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success;

	preempt_disable();
	success = rcu_sync_is_idle(&brw->rss);
	if (likely(success))
		__this_cpu_add(*brw->fast_read_ctr, val);
	preempt_enable();

	return success;
}

/*
 * Like the normal down_read() this is not recursive; the writer can
 * come after the first percpu_down_read() and create a deadlock.
 *
 * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * percpu_up_read() does rwsem_release(). This pairs with the usage
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();
	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);

	if (likely(update_fast_ctr(brw, +1)))
		return;

	/* Avoid rwsem_acquire_read() and rwsem_release() */
	__down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	__up_read(&brw->rw_sem);
}
EXPORT_SYMBOL_GPL(percpu_down_read);

int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
{
	if (unlikely(!update_fast_ctr(brw, +1))) {
		if (!__down_read_trylock(&brw->rw_sem))
			return 0;
		atomic_inc(&brw->slow_read_ctr);
		__up_read(&brw->rw_sem);
	}

	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
	return 1;
}
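
/*
 * Usage sketch for the trylock variant (illustrative only; example_try_read()
 * and its read-side work are hypothetical, not part of the original file):
 */
static bool __maybe_unused example_try_read(struct percpu_rw_semaphore *sem)
{
	if (!percpu_down_read_trylock(sem))
		return false;	/* a writer holds, or is taking, the lock */

	/* ... read-side work goes here ... */

	percpu_up_read(sem);
	return true;
}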

void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	if (likely(update_fast_ctr(brw, -1)))
		return;

	/* false-positive is possible but harmless */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}
EXPORT_SYMBOL_GPL(percpu_up_read);

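/*
 * A reader may migrate between percpu_down_read() and percpu_up_read(), so an
 * individual CPU's fast_read_ctr can be negative; only the sum over all CPUs
 * is meaningful.  clear_fast_ctr() computes that sum with unsigned arithmetic
 * (so per-CPU wrap-around cancels out) and resets the counters, which is safe
 * because the caller holds rw_sem with the fast path already disabled.
 */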
static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}

	return sum;
}

void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/*
	 * Make rcu_sync_is_idle() == F and thus disable the fast-path in
	 * percpu_down_read() and percpu_up_read(), and wait for gp pass.
	 *
	 * The latter synchronises us with the preceding readers which used
	 * the fast-path, so we cannot miss the result of __this_cpu_add()
	 * or anything else inside their critical sections.
	 */
	rcu_sync_enter(&brw->rss);

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers can't use the fast-path */
	up_write(&brw->rw_sem);
	/*
	 * Enable the fast-path in percpu_down_read() and percpu_up_read()
	 * but only after another gp pass; this adds the necessary barrier
	 * to ensure the reader can't miss the changes done by us.
	 */
	rcu_sync_exit(&brw->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);