/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts: you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1LU << (BITS_PER_LONG - 1)) - 1),
 * i.e. one bit less than an atomic_long_t - this is because of the way
 * shutdown works, see percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in atomic mode, collecting the per cpu refs and issuing
 * the appropriate barriers, and then marks the ref as shutting down so that
 * percpu_ref_put() will check for the ref hitting 0. After it returns, it's
 * safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * removes the kioctx from the process's table of kioctxs and kills the
 * percpu_ref. After that, there can't be any new users of the kioctx (from
 * lookup_ioctx()); percpu_ref_kill() itself drops the initial ref, and the
 * release callback runs once all remaining refs are put.
 *
 * Note that the free path, free_ioctx(), needs to go through explicit
 * call_rcu() to synchronize with RCU protected lookup_ioctx(). percpu_ref
 * operations don't imply RCU grace periods of any kind and if a user wants to
 * combine percpu_ref with RCU protection, it must be done explicitly.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once. percpu_ref_kill() must be called precisely once, so the caller is
 * responsible for providing that synchronization; the aio code does it with
 * its own locking, but it's not necessary if the code has some other mechanism
 * to synchronize teardown.
 */
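/*
 * A minimal lifecycle sketch (illustrative only; "struct foo" and
 * foo_release() are hypothetical names, not part of this API):
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *		percpu_ref_exit(ref);
 *		kfree(foo);
 *	}
 *
 *	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *	if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL))
 *		goto err;
 *
 *	percpu_ref_get(&foo->ref);	<- take an additional ref
 *	percpu_ref_put(&foo->ref);	<- drop it again
 *
 *	percpu_ref_kill(&foo->ref);	<- drops the initial ref; foo_release()
 *					   runs once all other refs are put
 */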

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

	__PERCPU_REF_FLAG_BITS	= 2,
};

/* @flags for percpu_ref_init() */
enum {
	/*
	 * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
	 * operation using percpu_ref_switch_to_percpu().  If initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
	 * percpu_ref_reinit() before used.  Implies INIT_ATOMIC.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,
};

struct percpu_ref {
	atomic_long_t		count;
	/*
	 * The low bits of the pointer carry the __PERCPU_REF_* flags; if
	 * __PERCPU_REF_ATOMIC is set, get/put manipulate the atomic_long_t
	 * count instead of the percpu counters.
	 */
	unsigned long		percpu_count_ptr;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_switch;
	bool			force_atomic:1;
	struct rcu_head		rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release, unsigned int flags,
				 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_reinit(struct percpu_ref *ref);

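/*
 * A hedged usage sketch for the init flags ("q" and q_release() are
 * hypothetical names): a ref that must not go percpu until setup is
 * complete can be initialized in atomic mode and switched over explicitly:
 *
 *	if (percpu_ref_init(&q->ref, q_release, PERCPU_REF_INIT_ATOMIC,
 *			    GFP_KERNEL))
 *		return -ENOMEM;
 *	... finish initializing q ...
 *	percpu_ref_switch_to_percpu(&q->ref);
 *
 * A ref initialized with PERCPU_REF_INIT_DEAD starts with ref == 0 and must
 * be revived with percpu_ref_reinit() before it can hand out references.
 */
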
/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Switches @ref into atomic mode before gathering up the percpu counters
 * and dropping the initial ref.
 *
 * There are no implied RCU grace periods between kill and release.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}
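
/*
 * An illustrative sketch (foo_confirm_kill() and foo->confirm_done are
 * hypothetical): when a caller needs a point after which
 * percpu_ref_tryget_live() is guaranteed to fail, it can pass a
 * confirmation callback to percpu_ref_kill_and_confirm() and wait for it:
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *		complete(&foo->confirm_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->confirm_done);
 *	... no new refs can be handed out by percpu_ref_tryget_live() ...
 */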

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't return the pointer and leave the NULL test to the
 * caller because doing so forces the compiler to generate two conditional
 * branches as it can't assume that @ref->percpu_count is not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
				   unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr;

	/*
	 * The value of @ref->percpu_count_ptr is tested for
	 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
	 * used as a pointer.  If the compiler generates a separate fetch
	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
	 * between, contaminating the pointer value, meaning that
	 * READ_ONCE() is required when fetching it.
	 *
	 * The smp_read_barrier_depends() implied by READ_ONCE() pairs
	 * with smp_store_release() in __percpu_ref_switch_to_percpu().
	 */
	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

	/*
	 * Theoretically, the following could test just ATOMIC; however,
	 * then we'd have to mask off DEAD separately as DEAD may be
	 * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
	 * implies ATOMIC anyway.  Test them together.
	 */
	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_add(*percpu_count, nr);
	else
		atomic_long_add(nr, &ref->count);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}
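
/*
 * An illustrative sketch ("foo" is hypothetical): percpu_ref_tryget() can
 * still succeed after percpu_ref_kill() as long as the count hasn't hit
 * zero, which suits callers that only need the object to stay allocated:
 *
 *	if (percpu_ref_tryget(&foo->ref)) {
 *		... use foo, possibly while it is shutting down ...
 *		percpu_ref_put(&foo->ref);
 *	}
 */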

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret = false;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}
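
/*
 * An illustrative sketch of the common lookup pattern (foo_table and the
 * surrounding structure are hypothetical): an RCU protected lookup paired
 * with percpu_ref_tryget_live() fails cleanly once the ref is killed:
 *
 *	rcu_read_lock();
 *	foo = radix_tree_lookup(&foo_table, id);
 *	if (foo && !percpu_ref_tryget_live(&foo->ref))
 *		foo = NULL;
 *	rcu_read_unlock();
 *	... a non-NULL foo now holds a reference the caller must put ...
 */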

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
		ref->release(ref);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init()).
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	percpu_ref_put_many(ref, 1);
}

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;
	return !atomic_long_read(&ref->count);
}
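
/*
 * An illustrative sketch (foo->free_wq is hypothetical): teardown paths
 * typically kill the ref and then wait for the count to drain, e.g.:
 *
 *	percpu_ref_kill(&foo->ref);
 *	wait_event(foo->free_wq, percpu_ref_is_zero(&foo->ref));
 *
 * with the release callback doing a wake_up() on foo->free_wq.
 */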

#endif