lib/percpu-refcount.c (v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0-only
  2#define pr_fmt(fmt) "%s: " fmt, __func__
  3
  4#include <linux/kernel.h>
  5#include <linux/sched.h>
  6#include <linux/wait.h>
  7#include <linux/slab.h>
  8#include <linux/mm.h>
  9#include <linux/percpu-refcount.h>
 10
 11/*
 12 * Initially, a percpu refcount is just a set of percpu counters. Initially, we
 13 * don't try to detect the ref hitting 0 - which means that get/put can just
 14 * increment or decrement the local counter. Note that the counter on a
 15 * particular cpu can (and will) wrap - this is fine; when we go to shut down,
 16 * the percpu counters will all sum to the correct value.
 17 *
 18 * (More precisely: because modular arithmetic is commutative the sum of all the
 19 * percpu_count vars will be equal to what it would have been if all the gets
 20 * and puts were done to a single integer, even if some of the percpu integers
 21 * overflow or underflow).
 22 *
 23 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 24 * the ref hitting 0 on every put - this would require global synchronization
 25 * and defeat the whole purpose of using percpu refs.
 26 *
 27 * What we do is require the user to keep track of the initial refcount; we know
 28 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 29 * convert to non percpu mode before the initial ref is dropped everything
 30 * works.
 31 *
 32 * Converting to non percpu mode is done with some RCUish stuff in
 33 * percpu_ref_kill. Additionally, we need a bias value so that the
 34 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 35 */
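/*
 * Illustrative sketch (editor's addition, not part of this file): a small
 * userspace program showing why per-counter wrap-around is harmless.  Two
 * 8-bit "percpu" counters stand in for the kernel's unsigned longs so the
 * wrap is easy to see; their modular sum still equals the true get/put
 * balance.
 */
#include <stdio.h>

int main(void)
{
	unsigned char cpu0 = 0, cpu1 = 0;	/* two toy percpu counters */
	unsigned int sum;
	int i;

	for (i = 0; i < 300; i++)	/* 300 gets on cpu0: wraps past 255 */
		cpu0++;
	for (i = 0; i < 200; i++)	/* 200 puts on cpu1: wraps below 0 */
		cpu1--;

	sum = (unsigned char)(cpu0 + cpu1);
	printf("sum = %u\n", sum);	/* prints 100, the true delta */
	return 0;
}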
 36
 37#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))
 38
 39static DEFINE_SPINLOCK(percpu_ref_switch_lock);
 40static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
 41
 42static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
 43{
 44	return (unsigned long __percpu *)
 45		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
 46}
 47
 48/**
 49 * percpu_ref_init - initialize a percpu refcount
 50 * @ref: percpu_ref to initialize
 51 * @release: function which will be called when refcount hits 0
 52 * @flags: PERCPU_REF_INIT_* flags
 53 * @gfp: allocation mask to use
 54 *
 55 * Initializes @ref.  @ref starts out in percpu mode with a refcount of 1 unless
 56 * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD.  These flags
 57 * change the start state to atomic with the latter setting the initial refcount
 58 * to 0.  See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 59 *
 60 * Note that @release must not sleep - it may potentially be called from RCU
 61 * callback context by percpu_ref_kill().
 62 */
 63int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
 64		    unsigned int flags, gfp_t gfp)
 65{
 66	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
 67			     __alignof__(unsigned long));
 68	unsigned long start_count = 0;
 69	struct percpu_ref_data *data;
 70
 71	ref->percpu_count_ptr = (unsigned long)
 72		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
 73	if (!ref->percpu_count_ptr)
 74		return -ENOMEM;
 75
 76	data = kzalloc(sizeof(*ref->data), gfp);
 77	if (!data) {
 78		free_percpu((void __percpu *)ref->percpu_count_ptr);
 79		return -ENOMEM;
 80	}
 81
 82	data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
 83	data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
 84
 85	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
 86		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
 87		data->allow_reinit = true;
 88	} else {
 89		start_count += PERCPU_COUNT_BIAS;
 90	}
 91
 92	if (flags & PERCPU_REF_INIT_DEAD)
 93		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
 94	else
 95		start_count++;
 96
 97	atomic_long_set(&data->count, start_count);
 98
 99	data->release = release;
100	data->confirm_switch = NULL;
101	data->ref = ref;
102	ref->data = data;
103	return 0;
104}
105EXPORT_SYMBOL_GPL(percpu_ref_init);
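/*
 * Usage sketch (editor's addition; struct my_obj, my_obj_release and
 * my_obj_create are hypothetical): callers typically embed the percpu_ref
 * in their own object and tear it down from the release callback once the
 * last reference is dropped.  @release may run from RCU callback context,
 * so it must not sleep.
 */
struct my_obj {
	struct percpu_ref ref;
	/* ... payload ... */
};

static void my_obj_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	percpu_ref_exit(&obj->ref);
	kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	if (percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL)) {
		kfree(obj);
		return NULL;
	}
	return obj;
}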
106
107static void __percpu_ref_exit(struct percpu_ref *ref)
108{
109	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
110
111	if (percpu_count) {
112		/* non-NULL confirm_switch indicates switching in progress */
113		WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
114		free_percpu(percpu_count);
115		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
116	}
117}
118
119/**
120 * percpu_ref_exit - undo percpu_ref_init()
121 * @ref: percpu_ref to exit
122 *
123 * This function exits @ref.  The caller is responsible for ensuring that
124 * @ref is no longer in active use.  The usual places to invoke this
125 * function from are the @ref->release() callback or in init failure path
126 * where percpu_ref_init() succeeded but other parts of the initialization
127 * of the embedding object failed.
128 */
129void percpu_ref_exit(struct percpu_ref *ref)
130{
131	struct percpu_ref_data *data = ref->data;
132	unsigned long flags;
133
134	__percpu_ref_exit(ref);
135
136	if (!data)
137		return;
138
139	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
140	ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
141		__PERCPU_REF_FLAG_BITS;
142	ref->data = NULL;
143	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
144
145	kfree(data);
146}
147EXPORT_SYMBOL_GPL(percpu_ref_exit);
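/*
 * Sketch (editor's addition, reusing the hypothetical my_obj/my_obj_release
 * from the sketch above; my_obj_setup_hw is also hypothetical): the other
 * usual caller of percpu_ref_exit() is an init failure path where
 * percpu_ref_init() succeeded but a later setup step did not.
 */
static int my_obj_setup(struct my_obj *obj)
{
	int err;

	err = percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL);
	if (err)
		return err;

	err = my_obj_setup_hw(obj);		/* hypothetical later step */
	if (err) {
		percpu_ref_exit(&obj->ref);	/* undo the successful init */
		return err;
	}
	return 0;
}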
148
149static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
150{
151	struct percpu_ref_data *data = container_of(rcu,
152			struct percpu_ref_data, rcu);
153	struct percpu_ref *ref = data->ref;
154
155	data->confirm_switch(ref);
156	data->confirm_switch = NULL;
157	wake_up_all(&percpu_ref_switch_waitq);
158
159	if (!data->allow_reinit)
160		__percpu_ref_exit(ref);
161
162	/* drop ref from percpu_ref_switch_to_atomic() */
163	percpu_ref_put(ref);
164}
165
166static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
167{
168	struct percpu_ref_data *data = container_of(rcu,
169			struct percpu_ref_data, rcu);
170	struct percpu_ref *ref = data->ref;
171	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
172	static atomic_t underflows;
173	unsigned long count = 0;
174	int cpu;
175
176	for_each_possible_cpu(cpu)
177		count += *per_cpu_ptr(percpu_count, cpu);
178
179	pr_debug("global %lu percpu %lu\n",
180		 atomic_long_read(&data->count), count);
181
182	/*
183	 * It's crucial that we sum the percpu counters _before_ adding the sum
184	 * to &ref->count; since gets could be happening on one cpu while puts
185	 * happen on another, adding a single cpu's count could cause
186	 * @ref->count to hit 0 before we've got a consistent value - but the
187	 * sum of all the counts will be consistent and correct.
188	 *
189	 * Subtracting the bias value then has to happen _after_ adding count to
190	 * &ref->count; we need the bias value to prevent &ref->count from
191	 * reaching 0 before we add the percpu counts. But doing it at the same
192	 * time is equivalent and saves us atomic operations:
193	 */
194	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);
195
196	if (WARN_ONCE(atomic_long_read(&data->count) <= 0,
197		      "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
198		      data->release, atomic_long_read(&data->count)) &&
199	    atomic_inc_return(&underflows) < 4) {
200		pr_err("%s(): percpu_ref underflow", __func__);
201		mem_dump_obj(data);
202	}
203
204	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
205	percpu_ref_call_confirm_rcu(rcu);
206}
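/*
 * Worked example (editor's addition): with 8-bit arithmetic for brevity the
 * bias would be 128, so a ref created in percpu mode starts with an atomic
 * count of 129 (bias + initial ref).  If the percpu counters hold 5 gets
 * and 3 puts at switch time, their modular sum is 2 and the add above
 * leaves 129 + 2 - 128 = 3 = 1 + 5 - 3, the true count.  Until that add
 * happens, the bias keeps the atomic counter well above zero even if
 * atomic-mode puts race ahead of the fold-in.
 */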
207
208static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
209{
210}
211
212static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
213					  percpu_ref_func_t *confirm_switch)
214{
215	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
216		if (confirm_switch)
217			confirm_switch(ref);
218		return;
219	}
220
221	/* switching from percpu to atomic */
222	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
223
224	/*
225	 * Non-NULL ->confirm_switch is used to indicate that switching is
226	 * in progress.  Use noop one if unspecified.
227	 */
228	ref->data->confirm_switch = confirm_switch ?:
229		percpu_ref_noop_confirm_switch;
230
231	percpu_ref_get(ref);	/* put after confirmation */
232	call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
233}
234
235static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
236{
237	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
238	int cpu;
239
240	BUG_ON(!percpu_count);
241
242	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
243		return;
244
245	if (WARN_ON_ONCE(!ref->data->allow_reinit))
246		return;
247
248	atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);
249
250	/*
251	 * Restore per-cpu operation.  smp_store_release() is paired
252	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
253	 * zeroing is visible to all percpu accesses which can see the
254	 * following __PERCPU_REF_ATOMIC clearing.
255	 */
256	for_each_possible_cpu(cpu)
257		*per_cpu_ptr(percpu_count, cpu) = 0;
258
259	smp_store_release(&ref->percpu_count_ptr,
260			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
261}
262
263static void __percpu_ref_switch_mode(struct percpu_ref *ref,
264				     percpu_ref_func_t *confirm_switch)
265{
266	struct percpu_ref_data *data = ref->data;
267
268	lockdep_assert_held(&percpu_ref_switch_lock);
269
270	/*
271	 * If the previous ATOMIC switching hasn't finished yet, wait for
272	 * its completion.  If the caller ensures that ATOMIC switching
273	 * isn't in progress, this function can be called from any context.
274	 */
275	wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
276			    percpu_ref_switch_lock);
277
278	if (data->force_atomic || percpu_ref_is_dying(ref))
279		__percpu_ref_switch_to_atomic(ref, confirm_switch);
280	else
281		__percpu_ref_switch_to_percpu(ref);
282}
283
284/**
285 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
286 * @ref: percpu_ref to switch to atomic mode
287 * @confirm_switch: optional confirmation callback
288 *
289 * There's no reason to use this function for the usual reference counting.
290 * Use percpu_ref_kill[_and_confirm]().
291 *
292 * Schedule switching of @ref to atomic mode.  All its percpu counts will
293 * be collected to the main atomic counter.  On completion, when all CPUs
 294 * are guaranteed to be in atomic mode, @confirm_switch, which may not
295 * block, is invoked.  This function may be invoked concurrently with all
296 * the get/put operations and can safely be mixed with kill and reinit
297 * operations.  Note that @ref will stay in atomic mode across kill/reinit
298 * cycles until percpu_ref_switch_to_percpu() is called.
299 *
300 * This function may block if @ref is in the process of switching to atomic
301 * mode.  If the caller ensures that @ref is not in the process of
302 * switching to atomic mode, this function can be called from any context.
303 */
304void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
305				 percpu_ref_func_t *confirm_switch)
306{
307	unsigned long flags;
308
309	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
310
311	ref->data->force_atomic = true;
312	__percpu_ref_switch_mode(ref, confirm_switch);
313
314	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
315}
316EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
317
318/**
319 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
320 * @ref: percpu_ref to switch to atomic mode
321 *
322 * Schedule switching the ref to atomic mode, and wait for the
323 * switch to complete.  Caller must ensure that no other thread
324 * will switch back to percpu mode.
325 */
326void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
327{
328	percpu_ref_switch_to_atomic(ref, NULL);
329	wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
330}
331EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
332
333/**
334 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
335 * @ref: percpu_ref to switch to percpu mode
336 *
337 * There's no reason to use this function for the usual reference counting.
338 * To re-use an expired ref, use percpu_ref_reinit().
339 *
340 * Switch @ref to percpu mode.  This function may be invoked concurrently
341 * with all the get/put operations and can safely be mixed with kill and
342 * reinit operations.  This function reverses the sticky atomic state set
343 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic().  If @ref is
344 * dying or dead, the actual switching takes place on the following
345 * percpu_ref_reinit().
346 *
347 * This function may block if @ref is in the process of switching to atomic
348 * mode.  If the caller ensures that @ref is not in the process of
349 * switching to atomic mode, this function can be called from any context.
350 */
351void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
352{
353	unsigned long flags;
354
355	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
356
357	ref->data->force_atomic = false;
358	__percpu_ref_switch_mode(ref, NULL);
359
360	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
361}
362EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
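/*
 * Sketch (editor's addition, reusing the hypothetical struct my_obj from
 * the percpu_ref_init() sketch): a caller that needs an up-to-date atomic
 * count for a rare slow-path operation can collapse the ref to atomic mode
 * and restore the percpu fast path afterwards.
 */
static void my_obj_quiesce(struct my_obj *obj)
{
	/* fold all percpu counts into the atomic counter and wait for it */
	percpu_ref_switch_to_atomic_sync(&obj->ref);

	/* ... slow-path work that relies on the single atomic counter ... */

	/* re-enable per-cpu gets and puts */
	percpu_ref_switch_to_percpu(&obj->ref);
}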
363
364/**
365 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
366 * @ref: percpu_ref to kill
367 * @confirm_kill: optional confirmation callback
368 *
369 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
370 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
371 * called after @ref is seen as dead from all CPUs at which point all
372 * further invocations of percpu_ref_tryget_live() will fail.  See
373 * percpu_ref_tryget_live() for details.
374 *
375 * This function normally doesn't block and can be called from any context
376 * but it may block if @confirm_kill is specified and @ref is in the
377 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
378 *
379 * There are no implied RCU grace periods between kill and release.
380 */
381void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
382				 percpu_ref_func_t *confirm_kill)
383{
384	unsigned long flags;
385
386	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
387
388	WARN_ONCE(percpu_ref_is_dying(ref),
389		  "%s called more than once on %ps!", __func__,
390		  ref->data->release);
391
392	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
393	__percpu_ref_switch_mode(ref, confirm_kill);
394	percpu_ref_put(ref);
395
396	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
397}
398EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
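/*
 * Sketch (editor's addition; the struct my_obj completion member "killed"
 * is hypothetical and needs <linux/completion.h>): a typical shutdown path
 * kills the ref and uses the confirmation callback, which must not block,
 * to signal once percpu_ref_tryget_live() is guaranteed to fail on every
 * CPU.
 */
static void my_obj_confirm_kill(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	complete(&obj->killed);
}

static void my_obj_shutdown(struct my_obj *obj)
{
	init_completion(&obj->killed);
	percpu_ref_kill_and_confirm(&obj->ref, my_obj_confirm_kill);
	wait_for_completion(&obj->killed);
	/* existing holders may remain; release() runs when the last one drops */
}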
399
400/**
401 * percpu_ref_is_zero - test whether a percpu refcount reached zero
402 * @ref: percpu_ref to test
403 *
404 * Returns %true if @ref reached zero.
405 *
406 * This function is safe to call as long as @ref is between init and exit.
407 */
408bool percpu_ref_is_zero(struct percpu_ref *ref)
409{
410	unsigned long __percpu *percpu_count;
411	unsigned long count, flags;
412
413	if (__ref_is_percpu(ref, &percpu_count))
414		return false;
415
416	/* protect us from being destroyed */
417	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
418	if (ref->data)
419		count = atomic_long_read(&ref->data->count);
420	else
421		count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
422	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
423
424	return count == 0;
425}
426EXPORT_SYMBOL_GPL(percpu_ref_is_zero);
427
428/**
429 * percpu_ref_reinit - re-initialize a percpu refcount
 430 * @ref: percpu_ref to re-initialize
431 *
432 * Re-initialize @ref so that it's in the same state as when it finished
433 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD.  @ref must have been
434 * initialized successfully and reached 0 but not exited.
435 *
436 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
437 * this function is in progress.
438 */
439void percpu_ref_reinit(struct percpu_ref *ref)
440{
441	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
442
443	percpu_ref_resurrect(ref);
444}
445EXPORT_SYMBOL_GPL(percpu_ref_reinit);
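/*
 * Sketch (editor's addition; the completion member "drained" and the helper
 * names are hypothetical): re-arming a ref across a reset.  This assumes
 * the ref was created with PERCPU_REF_ALLOW_REINIT and that its release
 * callback only signals completion instead of freeing the object.
 */
static void my_obj_drained_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	complete(&obj->drained);
}

static void my_obj_reset(struct my_obj *obj)
{
	reinit_completion(&obj->drained);
	percpu_ref_kill(&obj->ref);
	wait_for_completion(&obj->drained);	/* count has reached zero */

	/* ... reconfigure while no new references can be taken ... */

	percpu_ref_reinit(&obj->ref);	/* back to percpu mode, count == 1 */
}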
446
447/**
448 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 449 * @ref: percpu_ref to resurrect
450 *
451 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
452 * called. @ref must be dead but must not yet have exited.
453 *
454 * If @ref->release() frees @ref then the caller is responsible for
455 * guaranteeing that @ref->release() does not get called while this
456 * function is in progress.
457 *
458 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
459 * this function is in progress.
460 */
461void percpu_ref_resurrect(struct percpu_ref *ref)
462{
463	unsigned long __percpu *percpu_count;
464	unsigned long flags;
465
466	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
467
468	WARN_ON_ONCE(!percpu_ref_is_dying(ref));
469	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
470
471	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
472	percpu_ref_get(ref);
473	__percpu_ref_switch_mode(ref, NULL);
474
475	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
476}
477EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
lib/percpu-refcount.c (v5.9)
  1// SPDX-License-Identifier: GPL-2.0-only
  2#define pr_fmt(fmt) "%s: " fmt, __func__
  3
  4#include <linux/kernel.h>
  5#include <linux/sched.h>
  6#include <linux/wait.h>
  7#include <linux/percpu-refcount.h>
  8
  9/*
 10 * Initially, a percpu refcount is just a set of percpu counters. Initially, we
 11 * don't try to detect the ref hitting 0 - which means that get/put can just
 12 * increment or decrement the local counter. Note that the counter on a
 13 * particular cpu can (and will) wrap - this is fine; when we go to shut down,
 14 * the percpu counters will all sum to the correct value.
 15 *
 16 * (More precisely: because modular arithmetic is commutative the sum of all the
 17 * percpu_count vars will be equal to what it would have been if all the gets
 18 * and puts were done to a single integer, even if some of the percpu integers
 19 * overflow or underflow).
 20 *
 21 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 22 * the ref hitting 0 on every put - this would require global synchronization
 23 * and defeat the whole purpose of using percpu refs.
 24 *
 25 * What we do is require the user to keep track of the initial refcount; we know
 26 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 27 * convert to non percpu mode before the initial ref is dropped everything
 28 * works.
 29 *
 30 * Converting to non percpu mode is done with some RCUish stuff in
 31 * percpu_ref_kill. Additionally, we need a bias value so that the
 32 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 33 */
 34
 35#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))
 36
 37static DEFINE_SPINLOCK(percpu_ref_switch_lock);
 38static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
 39
 40static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
 41{
 42	return (unsigned long __percpu *)
 43		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
 44}
 45
 46/**
 47 * percpu_ref_init - initialize a percpu refcount
 48 * @ref: percpu_ref to initialize
 49 * @release: function which will be called when refcount hits 0
 50 * @flags: PERCPU_REF_INIT_* flags
 51 * @gfp: allocation mask to use
 52 *
 53 * Initializes @ref.  @ref starts out in percpu mode with a refcount of 1 unless
 54 * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD.  These flags
 55 * change the start state to atomic with the latter setting the initial refcount
 56 * to 0.  See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 57 *
 58 * Note that @release must not sleep - it may potentially be called from RCU
 59 * callback context by percpu_ref_kill().
 60 */
 61int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
 62		    unsigned int flags, gfp_t gfp)
 63{
 64	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
 65			     __alignof__(unsigned long));
 66	unsigned long start_count = 0;
 67
 68	ref->percpu_count_ptr = (unsigned long)
 69		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
 70	if (!ref->percpu_count_ptr)
 71		return -ENOMEM;
 72
 73	ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
 74	ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
 75
 76	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
 77		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
 78		ref->allow_reinit = true;
 79	} else {
 80		start_count += PERCPU_COUNT_BIAS;
 81	}
 82
 83	if (flags & PERCPU_REF_INIT_DEAD)
 84		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
 85	else
 86		start_count++;
 87
 88	atomic_long_set(&ref->count, start_count);
 89
 90	ref->release = release;
 91	ref->confirm_switch = NULL;
 92	return 0;
 93}
 94EXPORT_SYMBOL_GPL(percpu_ref_init);
 95
 96/**
 97 * percpu_ref_exit - undo percpu_ref_init()
 98 * @ref: percpu_ref to exit
 99 *
100 * This function exits @ref.  The caller is responsible for ensuring that
101 * @ref is no longer in active use.  The usual places to invoke this
102 * function from are the @ref->release() callback or in init failure path
103 * where percpu_ref_init() succeeded but other parts of the initialization
104 * of the embedding object failed.
105 */
106void percpu_ref_exit(struct percpu_ref *ref)
107{
108	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
109
110	if (percpu_count) {
111		/* non-NULL confirm_switch indicates switching in progress */
112		WARN_ON_ONCE(ref->confirm_switch);
113		free_percpu(percpu_count);
114		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
115	}
116}
117EXPORT_SYMBOL_GPL(percpu_ref_exit);
118
119static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
120{
121	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
122
123	ref->confirm_switch(ref);
124	ref->confirm_switch = NULL;
125	wake_up_all(&percpu_ref_switch_waitq);
126
127	if (!ref->allow_reinit)
128		percpu_ref_exit(ref);
129
130	/* drop ref from percpu_ref_switch_to_atomic() */
131	percpu_ref_put(ref);
132}
133
134static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
135{
136	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
137	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
138	unsigned long count = 0;
139	int cpu;
140
141	for_each_possible_cpu(cpu)
142		count += *per_cpu_ptr(percpu_count, cpu);
143
144	pr_debug("global %lu percpu %lu\n",
145		 atomic_long_read(&ref->count), count);
146
147	/*
148	 * It's crucial that we sum the percpu counters _before_ adding the sum
149	 * to &ref->count; since gets could be happening on one cpu while puts
150	 * happen on another, adding a single cpu's count could cause
151	 * @ref->count to hit 0 before we've got a consistent value - but the
152	 * sum of all the counts will be consistent and correct.
153	 *
154	 * Subtracting the bias value then has to happen _after_ adding count to
155	 * &ref->count; we need the bias value to prevent &ref->count from
156	 * reaching 0 before we add the percpu counts. But doing it at the same
157	 * time is equivalent and saves us atomic operations:
158	 */
159	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
160
161	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
162		  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
163		  ref->release, atomic_long_read(&ref->count));
164
165	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
166	percpu_ref_call_confirm_rcu(rcu);
167}
168
169static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
170{
171}
172
173static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
174					  percpu_ref_func_t *confirm_switch)
175{
176	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
177		if (confirm_switch)
178			confirm_switch(ref);
179		return;
180	}
181
182	/* switching from percpu to atomic */
183	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
184
185	/*
186	 * Non-NULL ->confirm_switch is used to indicate that switching is
187	 * in progress.  Use noop one if unspecified.
188	 */
189	ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
190
191	percpu_ref_get(ref);	/* put after confirmation */
192	call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
193}
194
195static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
196{
197	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
198	int cpu;
199
200	BUG_ON(!percpu_count);
201
202	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
203		return;
204
205	if (WARN_ON_ONCE(!ref->allow_reinit))
206		return;
207
208	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
209
210	/*
211	 * Restore per-cpu operation.  smp_store_release() is paired
212	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
213	 * zeroing is visible to all percpu accesses which can see the
214	 * following __PERCPU_REF_ATOMIC clearing.
215	 */
216	for_each_possible_cpu(cpu)
217		*per_cpu_ptr(percpu_count, cpu) = 0;
218
219	smp_store_release(&ref->percpu_count_ptr,
220			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
221}
222
223static void __percpu_ref_switch_mode(struct percpu_ref *ref,
224				     percpu_ref_func_t *confirm_switch)
225{
226	lockdep_assert_held(&percpu_ref_switch_lock);
227
228	/*
229	 * If the previous ATOMIC switching hasn't finished yet, wait for
230	 * its completion.  If the caller ensures that ATOMIC switching
231	 * isn't in progress, this function can be called from any context.
232	 */
233	wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
234			    percpu_ref_switch_lock);
235
236	if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
237		__percpu_ref_switch_to_atomic(ref, confirm_switch);
238	else
239		__percpu_ref_switch_to_percpu(ref);
240}
241
242/**
243 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
244 * @ref: percpu_ref to switch to atomic mode
245 * @confirm_switch: optional confirmation callback
246 *
247 * There's no reason to use this function for the usual reference counting.
248 * Use percpu_ref_kill[_and_confirm]().
249 *
250 * Schedule switching of @ref to atomic mode.  All its percpu counts will
251 * be collected to the main atomic counter.  On completion, when all CPUs
 252 * are guaranteed to be in atomic mode, @confirm_switch, which may not
253 * block, is invoked.  This function may be invoked concurrently with all
254 * the get/put operations and can safely be mixed with kill and reinit
255 * operations.  Note that @ref will stay in atomic mode across kill/reinit
256 * cycles until percpu_ref_switch_to_percpu() is called.
257 *
258 * This function may block if @ref is in the process of switching to atomic
259 * mode.  If the caller ensures that @ref is not in the process of
260 * switching to atomic mode, this function can be called from any context.
261 */
262void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
263				 percpu_ref_func_t *confirm_switch)
264{
265	unsigned long flags;
266
267	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
268
269	ref->force_atomic = true;
270	__percpu_ref_switch_mode(ref, confirm_switch);
271
272	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
273}
274EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
275
276/**
277 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
278 * @ref: percpu_ref to switch to atomic mode
279 *
280 * Schedule switching the ref to atomic mode, and wait for the
281 * switch to complete.  Caller must ensure that no other thread
282 * will switch back to percpu mode.
283 */
284void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
285{
286	percpu_ref_switch_to_atomic(ref, NULL);
287	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
288}
289EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
290
291/**
292 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
293 * @ref: percpu_ref to switch to percpu mode
294 *
295 * There's no reason to use this function for the usual reference counting.
296 * To re-use an expired ref, use percpu_ref_reinit().
297 *
298 * Switch @ref to percpu mode.  This function may be invoked concurrently
299 * with all the get/put operations and can safely be mixed with kill and
300 * reinit operations.  This function reverses the sticky atomic state set
301 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic().  If @ref is
302 * dying or dead, the actual switching takes place on the following
303 * percpu_ref_reinit().
304 *
305 * This function may block if @ref is in the process of switching to atomic
306 * mode.  If the caller ensures that @ref is not in the process of
307 * switching to atomic mode, this function can be called from any context.
308 */
309void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
310{
311	unsigned long flags;
312
313	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
314
315	ref->force_atomic = false;
316	__percpu_ref_switch_mode(ref, NULL);
317
318	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
319}
320EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
321
322/**
323 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
324 * @ref: percpu_ref to kill
325 * @confirm_kill: optional confirmation callback
326 *
327 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
328 * @confirm_kill is not NULL.  @confirm_kill, which may not block, will be
329 * called after @ref is seen as dead from all CPUs at which point all
330 * further invocations of percpu_ref_tryget_live() will fail.  See
331 * percpu_ref_tryget_live() for details.
332 *
333 * This function normally doesn't block and can be called from any context
334 * but it may block if @confirm_kill is specified and @ref is in the
335 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
336 *
337 * There are no implied RCU grace periods between kill and release.
338 */
339void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
340				 percpu_ref_func_t *confirm_kill)
341{
342	unsigned long flags;
343
344	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
345
346	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
347		  "%s called more than once on %ps!", __func__, ref->release);
348
349	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
350	__percpu_ref_switch_mode(ref, confirm_kill);
351	percpu_ref_put(ref);
352
353	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
354}
355EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
356
357/**
358 * percpu_ref_reinit - re-initialize a percpu refcount
 359 * @ref: percpu_ref to re-initialize
360 *
361 * Re-initialize @ref so that it's in the same state as when it finished
362 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD.  @ref must have been
363 * initialized successfully and reached 0 but not exited.
364 *
365 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
366 * this function is in progress.
367 */
368void percpu_ref_reinit(struct percpu_ref *ref)
369{
370	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
371
372	percpu_ref_resurrect(ref);
373}
374EXPORT_SYMBOL_GPL(percpu_ref_reinit);
375
376/**
377 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 378 * @ref: percpu_ref to resurrect
379 *
380 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
381 * called. @ref must be dead but must not yet have exited.
382 *
383 * If @ref->release() frees @ref then the caller is responsible for
384 * guaranteeing that @ref->release() does not get called while this
385 * function is in progress.
386 *
387 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
388 * this function is in progress.
389 */
390void percpu_ref_resurrect(struct percpu_ref *ref)
391{
392	unsigned long __percpu *percpu_count;
393	unsigned long flags;
394
395	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
396
397	WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
398	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
399
400	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
401	percpu_ref_get(ref);
402	__percpu_ref_switch_mode(ref, NULL);
403
404	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
405}
406EXPORT_SYMBOL_GPL(percpu_ref_resurrect);