v3.15
 
  1/*
  2 * Fast batching percpu counters.
  3 */
  4
  5#include <linux/percpu_counter.h>
  6#include <linux/notifier.h>
  7#include <linux/mutex.h>
  8#include <linux/init.h>
  9#include <linux/cpu.h>
 10#include <linux/module.h>
 11#include <linux/debugobjects.h>
 12
 13#ifdef CONFIG_HOTPLUG_CPU
 14static LIST_HEAD(percpu_counters);
 15static DEFINE_SPINLOCK(percpu_counters_lock);
 16#endif
 17
 18#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
 19
 20static struct debug_obj_descr percpu_counter_debug_descr;
 21
 22static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
 23{
 24	struct percpu_counter *fbc = addr;
 25
 26	switch (state) {
 27	case ODEBUG_STATE_ACTIVE:
 28		percpu_counter_destroy(fbc);
 29		debug_object_free(fbc, &percpu_counter_debug_descr);
 30		return 1;
 31	default:
 32		return 0;
 33	}
 34}
 35
 36static struct debug_obj_descr percpu_counter_debug_descr = {
 37	.name		= "percpu_counter",
 38	.fixup_free	= percpu_counter_fixup_free,
 39};
 40
 41static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
 42{
 43	debug_object_init(fbc, &percpu_counter_debug_descr);
 44	debug_object_activate(fbc, &percpu_counter_debug_descr);
 45}
 46
 47static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
 48{
 49	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
 50	debug_object_free(fbc, &percpu_counter_debug_descr);
 51}
 52
 53#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
 54static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
 55{ }
 56static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
 57{ }
 58#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
 59
 60void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 61{
 62	int cpu;
 63	unsigned long flags;
 64
 65	raw_spin_lock_irqsave(&fbc->lock, flags);
 66	for_each_possible_cpu(cpu) {
 67		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 68		*pcount = 0;
 69	}
 70	fbc->count = amount;
 71	raw_spin_unlock_irqrestore(&fbc->lock, flags);
 72}
 73EXPORT_SYMBOL(percpu_counter_set);
 74
 75void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 76{
 77	s64 count;
 78
 79	preempt_disable();
 80	count = __this_cpu_read(*fbc->counters) + amount;
 81	if (count >= batch || count <= -batch) {
 82		unsigned long flags;
 83		raw_spin_lock_irqsave(&fbc->lock, flags);
 84		fbc->count += count;
 85		__this_cpu_sub(*fbc->counters, count - amount);
 86		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 87	} else {
 88		this_cpu_add(*fbc->counters, amount);
 89	}
 90	preempt_enable();
 91}
 92EXPORT_SYMBOL(__percpu_counter_add);
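The function above is the heart of the batching scheme: per-CPU deltas stay below "batch", and only when a delta crosses that threshold is the spinlocked global count touched. As a rough illustration of how callers typically drive it, here is a minimal editorial sketch (not part of the file); it assumes the usual wrappers from <linux/percpu_counter.h> of this era, where percpu_counter_add()/percpu_counter_inc() pass the global percpu_counter_batch, and the counter name and explicit batch value are made up for the example.

/* Illustrative sketch only -- not part of lib/percpu_counter.c. */
static struct percpu_counter nr_widgets;

static void widget_created(void)
{
	/* Cheap per-CPU update; the global count may lag by up to
	 * percpu_counter_batch on each CPU. */
	percpu_counter_inc(&nr_widgets);
}

static void widgets_freed(unsigned int n)
{
	/* Explicit batch: a larger value means fewer lock acquisitions
	 * but a sloppier fbc->count between folds. */
	__percpu_counter_add(&nr_widgets, -(s64)n, 64);
}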
 93
 94/*
 95 * Add up all the per-cpu counts, return the result.  This is a more accurate
  96 * but much slower version of percpu_counter_read_positive().
 97 */
 98s64 __percpu_counter_sum(struct percpu_counter *fbc)
 99{
100	s64 ret;
101	int cpu;
102	unsigned long flags;
103
104	raw_spin_lock_irqsave(&fbc->lock, flags);
105	ret = fbc->count;
106	for_each_online_cpu(cpu) {
107		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
108		ret += *pcount;
109	}
110	raw_spin_unlock_irqrestore(&fbc->lock, flags);
111	return ret;
112}
113EXPORT_SYMBOL(__percpu_counter_sum);
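For callers, the trade-off is between percpu_counter_read(), which just returns fbc->count and can be off by up to percpu_counter_batch * num_online_cpus(), and the exact-but-locked sum above. A small hedged sketch, reusing the hypothetical nr_widgets counter from the earlier example:

/* Illustrative sketch only. */
static void report_widgets(void)
{
	s64 approx = percpu_counter_read(&nr_widgets);	/* O(1), approximate */
	s64 exact  = percpu_counter_sum(&nr_widgets);	/* O(nr_cpus), exact */

	pr_info("widgets: ~%lld (exact %lld)\n", approx, exact);
}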
114
115int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
116			  struct lock_class_key *key)
117{
118	raw_spin_lock_init(&fbc->lock);
119	lockdep_set_class(&fbc->lock, key);
120	fbc->count = amount;
121	fbc->counters = alloc_percpu(s32);
122	if (!fbc->counters)
123		return -ENOMEM;
124
125	debug_percpu_counter_activate(fbc);
126
127#ifdef CONFIG_HOTPLUG_CPU
128	INIT_LIST_HEAD(&fbc->list);
129	spin_lock(&percpu_counters_lock);
130	list_add(&fbc->list, &percpu_counters);
131	spin_unlock(&percpu_counters_lock);
132#endif
133	return 0;
134}
135EXPORT_SYMBOL(__percpu_counter_init);
136
137void percpu_counter_destroy(struct percpu_counter *fbc)
138{
139	if (!fbc->counters)
140		return;
141
142	debug_percpu_counter_deactivate(fbc);
143
144#ifdef CONFIG_HOTPLUG_CPU
145	spin_lock(&percpu_counters_lock);
146	list_del(&fbc->list);
147	spin_unlock(&percpu_counters_lock);
148#endif
149	free_percpu(fbc->counters);
150	fbc->counters = NULL;
151}
152EXPORT_SYMBOL(percpu_counter_destroy);
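Together with __percpu_counter_init() above, this completes the lifecycle. A minimal sketch, again using the hypothetical nr_widgets counter and assuming the percpu_counter_init() wrapper macro from the header (which supplies the lock_class_key for lockdep); note that in this v3.15 API there is no gfp argument yet:

/* Illustrative sketch only. */
static int widgets_start(void)
{
	int err;

	err = percpu_counter_init(&nr_widgets, 0);
	if (err)
		return err;			/* -ENOMEM: no per-CPU storage */

	percpu_counter_add(&nr_widgets, 1);
	/* ... */
	percpu_counter_destroy(&nr_widgets);	/* frees counters, unlinks from hotplug list */
	return 0;
}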
153
154int percpu_counter_batch __read_mostly = 32;
155EXPORT_SYMBOL(percpu_counter_batch);
156
157static void compute_batch_value(void)
158{
159	int nr = num_online_cpus();
160
161	percpu_counter_batch = max(32, nr*2);
162}
163
164static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
165					unsigned long action, void *hcpu)
166{
167#ifdef CONFIG_HOTPLUG_CPU
168	unsigned int cpu;
169	struct percpu_counter *fbc;
170
171	compute_batch_value();
172	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
173		return NOTIFY_OK;
174
175	cpu = (unsigned long)hcpu;
176	spin_lock(&percpu_counters_lock);
177	list_for_each_entry(fbc, &percpu_counters, list) {
178		s32 *pcount;
179		unsigned long flags;
180
181		raw_spin_lock_irqsave(&fbc->lock, flags);
182		pcount = per_cpu_ptr(fbc->counters, cpu);
183		fbc->count += *pcount;
184		*pcount = 0;
185		raw_spin_unlock_irqrestore(&fbc->lock, flags);
186	}
187	spin_unlock(&percpu_counters_lock);
188#endif
189	return NOTIFY_OK;
190}
191
192/*
193 * Compare counter against given value.
194 * Return 1 if greater, 0 if equal and -1 if less
195 */
196int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
197{
198	s64	count;
199
200	count = percpu_counter_read(fbc);
201	/* Check to see if rough count will be sufficient for comparison */
202	if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
203		if (count > rhs)
204			return 1;
205		else
206			return -1;
207	}
208	/* Need to use precise count */
209	count = percpu_counter_sum(fbc);
210	if (count > rhs)
211		return 1;
212	else if (count < rhs)
213		return -1;
214	else
215		return 0;
216}
217EXPORT_SYMBOL(percpu_counter_compare);
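percpu_counter_compare() packages the usual pattern: use the cheap approximate read first, and fall back to the exact sum only when the result is close enough to the threshold to matter. A hedged sketch of a limit check (the typical use, e.g. free-space accounting), reusing the hypothetical nr_widgets counter; the helper name and limit are illustrative:

/* Illustrative sketch only. */
static bool widgets_below_limit(s64 limit)
{
	/* < 0 means the counter is (precisely, if necessary) below the limit. */
	return percpu_counter_compare(&nr_widgets, limit) < 0;
}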
218
219static int __init percpu_counter_startup(void)
220{
221	compute_batch_value();
222	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
223	return 0;
224}
225module_init(percpu_counter_startup);
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Fast batching percpu counters.
  4 */
  5
  6#include <linux/percpu_counter.h>
  7#include <linux/mutex.h>
  8#include <linux/init.h>
  9#include <linux/cpu.h>
 10#include <linux/module.h>
 11#include <linux/debugobjects.h>
 12
 13#ifdef CONFIG_HOTPLUG_CPU
 14static LIST_HEAD(percpu_counters);
 15static DEFINE_SPINLOCK(percpu_counters_lock);
 16#endif
 17
 18#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
 19
 20static const struct debug_obj_descr percpu_counter_debug_descr;
 21
 22static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
 23{
 24	struct percpu_counter *fbc = addr;
 25
 26	switch (state) {
 27	case ODEBUG_STATE_ACTIVE:
 28		percpu_counter_destroy(fbc);
 29		debug_object_free(fbc, &percpu_counter_debug_descr);
 30		return true;
 31	default:
 32		return false;
 33	}
 34}
 35
 36static const struct debug_obj_descr percpu_counter_debug_descr = {
 37	.name		= "percpu_counter",
 38	.fixup_free	= percpu_counter_fixup_free,
 39};
 40
 41static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
 42{
 43	debug_object_init(fbc, &percpu_counter_debug_descr);
 44	debug_object_activate(fbc, &percpu_counter_debug_descr);
 45}
 46
 47static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
 48{
 49	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
 50	debug_object_free(fbc, &percpu_counter_debug_descr);
 51}
 52
 53#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
 54static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
 55{ }
 56static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
 57{ }
 58#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
 59
 60void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 61{
 62	int cpu;
 63	unsigned long flags;
 64
 65	raw_spin_lock_irqsave(&fbc->lock, flags);
 66	for_each_possible_cpu(cpu) {
 67		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 68		*pcount = 0;
 69	}
 70	fbc->count = amount;
 71	raw_spin_unlock_irqrestore(&fbc->lock, flags);
 72}
 73EXPORT_SYMBOL(percpu_counter_set);
 74
 75/*
 76 * Add to a counter while respecting batch size.
 77 *
 78 * There are 2 implementations, both dealing with the following problem:
 79 *
 80 * The decision between slow path and fast path, and the actual update,
 81 * must be atomic. Otherwise a call in process context could check the
 82 * current values and decide that the fast path can be used. If an
 83 * interrupt then occurs before the this_cpu_add(), and the interrupt
 84 * updates this_cpu(*fbc->counters), the this_cpu_add() executed after the
 85 * interrupt completes can produce values larger than "batch" or even overflow.
 86 */
 87#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
 88/*
 89 * Safety against interrupts is achieved in 2 ways:
 90 * 1. the fast path uses local cmpxchg (note: no lock prefix)
 91 * 2. the slow path operates with interrupts disabled
 92 */
 93void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 94{
 95	s64 count;
 96	unsigned long flags;
 97
 98	count = this_cpu_read(*fbc->counters);
 99	do {
100		if (unlikely(abs(count + amount) >= batch)) {
101			raw_spin_lock_irqsave(&fbc->lock, flags);
102			/*
103			 * Note: by now we might have migrated to another CPU
104			 * or the value might have changed.
105			 */
106			count = __this_cpu_read(*fbc->counters);
107			fbc->count += count + amount;
108			__this_cpu_sub(*fbc->counters, count);
109			raw_spin_unlock_irqrestore(&fbc->lock, flags);
110			return;
111		}
112	} while (!this_cpu_try_cmpxchg(*fbc->counters, &count, count + amount));
113}
114#else
115/*
116 * local_irq_save() is used to make the function irq safe:
117 * - The slow path would be ok as protected by an irq-safe spinlock.
118 * - this_cpu_add would be ok as it is irq-safe by definition.
119 */
120void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
121{
122	s64 count;
123	unsigned long flags;
124
125	local_irq_save(flags);
126	count = __this_cpu_read(*fbc->counters) + amount;
127	if (abs(count) >= batch) {
128		raw_spin_lock(&fbc->lock);
129		fbc->count += count;
130		__this_cpu_sub(*fbc->counters, count - amount);
131		raw_spin_unlock(&fbc->lock);
132	} else {
133		this_cpu_add(*fbc->counters, amount);
134	}
135	local_irq_restore(flags);
136}
137#endif
138EXPORT_SYMBOL(percpu_counter_add_batch);
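In this version the exported primitive is percpu_counter_add_batch(); percpu_counter_add() and friends in the header are expected to pass the global percpu_counter_batch. A short hedged sketch of the batch trade-off, with a made-up counter: a larger batch makes updates cheaper but lets the global count drift further (up to batch * num_online_cpus()).

/* Illustrative sketch only -- not part of lib/percpu_counter.c. */
static struct percpu_counter nr_dirty_objs;

static void obj_dirtied(void)
{
	/* Default batch via the header wrapper: a reasonable general trade-off. */
	percpu_counter_inc(&nr_dirty_objs);
}

static void objs_cleaned(long n)
{
	/* Explicit larger batch for a hot path: fewer fbc->lock acquisitions,
	 * at the price of a sloppier fbc->count between folds. */
	percpu_counter_add_batch(&nr_dirty_objs, -n, 256);
}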
139
140/*
 141 * For a percpu_counter with a big batch, the deviation of its count can
 142 * be big, and there may be a requirement to reduce that deviation, such as
 143 * when the counter's batch is decreased at runtime to get better accuracy,
144 * which can be achieved by running this sync function on each CPU.
145 */
146void percpu_counter_sync(struct percpu_counter *fbc)
147{
148	unsigned long flags;
149	s64 count;
150
151	raw_spin_lock_irqsave(&fbc->lock, flags);
152	count = __this_cpu_read(*fbc->counters);
153	fbc->count += count;
154	__this_cpu_sub(*fbc->counters, count);
155	raw_spin_unlock_irqrestore(&fbc->lock, flags);
156}
157EXPORT_SYMBOL(percpu_counter_sync);
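percpu_counter_sync() only folds the calling CPU's delta, so reducing the overall deviation means running it on every CPU. A hedged sketch of one way to do that, via on_each_cpu(); whether an IPI broadcast is appropriate depends on the caller's context, and the wrapper names are illustrative:

/* Illustrative sketch only. */
static void sync_one(void *info)
{
	percpu_counter_sync(info);	/* fold this CPU's delta into fbc->count */
}

static void sync_all_cpus(struct percpu_counter *fbc)
{
	on_each_cpu(sync_one, fbc, 1);	/* run on every online CPU, wait for completion */
}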
158
159/*
160 * Add up all the per-cpu counts, return the result.  This is a more accurate
161 * but much slower version of percpu_counter_read_positive().
162 *
163 * We use the cpu mask of (cpu_online_mask | cpu_dying_mask) to capture sums
164 * from CPUs that are in the process of being taken offline. Dying cpus have
165 * been removed from the online mask, but may not have had the hotplug dead
166 * notifier called to fold the percpu count back into the global counter sum.
167 * By including dying CPUs in the iteration mask, we avoid this race condition
168 * so __percpu_counter_sum() just does the right thing when CPUs are being taken
169 * offline.
170 */
171s64 __percpu_counter_sum(struct percpu_counter *fbc)
172{
173	s64 ret;
174	int cpu;
175	unsigned long flags;
176
177	raw_spin_lock_irqsave(&fbc->lock, flags);
178	ret = fbc->count;
179	for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
180		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
181		ret += *pcount;
182	}
183	raw_spin_unlock_irqrestore(&fbc->lock, flags);
184	return ret;
185}
186EXPORT_SYMBOL(__percpu_counter_sum);
187
188int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
189			       gfp_t gfp, u32 nr_counters,
190			       struct lock_class_key *key)
191{
192	unsigned long flags __maybe_unused;
193	size_t counter_size;
194	s32 __percpu *counters;
195	u32 i;
196
197	counter_size = ALIGN(sizeof(*counters), __alignof__(*counters));
198	counters = __alloc_percpu_gfp(nr_counters * counter_size,
199				      __alignof__(*counters), gfp);
200	if (!counters) {
201		fbc[0].counters = NULL;
202		return -ENOMEM;
203	}
204
205	for (i = 0; i < nr_counters; i++) {
206		raw_spin_lock_init(&fbc[i].lock);
207		lockdep_set_class(&fbc[i].lock, key);
208#ifdef CONFIG_HOTPLUG_CPU
209		INIT_LIST_HEAD(&fbc[i].list);
210#endif
211		fbc[i].count = amount;
212		fbc[i].counters = (void __percpu *)counters + i * counter_size;
213
214		debug_percpu_counter_activate(&fbc[i]);
215	}
216
217#ifdef CONFIG_HOTPLUG_CPU
218	spin_lock_irqsave(&percpu_counters_lock, flags);
219	for (i = 0; i < nr_counters; i++)
220		list_add(&fbc[i].list, &percpu_counters);
221	spin_unlock_irqrestore(&percpu_counters_lock, flags);
222#endif
223	return 0;
224}
225EXPORT_SYMBOL(__percpu_counter_init_many);
226
227void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters)
228{
229	unsigned long flags __maybe_unused;
230	u32 i;
231
232	if (WARN_ON_ONCE(!fbc))
233		return;
234
235	if (!fbc[0].counters)
236		return;
237
238	for (i = 0; i < nr_counters; i++)
239		debug_percpu_counter_deactivate(&fbc[i]);
240
241#ifdef CONFIG_HOTPLUG_CPU
242	spin_lock_irqsave(&percpu_counters_lock, flags);
243	for (i = 0; i < nr_counters; i++)
244		list_del(&fbc[i].list);
245	spin_unlock_irqrestore(&percpu_counters_lock, flags);
246#endif
247
248	free_percpu(fbc[0].counters);
249
250	for (i = 0; i < nr_counters; i++)
251		fbc[i].counters = NULL;
252}
253EXPORT_SYMBOL(percpu_counter_destroy_many);
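The *_many variants back an array of counters with a single per-CPU allocation and tear it down the same way. A minimal sketch, assuming the percpu_counter_init_many() wrapper macro from the header; the array size and GFP flags are illustrative:

/* Illustrative sketch only. */
#define NR_STATS	4

static struct percpu_counter stats[NR_STATS];

static int stats_init(void)
{
	/* One percpu allocation covers all NR_STATS counters. */
	return percpu_counter_init_many(stats, 0, GFP_KERNEL, NR_STATS);
}

static void stats_exit(void)
{
	percpu_counter_destroy_many(stats, NR_STATS);
}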
254
255int percpu_counter_batch __read_mostly = 32;
256EXPORT_SYMBOL(percpu_counter_batch);
257
258static int compute_batch_value(unsigned int cpu)
259{
260	int nr = num_online_cpus();
261
262	percpu_counter_batch = max(32, nr*2);
263	return 0;
264}
265
266static int percpu_counter_cpu_dead(unsigned int cpu)
267{
268#ifdef CONFIG_HOTPLUG_CPU
269	struct percpu_counter *fbc;
270
271	compute_batch_value(cpu);
272
273	spin_lock_irq(&percpu_counters_lock);
274	list_for_each_entry(fbc, &percpu_counters, list) {
275		s32 *pcount;
276
277		raw_spin_lock(&fbc->lock);
278		pcount = per_cpu_ptr(fbc->counters, cpu);
279		fbc->count += *pcount;
280		*pcount = 0;
281		raw_spin_unlock(&fbc->lock);
282	}
283	spin_unlock_irq(&percpu_counters_lock);
284#endif
285	return 0;
286}
287
288/*
289 * Compare counter against given value.
290 * Return 1 if greater, 0 if equal and -1 if less
291 */
292int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
293{
294	s64	count;
295
296	count = percpu_counter_read(fbc);
297	/* Check to see if rough count will be sufficient for comparison */
298	if (abs(count - rhs) > (batch * num_online_cpus())) {
299		if (count > rhs)
300			return 1;
301		else
302			return -1;
303	}
304	/* Need to use precise count */
305	count = percpu_counter_sum(fbc);
306	if (count > rhs)
307		return 1;
308	else if (count < rhs)
309		return -1;
310	else
311		return 0;
312}
313EXPORT_SYMBOL(__percpu_counter_compare);
314
315/*
316 * Compare counter, and add amount if total is: less than or equal to limit if
317 * amount is positive, or greater than or equal to limit if amount is negative.
318 * Return true if amount is added, or false if total would be beyond the limit.
319 *
320 * Negative limit is allowed, but unusual.
321 * When negative amounts (subs) are given to percpu_counter_limited_add(),
322 * the limit would most naturally be 0 - but other limits are also allowed.
323 *
324 * Overflow beyond S64_MAX is not allowed for: counter, limit and amount
325 * are all assumed to be sane (far from S64_MIN and S64_MAX).
326 */
327bool __percpu_counter_limited_add(struct percpu_counter *fbc,
328				  s64 limit, s64 amount, s32 batch)
329{
330	s64 count;
331	s64 unknown;
332	unsigned long flags;
333	bool good = false;
334
335	if (amount == 0)
336		return true;
337
338	local_irq_save(flags);
339	unknown = batch * num_online_cpus();
340	count = __this_cpu_read(*fbc->counters);
341
342	/* Skip taking the lock when safe */
343	if (abs(count + amount) <= batch &&
344	    ((amount > 0 && fbc->count + unknown <= limit) ||
345	     (amount < 0 && fbc->count - unknown >= limit))) {
346		this_cpu_add(*fbc->counters, amount);
347		local_irq_restore(flags);
348		return true;
349	}
350
351	raw_spin_lock(&fbc->lock);
352	count = fbc->count + amount;
353
354	/* Skip percpu_counter_sum() when safe */
355	if (amount > 0) {
356		if (count - unknown > limit)
357			goto out;
358		if (count + unknown <= limit)
359			good = true;
360	} else {
361		if (count + unknown < limit)
362			goto out;
363		if (count - unknown >= limit)
364			good = true;
365	}
366
367	if (!good) {
368		s32 *pcount;
369		int cpu;
370
371		for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
372			pcount = per_cpu_ptr(fbc->counters, cpu);
373			count += *pcount;
374		}
375		if (amount > 0) {
376			if (count > limit)
377				goto out;
378		} else {
379			if (count < limit)
380				goto out;
381		}
382		good = true;
383	}
384
385	count = __this_cpu_read(*fbc->counters);
386	fbc->count += count + amount;
387	__this_cpu_sub(*fbc->counters, count);
388out:
389	raw_spin_unlock(&fbc->lock);
390	local_irq_restore(flags);
391	return good;
392}
393
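__percpu_counter_limited_add() fuses the comparison and the addition so that concurrent adders cannot collectively overshoot the limit. A hedged sketch of a quota-style caller, assuming the percpu_counter_limited_add() wrapper from the header (which supplies percpu_counter_batch); the helper names are made up:

/* Illustrative sketch only. */
static int charge_blocks(struct percpu_counter *used, s64 limit, s64 nr)
{
	/* Atomically: add nr unless that would push the total past limit. */
	if (!percpu_counter_limited_add(used, limit, nr))
		return -ENOSPC;
	return 0;
}

static void uncharge_blocks(struct percpu_counter *used, s64 nr)
{
	percpu_counter_sub(used, nr);
}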
394static int __init percpu_counter_startup(void)
395{
396	int ret;
397
398	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
399				compute_batch_value, NULL);
400	WARN_ON(ret < 0);
401	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
402					"lib/percpu_cnt:dead", NULL,
403					percpu_counter_cpu_dead);
404	WARN_ON(ret < 0);
405	return 0;
406}
407module_init(percpu_counter_startup);