v3.15

/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);
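
As a usage illustration (a hypothetical caller, not part of this file): percpu_counter_add() and percpu_counter_inc() from <linux/percpu_counter.h> funnel into __percpu_counter_add() with the default percpu_counter_batch, so updates normally stay on the this_cpu_add() fast path and only spill into fbc->count under the lock once a CPU's local delta reaches the batch. A minimal sketch, with nr_widgets as an assumed example counter:

static struct percpu_counter nr_widgets;	/* hypothetical counter */

static void widget_created(void)
{
	/*
	 * Usually just a local this_cpu_add(); takes fbc->lock only
	 * when this CPU's delta reaches +/- percpu_counter_batch.
	 */
	percpu_counter_inc(&nr_widgets);
}

static s64 widgets_estimate(void)
{
	/*
	 * Lock-free read of fbc->count; may be stale by up to
	 * batch * num_online_cpus().
	 */
	return percpu_counter_read(&nr_widgets);
}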

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
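
The accuracy/cost trade-off between the two read paths, sketched with the hypothetical nr_widgets counter from above:

static void widgets_report(void)
{
	/* O(1), no lock; error bounded by batch * num_online_cpus(). */
	s64 approx = percpu_counter_read(&nr_widgets);
	/* O(num_online_cpus()); takes fbc->lock and folds in every delta. */
	s64 exact = percpu_counter_sum(&nr_widgets);

	pr_info("widgets: ~%lld (exact %lld)\n", approx, exact);
}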

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key)
{
	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu(s32);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock(&percpu_counters_lock);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock(&percpu_counters_lock);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&percpu_counters_lock);
	list_del(&fbc->list);
	spin_unlock(&percpu_counters_lock);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);
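
The lifecycle from a caller's side might look like the hypothetical module below. In this version percpu_counter_init() is a header macro that supplies the lock_class_key and calls __percpu_counter_init(); note it takes no gfp argument here (the v5.9 version further down adds one):

static int __init widgets_init(void)
{
	int err;

	err = percpu_counter_init(&nr_widgets, 0);
	if (err)
		return err;	/* -ENOMEM if alloc_percpu() failed */
	return 0;
}

static void __exit widgets_exit(void)
{
	/* Safe even if init failed: destroy bails out on !fbc->counters. */
	percpu_counter_destroy(&nr_widgets);
}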

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static void compute_batch_value(void)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
}

static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu;
	struct percpu_counter *fbc;

	compute_batch_value();
	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	cpu = (unsigned long)hcpu;
	spin_lock(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;
		unsigned long flags;

		raw_spin_lock_irqsave(&fbc->lock, flags);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	}
	spin_unlock(&percpu_counters_lock);
#endif
	return NOTIFY_OK;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	s64	count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(percpu_counter_compare);
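
This two-step scheme is what makes limit checks cheap: percpu_counter_read() can be off by at most percpu_counter_batch per online CPU, so a rough count far from rhs already decides the comparison, and only the borderline case pays for percpu_counter_sum(). A caller sketch, again using the hypothetical nr_widgets:

static bool widgets_over_limit(s64 limit)
{
	/* Cheap when |count - limit| > batch * nr_cpus; exact otherwise. */
	return percpu_counter_compare(&nr_widgets, limit) > 0;
}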

static int __init percpu_counter_startup(void)
{
	compute_batch_value();
	hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
	return 0;
}
module_init(percpu_counter_startup);
v5.9

// SPDX-License-Identifier: GPL-2.0
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
	struct percpu_counter *fbc = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		percpu_counter_destroy(fbc);
		debug_object_free(fbc, &percpu_counter_debug_descr);
		return true;
	default:
		return false;
	}
}

static struct debug_obj_descr percpu_counter_debug_descr = {
	.name		= "percpu_counter",
	.fixup_free	= percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
	debug_object_deactivate(fbc, &percpu_counter_debug_descr);
	debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	for_each_possible_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		*pcount = 0;
	}
	fbc->count = amount;
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

/*
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock whereas the fast path uses
 * this_cpu_add(), which is irq-safe by definition. Hence there is no need to
 * muck with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;
		raw_spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
EXPORT_SYMBOL(percpu_counter_add_batch);
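
Callers that tolerate more drift can pass a larger batch to keep hot updates off fbc->lock almost entirely. A sketch with hypothetical names (widget_bytes is an assumed counter, the 8x factor an arbitrary tuning choice):

static struct percpu_counter widget_bytes;	/* hypothetical counter */

static void widget_bytes_add(s64 bytes)
{
	/*
	 * 8x the default batch: fewer lock acquisitions, but reads may
	 * drift by up to 8 * percpu_counter_batch per CPU.
	 */
	percpu_counter_add_batch(&widget_bytes, bytes,
				 percpu_counter_batch * 8);
}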

/*
 * For a percpu_counter with a big batch, the deviation of its count could
 * be big, and there may be a requirement to reduce that deviation, e.g. when
 * the counter's batch is decreased at runtime to get better accuracy. That
 * can be achieved by running this sync function on each CPU.
 */
void percpu_counter_sync(struct percpu_counter *fbc)
{
	unsigned long flags;
	s64 count;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	count = __this_cpu_read(*fbc->counters);
	fbc->count += count;
	__this_cpu_sub(*fbc->counters, count);
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_sync);
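
percpu_counter_sync() only folds in the calling CPU's local delta, so shrinking the deviation machine-wide means running it on every CPU, for instance via on_each_cpu(). A minimal sketch, assuming hypothetical wrapper names:

static void widget_sync_one(void *arg)
{
	percpu_counter_sync(arg);	/* folds this CPU's delta into fbc->count */
}

static void widget_sync_all(struct percpu_counter *fbc)
{
	on_each_cpu(widget_sync_one, fbc, 1);	/* wait=1: all CPUs done on return */
}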

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
	s64 ret;
	int cpu;
	unsigned long flags;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu) {
		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
		ret += *pcount;
	}
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key)
{
	unsigned long flags __maybe_unused;

	raw_spin_lock_init(&fbc->lock);
	lockdep_set_class(&fbc->lock, key);
	fbc->count = amount;
	fbc->counters = alloc_percpu_gfp(s32, gfp);
	if (!fbc->counters)
		return -ENOMEM;

	debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	INIT_LIST_HEAD(&fbc->list);
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_add(&fbc->list, &percpu_counters);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
	unsigned long flags __maybe_unused;

	if (!fbc->counters)
		return;

	debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
	spin_lock_irqsave(&percpu_counters_lock, flags);
	list_del(&fbc->list);
	spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
	free_percpu(fbc->counters);
	fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static int compute_batch_value(unsigned int cpu)
{
	int nr = num_online_cpus();

	percpu_counter_batch = max(32, nr*2);
	return 0;
}

static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	compute_batch_value(cpu);

	spin_lock_irq(&percpu_counters_lock);
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	return 0;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	s64	count;

	count = percpu_counter_read(fbc);
	/* Check to see if rough count will be sufficient for comparison */
	if (abs(count - rhs) > (batch * num_online_cpus())) {
		if (count > rhs)
			return 1;
		else
			return -1;
	}
	/* Need to use precise count */
	count = percpu_counter_sum(fbc);
	if (count > rhs)
		return 1;
	else if (count < rhs)
		return -1;
	else
		return 0;
}
EXPORT_SYMBOL(__percpu_counter_compare);

static int __init percpu_counter_startup(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lib/percpu_cnt:online",
				compute_batch_value, NULL);
	WARN_ON(ret < 0);
	ret = cpuhp_setup_state_nocalls(CPUHP_PERCPU_CNT_DEAD,
					"lib/percpu_cnt:dead", NULL,
					percpu_counter_cpu_dead);
	WARN_ON(ret < 0);
	return 0;
}
module_init(percpu_counter_startup);