v3.15
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(percpu_counters);
static DEFINE_SPINLOCK(percpu_counters_lock);
#endif

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
        struct percpu_counter *fbc = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                percpu_counter_destroy(fbc);
                debug_object_free(fbc, &percpu_counter_debug_descr);
                return 1;
        default:
                return 0;
        }
}

static struct debug_obj_descr percpu_counter_debug_descr = {
        .name           = "percpu_counter",
        .fixup_free     = percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
        debug_object_init(fbc, &percpu_counter_debug_descr);
        debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
        debug_object_deactivate(fbc, &percpu_counter_debug_descr);
        debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        int cpu;
        unsigned long flags;

        raw_spin_lock_irqsave(&fbc->lock, flags);
        for_each_possible_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
        raw_spin_unlock_irqrestore(&fbc->lock, flags);
}
EXPORT_SYMBOL(percpu_counter_set);

void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        s64 count;

        preempt_disable();
        count = __this_cpu_read(*fbc->counters) + amount;
        if (count >= batch || count <= -batch) {
                unsigned long flags;
                raw_spin_lock_irqsave(&fbc->lock, flags);
                fbc->count += count;
                __this_cpu_sub(*fbc->counters, count - amount);
                raw_spin_unlock_irqrestore(&fbc->lock, flags);
        } else {
                this_cpu_add(*fbc->counters, amount);
        }
        preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);
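
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * most users go through the percpu_counter_add()/percpu_counter_sub()
 * wrappers from <linux/percpu_counter.h>, which pass percpu_counter_batch
 * as the batch argument, so fbc->lock and fbc->count are only touched once
 * a cpu's local delta reaches the batch size:
 *
 *	static struct percpu_counter nr_items;	// hypothetical counter
 *	int err;
 *
 *	err = percpu_counter_init(&nr_items, 0);
 *	if (err)
 *		return err;
 *	percpu_counter_add(&nr_items, 1);	// usually per-cpu only
 *	percpu_counter_sub(&nr_items, 1);
 *	percpu_counter_destroy(&nr_items);
 */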

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
        s64 ret;
        int cpu;
        unsigned long flags;

        raw_spin_lock_irqsave(&fbc->lock, flags);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
        raw_spin_unlock_irqrestore(&fbc->lock, flags);
        return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
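
/*
 * Illustrative sketch (hypothetical counter, not part of this file):
 * percpu_counter_read() simply returns fbc->count, so it can deviate by
 * roughly percpu_counter_batch * num_online_cpus(), while
 * percpu_counter_sum() (the wrapper around __percpu_counter_sum() above)
 * takes the lock and folds in every online cpu's delta:
 *
 *	s64 rough = percpu_counter_read(&nr_items);	// fast, approximate
 *	s64 exact = percpu_counter_sum(&nr_items);	// slow, precise
 */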

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                          struct lock_class_key *key)
{
        raw_spin_lock_init(&fbc->lock);
        lockdep_set_class(&fbc->lock, key);
        fbc->count = amount;
        fbc->counters = alloc_percpu(s32);
        if (!fbc->counters)
                return -ENOMEM;

        debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        INIT_LIST_HEAD(&fbc->list);
        spin_lock(&percpu_counters_lock);
        list_add(&fbc->list, &percpu_counters);
        spin_unlock(&percpu_counters_lock);
#endif
        return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
        if (!fbc->counters)
                return;

        debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        spin_lock(&percpu_counters_lock);
        list_del(&fbc->list);
        spin_unlock(&percpu_counters_lock);
#endif
        free_percpu(fbc->counters);
        fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static void compute_batch_value(void)
{
        int nr = num_online_cpus();

        percpu_counter_batch = max(32, nr*2);
}
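
/*
 * Worked example of the formula above (editorial note, not in the original
 * file): with 4 online cpus the batch is max(32, 8) = 32; with 64 online
 * cpus it grows to max(32, 128) = 128, so each cpu may accumulate a larger
 * local delta before it has to take fbc->lock.
 */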

static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
                                        unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        unsigned int cpu;
        struct percpu_counter *fbc;

        compute_batch_value();
        if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
                return NOTIFY_OK;

        cpu = (unsigned long)hcpu;
        spin_lock(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;
                unsigned long flags;

                raw_spin_lock_irqsave(&fbc->lock, flags);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                raw_spin_unlock_irqrestore(&fbc->lock, flags);
        }
        spin_unlock(&percpu_counters_lock);
#endif
        return NOTIFY_OK;
}
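
/*
 * Note (editorial, not in the original file): on CPU_DEAD the dead cpu's
 * per-cpu delta is folded into fbc->count above and the batch value is
 * recomputed, which is one reason __percpu_counter_sum() only needs to
 * walk online cpus.
 */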

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        s64     count;

        count = percpu_counter_read(fbc);
        /* Check to see if rough count will be sufficient for comparison */
        if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
                if (count > rhs)
                        return 1;
                else
                        return -1;
        }
        /* Need to use precise count */
        count = percpu_counter_sum(fbc);
        if (count > rhs)
                return 1;
        else if (count < rhs)
                return -1;
        else
                return 0;
}
EXPORT_SYMBOL(percpu_counter_compare);
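
/*
 * Illustrative sketch (hypothetical caller and limit, not part of this
 * file): a typical use is a limit check, where the cheap approximate read
 * decides most cases and the expensive sum is only taken when the counter
 * is within percpu_counter_batch * num_online_cpus() of the limit:
 *
 *	if (percpu_counter_compare(&nr_items, limit) >= 0)
 *		return -ENOSPC;
 */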

static int __init percpu_counter_startup(void)
{
        compute_batch_value();
        hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
        return 0;
}
module_init(percpu_counter_startup);
v3.1
/*
 * Fast batching percpu counters.
 */

#include <linux/percpu_counter.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/debugobjects.h>

static LIST_HEAD(percpu_counters);
static DEFINE_MUTEX(percpu_counters_lock);

#ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER

static struct debug_obj_descr percpu_counter_debug_descr;

static int percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
{
        struct percpu_counter *fbc = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                percpu_counter_destroy(fbc);
                debug_object_free(fbc, &percpu_counter_debug_descr);
                return 1;
        default:
                return 0;
        }
}

static struct debug_obj_descr percpu_counter_debug_descr = {
        .name           = "percpu_counter",
        .fixup_free     = percpu_counter_fixup_free,
};

static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
        debug_object_init(fbc, &percpu_counter_debug_descr);
        debug_object_activate(fbc, &percpu_counter_debug_descr);
}

static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{
        debug_object_deactivate(fbc, &percpu_counter_debug_descr);
        debug_object_free(fbc, &percpu_counter_debug_descr);
}

#else	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */
static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
{ }
static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
{ }
#endif	/* CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER */

void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        int cpu;

        spin_lock(&fbc->lock);
        for_each_possible_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
        spin_unlock(&fbc->lock);
}
EXPORT_SYMBOL(percpu_counter_set);

void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        s64 count;

        preempt_disable();
        count = __this_cpu_read(*fbc->counters) + amount;
        if (count >= batch || count <= -batch) {
                spin_lock(&fbc->lock);
                fbc->count += count;
                __this_cpu_write(*fbc->counters, 0);
                spin_unlock(&fbc->lock);
        } else {
                __this_cpu_write(*fbc->counters, count);
        }
        preempt_enable();
}
EXPORT_SYMBOL(__percpu_counter_add);

/*
 * Add up all the per-cpu counts, return the result.  This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
s64 __percpu_counter_sum(struct percpu_counter *fbc)
{
        s64 ret;
        int cpu;

        spin_lock(&fbc->lock);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
        spin_unlock(&fbc->lock);
        return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                          struct lock_class_key *key)
{
        spin_lock_init(&fbc->lock);
        lockdep_set_class(&fbc->lock, key);
        fbc->count = amount;
        fbc->counters = alloc_percpu(s32);
        if (!fbc->counters)
                return -ENOMEM;

        debug_percpu_counter_activate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        INIT_LIST_HEAD(&fbc->list);
        mutex_lock(&percpu_counters_lock);
        list_add(&fbc->list, &percpu_counters);
        mutex_unlock(&percpu_counters_lock);
#endif
        return 0;
}
EXPORT_SYMBOL(__percpu_counter_init);

void percpu_counter_destroy(struct percpu_counter *fbc)
{
        if (!fbc->counters)
                return;

        debug_percpu_counter_deactivate(fbc);

#ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&percpu_counters_lock);
        list_del(&fbc->list);
        mutex_unlock(&percpu_counters_lock);
#endif
        free_percpu(fbc->counters);
        fbc->counters = NULL;
}
EXPORT_SYMBOL(percpu_counter_destroy);

int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);

static void compute_batch_value(void)
{
        int nr = num_online_cpus();

        percpu_counter_batch = max(32, nr*2);
}

static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
                                        unsigned long action, void *hcpu)
{
#ifdef CONFIG_HOTPLUG_CPU
        unsigned int cpu;
        struct percpu_counter *fbc;

        compute_batch_value();
        if (action != CPU_DEAD)
                return NOTIFY_OK;

        cpu = (unsigned long)hcpu;
        mutex_lock(&percpu_counters_lock);
        list_for_each_entry(fbc, &percpu_counters, list) {
                s32 *pcount;
                unsigned long flags;

                spin_lock_irqsave(&fbc->lock, flags);
                pcount = per_cpu_ptr(fbc->counters, cpu);
                fbc->count += *pcount;
                *pcount = 0;
                spin_unlock_irqrestore(&fbc->lock, flags);
        }
        mutex_unlock(&percpu_counters_lock);
#endif
        return NOTIFY_OK;
}

/*
 * Compare counter against given value.
 * Return 1 if greater, 0 if equal and -1 if less
 */
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        s64     count;

        count = percpu_counter_read(fbc);
        /* Check to see if rough count will be sufficient for comparison */
        if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
                if (count > rhs)
                        return 1;
                else
                        return -1;
        }
        /* Need to use precise count */
        count = percpu_counter_sum(fbc);
        if (count > rhs)
                return 1;
        else if (count < rhs)
                return -1;
        else
                return 0;
}
EXPORT_SYMBOL(percpu_counter_compare);

static int __init percpu_counter_startup(void)
{
        compute_batch_value();
        hotcpu_notifier(percpu_counter_hotcpu_callback, 0);
        return 0;
}
module_init(percpu_counter_startup);