v3.15
/*
 *  Floating proportions with flexible aging period
 *
 *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: Given different types of events, measure the
 * proportion of each type of event over time. The proportions are measured
 * with exponentially decaying history to give smooth transitions. A formula
 * expressing the proportion of events of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * where x_{i,j} is j's number of events in the i-th last time period and x_i
 * is the total number of events in the i-th last time period.
 *
 * Note that the p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
 *
 * This formula can be straightforwardly computed by maintaining a denominator
 * (let's call it 'd') and, for each event type, its numerator (let's call it
 * 'n_j'). When an event of type 'j' happens, we simply need to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iterating over all event types, we instead shift the numerator of
 * event j lazily, when someone asks for the proportion of event j or when
 * event j occurs. This can be trivially implemented by remembering the last
 * period in which something happened with the proportion of type j.
 */
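/*
 * A hedged worked example (an editorial illustration, not part of the
 * kernel source): suppose type j saw 6 events in the current period and
 * 4 in the previous one, while the totals were 10 and 6. Then
 *
 *   p_j = (6/2 + 4/4) / (10/2 + 6/4) = 4 / 6.5 ~= 0.62
 *
 * The lazy scheme computes the same value: n_j = 6 + 4/2 = 8,
 * d = 10 + 6/2 = 13, and 8/13 ~= 0.62.
 */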
#include <linux/flex_proportions.h>

int fprop_global_init(struct fprop_global *p)
{
	int err;

	p->period = 0;
	/* Use 1 to avoid dealing with periods with 0 events... */
	err = percpu_counter_init(&p->events, 1);
	if (err)
		return err;
	seqcount_init(&p->sequence);
	return 0;
}

void fprop_global_destroy(struct fprop_global *p)
{
	percpu_counter_destroy(&p->events);
}

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
	s64 events;
	unsigned long flags;

	local_irq_save(flags);
	events = percpu_counter_sum(&p->events);
	/*
	 * Don't do anything if there are no events.
	 */
	if (events <= 1) {
		local_irq_restore(flags);
		return false;
	}
	write_seqcount_begin(&p->sequence);
	if (periods < 64)
		events -= events >> periods;
	/* Use addition to avoid losing events happening between sum and set */
	percpu_counter_add(&p->events, -events);
	p->period += periods;
	write_seqcount_end(&p->sequence);
	local_irq_restore(flags);

	return true;
}
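/*
 * A hedged usage sketch (hypothetical caller names, not part of the
 * kernel source): periods are typically declared from a timer, which
 * can be stopped once all history has aged away.
 */
static struct fprop_global my_prop;

static void my_age_fn(void)
{
	/* Declare one new period; false means all events decayed to 0. */
	if (!fprop_new_period(&my_prop, 1))
		pr_debug("proportions empty, aging timer may be stopped\n");
}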

/*
 * ---- SINGLE ----
 */

int fprop_local_init_single(struct fprop_local_single *pl)
{
	pl->events = 0;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}

static void fprop_reflect_period_single(struct fprop_global *p,
					struct fprop_local_single *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG)
		pl->events >>= period - pl->period;
	else
		pl->events = 0;
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}
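/*
 * A hedged numeric illustration (editorial, not part of the kernel
 * source): if three periods elapsed since pl was last touched,
 * pl->events is shifted right by 3, i.e. divided by 8, which matches
 * applying "n_j /= 2" once per missed period. After BITS_PER_LONG or
 * more missed periods the shift would reach 0 anyway, so the counter
 * is simply zeroed.
 */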

/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
	fprop_reflect_period_single(p, pl);
	pl->events++;
	percpu_counter_add(&p->events, 1);
}

/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
			   struct fprop_local_single *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_single(p, pl);
		num = pl->events;
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
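/*
 * A hedged usage sketch (hypothetical helper, not part of the kernel
 * source): turn the returned fraction into a percentage. The clamping
 * above guarantees den > 0 and num <= den, so the result is 0..100.
 */
static unsigned long my_event_share(struct fprop_global *p,
				    struct fprop_local_single *pl)
{
	unsigned long num, den;

	fprop_fraction_single(p, pl, &num, &den);
	return num * 100 / den;
}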

/*
 * ---- PERCPU ----
 */
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))

int fprop_local_init_percpu(struct fprop_local_percpu *pl)
{
	int err;

	err = percpu_counter_init(&pl->events, 0);
	if (err)
		return err;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
	percpu_counter_destroy(&pl->events);
}

static void fprop_reflect_period_percpu(struct fprop_global *p,
					struct fprop_local_percpu *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG) {
		s64 val = percpu_counter_read(&pl->events);

		if (val < (nr_cpu_ids * PROP_BATCH))
			val = percpu_counter_sum(&pl->events);

		__percpu_counter_add(&pl->events,
			-val + (val >> (period-pl->period)), PROP_BATCH);
	} else
		percpu_counter_set(&pl->events, 0);
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
	fprop_reflect_period_percpu(p, pl);
	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
	percpu_counter_add(&p->events, 1);
}

void fprop_fraction_percpu(struct fprop_global *p,
			   struct fprop_local_percpu *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_percpu(p, pl);
		num = percpu_counter_read_positive(&pl->events);
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}
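/*
 * A hedged end-to-end sketch (hypothetical names, not part of the
 * kernel source): track the share of I/O events one device generates.
 */
static struct fprop_global io_events;
static struct fprop_local_percpu dev_events;

static int my_setup(void)
{
	int err = fprop_global_init(&io_events);

	if (err)
		return err;
	err = fprop_local_init_percpu(&dev_events);
	if (err)
		fprop_global_destroy(&io_events);
	return err;
}

static void my_dev_io_completed(void)
{
	/* Count one event of this device's type and one global event. */
	__fprop_inc_percpu(&io_events, &dev_events);
}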

/*
 * Like __fprop_inc_percpu() except that the event is counted only if the
 * given type has a fraction smaller than @max_frac/FPROP_FRAC_BASE
 */
void __fprop_inc_percpu_max(struct fprop_global *p,
			    struct fprop_local_percpu *pl, int max_frac)
{
	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
		unsigned long numerator, denominator;

		fprop_fraction_percpu(p, pl, &numerator, &denominator);
		if (numerator >
		    (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
			return;
	} else
		fprop_reflect_period_percpu(p, pl);
	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
	percpu_counter_add(&p->events, 1);
}
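/*
 * A hedged numeric example (editorial; assumes FPROP_FRAC_SHIFT == 10,
 * i.e. FPROP_FRAC_BASE == 1024, as defined in flex_proportions.h):
 * with max_frac == 512 the type is limited to half of all events. For
 * denominator == 1000 the threshold is (1000 * 512) >> 10 == 500, so
 * the increment is skipped once numerator exceeds 500.
 */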
v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 *  Floating proportions with flexible aging period
 *
 *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: Given different types of events, measure the
 * proportion of each type of event over time. The proportions are measured
 * with exponentially decaying history to give smooth transitions. A formula
 * expressing the proportion of events of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * where x_{i,j} is j's number of events in the i-th last time period and x_i
 * is the total number of events in the i-th last time period.
 *
 * Note that the p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
 *
 * This formula can be straightforwardly computed by maintaining a denominator
 * (let's call it 'd') and, for each event type, its numerator (let's call it
 * 'n_j'). When an event of type 'j' happens, we simply need to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iterating over all event types, we instead shift the numerator of
 * event j lazily, when someone asks for the proportion of event j or when
 * event j occurs. This can be trivially implemented by remembering the last
 * period in which something happened with the proportion of type j.
 */
#include <linux/flex_proportions.h>

int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
	int err;

	p->period = 0;
	/* Use 1 to avoid dealing with periods with 0 events... */
	err = percpu_counter_init(&p->events, 1, gfp);
	if (err)
		return err;
	seqcount_init(&p->sequence);
	return 0;
}

void fprop_global_destroy(struct fprop_global *p)
{
	percpu_counter_destroy(&p->events);
}

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
	s64 events = percpu_counter_sum(&p->events);

	/*
	 * Don't do anything if there are no events.
	 */
	if (events <= 1)
		return false;
	preempt_disable_nested();
	write_seqcount_begin(&p->sequence);
	if (periods < 64)
		events -= events >> periods;
	/* Use addition to avoid losing events happening between sum and set */
	percpu_counter_add(&p->events, -events);
	p->period += periods;
	write_seqcount_end(&p->sequence);
	preempt_enable_nested();

	return true;
}
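/*
 * A hedged usage note (editorial, not part of the kernel source): in
 * this version the init helpers take a gfp_t for the underlying percpu
 * counter allocation, so a hypothetical caller would write e.g.:
 *
 *	err = fprop_global_init(&my_prop, GFP_KERNEL);
 */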

/*
 * ---- PERCPU ----
 */
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
	int err;

	err = percpu_counter_init(&pl->events, 0, gfp);
	if (err)
		return err;
	pl->period = 0;
	raw_spin_lock_init(&pl->lock);
	return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
	percpu_counter_destroy(&pl->events);
}

static void fprop_reflect_period_percpu(struct fprop_global *p,
					struct fprop_local_percpu *pl)
{
	unsigned int period = p->period;
	unsigned long flags;

	/* Fast path - period didn't change */
	if (pl->period == period)
		return;
	raw_spin_lock_irqsave(&pl->lock, flags);
	/* Someone updated pl->period while we were spinning? */
	if (pl->period >= period) {
		raw_spin_unlock_irqrestore(&pl->lock, flags);
		return;
	}
	/* Aging zeroed our fraction? */
	if (period - pl->period < BITS_PER_LONG) {
		s64 val = percpu_counter_read(&pl->events);

		if (val < (nr_cpu_ids * PROP_BATCH))
			val = percpu_counter_sum(&pl->events);

		percpu_counter_add_batch(&pl->events,
			-val + (val >> (period-pl->period)), PROP_BATCH);
	} else
		percpu_counter_set(&pl->events, 0);
	pl->period = period;
	raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
		long nr)
{
	fprop_reflect_period_percpu(p, pl);
	percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
	percpu_counter_add(&p->events, nr);
}
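/*
 * A hedged usage note (editorial, not part of the kernel source):
 * unlike the older __fprop_inc_percpu(), this variant takes an event
 * count, so a caller accounting e.g. a batch of written pages can pass
 * the whole batch as 'nr' instead of incrementing in a loop.
 */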

void fprop_fraction_percpu(struct fprop_global *p,
			   struct fprop_local_percpu *pl,
			   unsigned long *numerator, unsigned long *denominator)
{
	unsigned int seq;
	s64 num, den;

	do {
		seq = read_seqcount_begin(&p->sequence);
		fprop_reflect_period_percpu(p, pl);
		num = percpu_counter_read_positive(&pl->events);
		den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));

	/*
	 * Make fraction <= 1 and denominator > 0 even in presence of percpu
	 * counter errors
	 */
	if (den <= num) {
		if (num)
			den = num;
		else
			den = 1;
	}
	*denominator = den;
	*numerator = num;
}

/*
 * Like __fprop_add_percpu() except that the event is counted only if the
 * given type has a fraction smaller than @max_frac/FPROP_FRAC_BASE
 */
void __fprop_add_percpu_max(struct fprop_global *p,
		struct fprop_local_percpu *pl, int max_frac, long nr)
{
	if (unlikely(max_frac < FPROP_FRAC_BASE)) {
		unsigned long numerator, denominator;
		s64 tmp;

		fprop_fraction_percpu(p, pl, &numerator, &denominator);
		/* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
		tmp = (u64)denominator * max_frac -
					((u64)numerator << FPROP_FRAC_SHIFT);
		if (tmp < 0) {
			/* Maximum fraction already exceeded? */
			return;
		} else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
			/* Add just enough for the fraction to saturate */
			nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
					FPROP_FRAC_BASE - max_frac);
		}
	}

	__fprop_add_percpu(p, pl, nr);
}
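/*
 * A hedged worked example of the saturation arithmetic above
 * (illustrative numbers, assuming FPROP_FRAC_BASE == 1024 and
 * FPROP_FRAC_SHIFT == 10): take num == 400, den == 1000,
 * max_frac == 512 (a limit of 1/2) and nr == 600. Then
 *
 *   tmp = 1000 * 512 - (400 << 10) = 102400
 *
 * which is >= 0 but smaller than nr * (1024 - 512) == 307200, so nr is
 * clamped to (102400 + 511) / 512 == 200. Adding 200 events yields
 * (400 + 200) / (1000 + 200) == 1/2, exactly the allowed maximum.
 */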