v5.14.15 (lib/flex_proportions.c)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Floating proportions with flexible aging period
 *
 *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: Given different types of event, measure proportion
 * of each type of event over time. The proportions are measured with
 * exponentially decaying history to give smooth transitions. A formula
 * expressing proportion of event of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * Where x_{i,j} is j's number of events in i-th last time period and x_i is
 * total number of events in i-th last time period.
 *
 * Note that p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1,
 *
 * This formula can be straightforwardly computed by maintaining denominator
 * (let's call it 'd') and for each event type its numerator (let's call it
 * 'n_j'). When an event of type 'j' happens, we simply need to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iteration over all event types, we instead shift numerator of event
 * j lazily when someone asks for a proportion of event j or when event j
 * occurs. This can be trivially implemented by remembering the last period in
 * which something happened with proportion of type j.
 */
#include <linux/flex_proportions.h>
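
/*
 * Editor's note: a minimal, hypothetical usage sketch (not part of the
 * kernel file) showing how the lazy-shift bookkeeping above is consumed.
 * The demo_* names are invented; the fprop_* calls are the ones defined
 * in this file and declared in <linux/flex_proportions.h>. A real caller
 * would also declare periods regularly, e.g. fprop_new_period(&g, 1)
 * from a timer.
 */
#if 0	/* illustration only */
static struct fprop_global demo_global;
static struct fprop_local_single demo_local;

static int demo_setup(void)
{
        int err = fprop_global_init(&demo_global, GFP_KERNEL);

        if (err)
                return err;
        return fprop_local_init_single(&demo_local);
}

static void demo_event(void)
{
        /* Count one event of our type; its history ages lazily */
        __fprop_inc_single(&demo_global, &demo_local);
}

static void demo_report(void)
{
        unsigned long num, den;

        /* num/den is the exponentially decayed share of our event type */
        fprop_fraction_single(&demo_global, &demo_local, &num, &den);
}
#endif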

int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
        int err;

        p->period = 0;
        /* Use 1 to avoid dealing with periods with 0 events... */
        err = percpu_counter_init(&p->events, 1, gfp);
        if (err)
                return err;
        seqcount_init(&p->sequence);
        return 0;
}

void fprop_global_destroy(struct fprop_global *p)
{
        percpu_counter_destroy(&p->events);
}

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
        s64 events;
        unsigned long flags;

        local_irq_save(flags);
        events = percpu_counter_sum(&p->events);
        /*
         * Don't do anything if there are no events.
         */
        if (events <= 1) {
                local_irq_restore(flags);
                return false;
        }
        write_seqcount_begin(&p->sequence);
        if (periods < 64)
                events -= events >> periods;
        /* Use addition to avoid losing events happening between sum and set */
        percpu_counter_add(&p->events, -events);
        p->period += periods;
        write_seqcount_end(&p->sequence);
        local_irq_restore(flags);

        return true;
}
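
/*
 * Editor's note, a worked example of the aging above: with a summed count
 * of 1000 events and periods = 2, 'events' becomes 1000 - (1000 >> 2) = 750,
 * so 750 is subtracted from the counter and 1000 >> 2 = 250 events survive,
 * exactly as if the count had been halved once per declared period.
 */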

/*
 * ---- SINGLE ----
 */

int fprop_local_init_single(struct fprop_local_single *pl)
{
        pl->events = 0;
        pl->period = 0;
        raw_spin_lock_init(&pl->lock);
        return 0;
}

void fprop_local_destroy_single(struct fprop_local_single *pl)
{
}

static void fprop_reflect_period_single(struct fprop_global *p,
                                        struct fprop_local_single *pl)
{
        unsigned int period = p->period;
        unsigned long flags;

        /* Fast path - period didn't change */
        if (pl->period == period)
                return;
        raw_spin_lock_irqsave(&pl->lock, flags);
        /* Someone updated pl->period while we were spinning? */
        if (pl->period >= period) {
                raw_spin_unlock_irqrestore(&pl->lock, flags);
                return;
        }
        /* Aging zeroed our fraction? */
        if (period - pl->period < BITS_PER_LONG)
                pl->events >>= period - pl->period;
        else
                pl->events = 0;
        pl->period = period;
        raw_spin_unlock_irqrestore(&pl->lock, flags);
}
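
/*
 * Editor's note: e.g. if pl->period lags p->period by 3, pl->events is
 * shifted right by 3, the same result as halving it once per missed period.
 */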

/* Event of type pl happened */
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
        fprop_reflect_period_single(p, pl);
        pl->events++;
        percpu_counter_add(&p->events, 1);
}

/* Return fraction of events of type pl */
void fprop_fraction_single(struct fprop_global *p,
                           struct fprop_local_single *pl,
                           unsigned long *numerator, unsigned long *denominator)
{
        unsigned int seq;
        s64 num, den;

        do {
                seq = read_seqcount_begin(&p->sequence);
                fprop_reflect_period_single(p, pl);
                num = pl->events;
                den = percpu_counter_read_positive(&p->events);
        } while (read_seqcount_retry(&p->sequence, seq));

        /*
         * Make fraction <= 1 and denominator > 0 even in presence of percpu
         * counter errors
         */
        if (den <= num) {
                if (num)
                        den = num;
                else
                        den = 1;
        }
        *denominator = den;
        *numerator = num;
}

/*
 * ---- PERCPU ----
 */
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
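
/*
 * Editor's note: PROP_BATCH scales the percpu-counter batch with the CPU
 * count; e.g. with nr_cpu_ids = 8, ilog2(8) = 3 and the batch is
 * 8 * (1 + 3) = 32, so each CPU can accumulate up to 32 events locally
 * before the shared count is updated.
 */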

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
        int err;

        err = percpu_counter_init(&pl->events, 0, gfp);
        if (err)
                return err;
        pl->period = 0;
        raw_spin_lock_init(&pl->lock);
        return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
        percpu_counter_destroy(&pl->events);
}

static void fprop_reflect_period_percpu(struct fprop_global *p,
                                        struct fprop_local_percpu *pl)
{
        unsigned int period = p->period;
        unsigned long flags;

        /* Fast path - period didn't change */
        if (pl->period == period)
                return;
        raw_spin_lock_irqsave(&pl->lock, flags);
        /* Someone updated pl->period while we were spinning? */
        if (pl->period >= period) {
                raw_spin_unlock_irqrestore(&pl->lock, flags);
                return;
        }
        /* Aging zeroed our fraction? */
        if (period - pl->period < BITS_PER_LONG) {
                s64 val = percpu_counter_read(&pl->events);

                if (val < (nr_cpu_ids * PROP_BATCH))
                        val = percpu_counter_sum(&pl->events);

                percpu_counter_add_batch(&pl->events,
                        -val + (val >> (period-pl->period)), PROP_BATCH);
        } else
                percpu_counter_set(&pl->events, 0);
        pl->period = period;
        raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
        fprop_reflect_period_percpu(p, pl);
        percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
        percpu_counter_add(&p->events, 1);
}

void fprop_fraction_percpu(struct fprop_global *p,
                           struct fprop_local_percpu *pl,
                           unsigned long *numerator, unsigned long *denominator)
{
        unsigned int seq;
        s64 num, den;

        do {
                seq = read_seqcount_begin(&p->sequence);
                fprop_reflect_period_percpu(p, pl);
                num = percpu_counter_read_positive(&pl->events);
                den = percpu_counter_read_positive(&p->events);
        } while (read_seqcount_retry(&p->sequence, seq));

        /*
         * Make fraction <= 1 and denominator > 0 even in presence of percpu
         * counter errors
         */
        if (den <= num) {
                if (num)
                        den = num;
                else
                        den = 1;
        }
        *denominator = den;
        *numerator = num;
}

/*
 * Like __fprop_inc_percpu() except that the event is counted only if the
 * given type's fraction is smaller than @max_frac/FPROP_FRAC_BASE
 */
void __fprop_inc_percpu_max(struct fprop_global *p,
                            struct fprop_local_percpu *pl, int max_frac)
{
        if (unlikely(max_frac < FPROP_FRAC_BASE)) {
                unsigned long numerator, denominator;

                fprop_fraction_percpu(p, pl, &numerator, &denominator);
                if (numerator >
                    (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
                        return;
        }

        __fprop_inc_percpu(p, pl);
}
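
/*
 * Editor's note, a worked example of the check above, assuming
 * FPROP_FRAC_SHIFT = 10 (FPROP_FRAC_BASE = 1024) as defined in
 * <linux/flex_proportions.h>: with max_frac = 512 (a 50% cap),
 * numerator = 60 and denominator = 100, the limit is
 * (100 * 512) >> 10 = 50, so 60 > 50 and the event is not counted.
 */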
v6.13.7 (lib/flex_proportions.c)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Floating proportions with flexible aging period
 *
 *   Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: Given different types of event, measure proportion
 * of each type of event over time. The proportions are measured with
 * exponentially decaying history to give smooth transitions. A formula
 * expressing proportion of event of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * Where x_{i,j} is j's number of events in i-th last time period and x_i is
 * total number of events in i-th last time period.
 *
 * Note that p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1,
 *
 * This formula can be straightforwardly computed by maintaining denominator
 * (let's call it 'd') and for each event type its numerator (let's call it
 * 'n_j'). When an event of type 'j' happens, we simply need to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iteration over all event types, we instead shift numerator of event
 * j lazily when someone asks for a proportion of event j or when event j
 * occurs. This can be trivially implemented by remembering the last period in
 * which something happened with proportion of type j.
 */
#include <linux/flex_proportions.h>
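
/*
 * Editor's note: a minimal, hypothetical sketch (not part of the kernel
 * file) of the percpu flavour, which is the only one left in this version.
 * The demo_* names are invented; the fprop_* calls and FPROP_FRAC_BASE
 * are defined below and in <linux/flex_proportions.h>.
 */
#if 0	/* illustration only */
static struct fprop_global demo_global;
static struct fprop_local_percpu demo_local;

static int demo_setup(void)
{
        int err = fprop_global_init(&demo_global, GFP_KERNEL);

        if (err)
                return err;
        return fprop_local_init_percpu(&demo_local, GFP_KERNEL);
}

static void demo_events(long nr)
{
        /* Count nr events of our type, capped at half of all events */
        __fprop_add_percpu_max(&demo_global, &demo_local,
                               FPROP_FRAC_BASE / 2, nr);
}

static void demo_age(void)
{
        /* Called periodically, e.g. from a timer, to halve the history */
        fprop_new_period(&demo_global, 1);
}
#endif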

int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
        int err;

        p->period = 0;
        /* Use 1 to avoid dealing with periods with 0 events... */
        err = percpu_counter_init(&p->events, 1, gfp);
        if (err)
                return err;
        seqcount_init(&p->sequence);
        return 0;
}

void fprop_global_destroy(struct fprop_global *p)
{
        percpu_counter_destroy(&p->events);
}

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
        s64 events = percpu_counter_sum(&p->events);

        /*
         * Don't do anything if there are no events.
         */
        if (events <= 1)
                return false;
        preempt_disable_nested();
        write_seqcount_begin(&p->sequence);
        if (periods < 64)
                events -= events >> periods;
        /* Use addition to avoid losing events happening between sum and set */
        percpu_counter_add(&p->events, -events);
        p->period += periods;
        write_seqcount_end(&p->sequence);
        preempt_enable_nested();

        return true;
}
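
/*
 * Editor's note: relative to the v5.14.15 code above, this version drops
 * the SINGLE variant, replaces the irq-disabled section with
 * preempt_disable_nested()/preempt_enable_nested(), and generalises
 * __fprop_inc_percpu() into __fprop_add_percpu() taking an event count.
 */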

/*
 * ---- PERCPU ----
 */
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))

int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
        int err;

        err = percpu_counter_init(&pl->events, 0, gfp);
        if (err)
                return err;
        pl->period = 0;
        raw_spin_lock_init(&pl->lock);
        return 0;
}

void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
        percpu_counter_destroy(&pl->events);
}

static void fprop_reflect_period_percpu(struct fprop_global *p,
                                        struct fprop_local_percpu *pl)
{
        unsigned int period = p->period;
        unsigned long flags;

        /* Fast path - period didn't change */
        if (pl->period == period)
                return;
        raw_spin_lock_irqsave(&pl->lock, flags);
        /* Someone updated pl->period while we were spinning? */
        if (pl->period >= period) {
                raw_spin_unlock_irqrestore(&pl->lock, flags);
                return;
        }
        /* Aging zeroed our fraction? */
        if (period - pl->period < BITS_PER_LONG) {
                s64 val = percpu_counter_read(&pl->events);

                if (val < (nr_cpu_ids * PROP_BATCH))
                        val = percpu_counter_sum(&pl->events);

                percpu_counter_add_batch(&pl->events,
                        -val + (val >> (period-pl->period)), PROP_BATCH);
        } else
                percpu_counter_set(&pl->events, 0);
        pl->period = period;
        raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* Event of type pl happened */
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
                long nr)
{
        fprop_reflect_period_percpu(p, pl);
        percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
        percpu_counter_add(&p->events, nr);
}

void fprop_fraction_percpu(struct fprop_global *p,
                           struct fprop_local_percpu *pl,
                           unsigned long *numerator, unsigned long *denominator)
{
        unsigned int seq;
        s64 num, den;

        do {
                seq = read_seqcount_begin(&p->sequence);
                fprop_reflect_period_percpu(p, pl);
                num = percpu_counter_read_positive(&pl->events);
                den = percpu_counter_read_positive(&p->events);
        } while (read_seqcount_retry(&p->sequence, seq));

        /*
         * Make fraction <= 1 and denominator > 0 even in presence of percpu
         * counter errors
         */
        if (den <= num) {
                if (num)
                        den = num;
                else
                        den = 1;
        }
        *denominator = den;
        *numerator = num;
}

/*
 * Like __fprop_add_percpu() except that the event is counted only if the
 * given type's fraction is smaller than @max_frac/FPROP_FRAC_BASE
 */
void __fprop_add_percpu_max(struct fprop_global *p,
                struct fprop_local_percpu *pl, int max_frac, long nr)
{
        if (unlikely(max_frac < FPROP_FRAC_BASE)) {
                unsigned long numerator, denominator;
                s64 tmp;

                fprop_fraction_percpu(p, pl, &numerator, &denominator);
                /* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
                tmp = (u64)denominator * max_frac -
                                        ((u64)numerator << FPROP_FRAC_SHIFT);
                if (tmp < 0) {
                        /* Maximum fraction already exceeded? */
                        return;
                } else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
                        /* Add just enough for the fraction to saturate */
                        nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
                                        FPROP_FRAC_BASE - max_frac);
                }
        }

        __fprop_add_percpu(p, pl, nr);
}
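
/*
 * Editor's note, a worked example of the clamping above, assuming
 * FPROP_FRAC_SHIFT = 10 (FPROP_FRAC_BASE = 1024) as defined in
 * <linux/flex_proportions.h>: with numerator = 40, denominator = 100,
 * max_frac = 512 (a 50% cap) and nr = 30, tmp = 100 * 512 - (40 << 10)
 * = 10240, and nr * (1024 - 512) = 15360 > tmp, so nr is clamped to
 * div_u64(10240 + 511, 512) = 20. Adding 20 gives 60/120, exactly 50%.
 */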