v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/sched/cpufreq.h>

#include "cpufreq_ondemand.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(1)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

static struct od_ops od_ops;

static unsigned int default_powersave_bias;

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known series of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}
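
/*
 * Worked example for the check above (illustrative, not part of the
 * original source): a Core 2 Duo E6600 reports vendor Intel, family 6,
 * model 15, so should_io_be_busy() returns 1 and iowait is treated as
 * busy time by default.  An older Pentium M (family 6, model 13) falls
 * through and returns 0, leaving io_is_busy off unless the user enables
 * it via sysfs.
 */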
 55
 56/*
 57 * Find right freq to be set now with powersave_bias on.
 58 * Returns the freq_hi to be used right now and will set freq_hi_delay_us,
 59 * freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
 60 */
 61static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
 62		unsigned int freq_next, unsigned int relation)
 63{
 64	unsigned int freq_req, freq_reduc, freq_avg;
 65	unsigned int freq_hi, freq_lo;
 66	unsigned int index;
 67	unsigned int delay_hi_us;
 68	struct policy_dbs_info *policy_dbs = policy->governor_data;
 69	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
 70	struct dbs_data *dbs_data = policy_dbs->dbs_data;
 71	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 72	struct cpufreq_frequency_table *freq_table = policy->freq_table;
 73
 74	if (!freq_table) {
 75		dbs_info->freq_lo = 0;
 76		dbs_info->freq_lo_delay_us = 0;
 77		return freq_next;
 78	}
 79
 80	index = cpufreq_frequency_table_target(policy, freq_next, relation);
 81	freq_req = freq_table[index].frequency;
 82	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
 83	freq_avg = freq_req - freq_reduc;
 84
 85	/* Find freq bounds for freq_avg in freq_table */
 86	index = cpufreq_table_find_index_h(policy, freq_avg,
 87					   relation & CPUFREQ_RELATION_E);
 88	freq_lo = freq_table[index].frequency;
 89	index = cpufreq_table_find_index_l(policy, freq_avg,
 90					   relation & CPUFREQ_RELATION_E);
 91	freq_hi = freq_table[index].frequency;
 92
 93	/* Find out how long we have to be in hi and lo freqs */
 94	if (freq_hi == freq_lo) {
 95		dbs_info->freq_lo = 0;
 96		dbs_info->freq_lo_delay_us = 0;
 97		return freq_lo;
 98	}
 99	delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
100	delay_hi_us += (freq_hi - freq_lo) / 2;
101	delay_hi_us /= freq_hi - freq_lo;
102	dbs_info->freq_hi_delay_us = delay_hi_us;
103	dbs_info->freq_lo = freq_lo;
104	dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
105	return freq_hi;
106}
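
/*
 * Worked example of the split above: with powersave_bias = 100 (10%),
 * freq_req = 2000000 kHz and a table containing 1600000 and 2000000 kHz,
 * freq_avg = 2000000 - 200000 = 1800000 kHz, so freq_lo = 1600000 and
 * freq_hi = 2000000.  With sampling_rate = 10000 us:
 *
 *	delay_hi_us = (1800000 - 1600000) * 10000 / (2000000 - 1600000)
 *		    = 5000 us (rounded to nearest)
 *
 * i.e. the CPU spends half of each period at freq_hi and half at freq_lo,
 * averaging out to the requested 1800000 kHz.
 */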

static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
{
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->freq_lo = 0;
}

static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
						    CPUFREQ_RELATION_HE);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_LE : CPUFREQ_RELATION_HE);
}

/*
 * Every sampling_rate we check whether the current idle time is less than
 * 20% (default).  If it is, we try to increase the frequency; otherwise,
 * we adjust the frequency proportionally to the load.
 */
static void od_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			policy_dbs->rate_mult = dbs_data->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		policy_dbs->rate_mult = 1;

		if (od_tuners->powersave_bias)
			freq_next = od_ops.powersave_bias_target(policy,
								 freq_next,
								 CPUFREQ_RELATION_LE);

		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_CE);
	}
}
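
/*
 * Worked example of the proportional path above: with
 * cpuinfo.min_freq = 800000 kHz, cpuinfo.max_freq = 2400000 kHz and a
 * measured load of 50, freq_next = 800000 + 50 * 1600000 / 100
 * = 1600000 kHz, which __cpufreq_driver_target() then rounds to the
 * closest efficient table frequency (RELATION_CE).
 */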

static unsigned int od_dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	int sample_type = dbs_info->sample_type;

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	/*
	 * OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
	 * it then.
	 */
	if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
		__cpufreq_driver_target(policy, dbs_info->freq_lo,
					CPUFREQ_RELATION_HE);
		return dbs_info->freq_lo_delay_us;
	}

	od_update(policy);

	if (dbs_info->freq_lo) {
		/* Setup SUB_SAMPLE */
		dbs_info->sample_type = OD_SUB_SAMPLE;
		return dbs_info->freq_hi_delay_us;
	}

	return dbs_data->sampling_rate * policy_dbs->rate_mult;
}
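
/*
 * Illustrative timeline for the two-phase cycle above, continuing the
 * powersave_bias example: a NORMAL sample picks freq_hi = 2000000 kHz and
 * returns freq_hi_delay_us = 5000, so the next update fires 5000 us later
 * as a SUB sample, drops to freq_lo = 1600000 kHz and returns
 * freq_lo_delay_us = 5000.  With powersave_bias == 0, freq_lo stays 0 and
 * the governor simply re-evaluates every sampling_rate * rate_mult us.
 */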

/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;

static ssize_t io_is_busy_store(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_data->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		/*
		 * Doing this without locking might lead to using different
		 * rate_mult values in od_update() and od_dbs_update().
		 */
		mutex_lock(&policy_dbs->update_mutex);
		policy_dbs->rate_mult = 1;
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}
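
/*
 * Worked example: with sampling_rate = 10000 us and
 * sampling_down_factor = 10, the governor keeps re-evaluating every
 * 10000 us while ramping up, but once the policy sits at policy->max it
 * waits 10 * 10000 us = 100 ms before the next sample, cutting the
 * sampling overhead while the CPU is fully busy.
 */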

static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) { /* nothing to do */
		return count;
	}
	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t powersave_bias_store(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;

	list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
		ondemand_powersave_bias_init(policy_dbs->policy);

	return count;
}

gov_show_one_common(sampling_rate);
gov_show_one_common(up_threshold);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(io_is_busy);
gov_show_one(od, powersave_bias);

gov_attr_rw(sampling_rate);
gov_attr_rw(io_is_busy);
gov_attr_rw(up_threshold);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);

static struct attribute *od_attrs[] = {
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};
ATTRIBUTE_GROUPS(od);

/************************** sysfs end ************************/
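
/*
 * Usage sketch for the attributes above: with the ondemand governor
 * active and global governor tunables (the common case), they appear
 * under /sys/devices/system/cpu/cpufreq/ondemand/ (or under each policy
 * directory when the driver has per-policy tunables), e.g.:
 *
 *	# echo 90 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
 *	# echo 200 > /sys/devices/system/cpu/cpufreq/ondemand/powersave_bias
 *
 * up_threshold must stay within [1, 100]; powersave_bias is clamped to
 * [0, 1000] and is expressed in tenths of a percent of frequency
 * reduction (200 -> 20%).
 */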

static struct policy_dbs_info *od_alloc(void)
{
	struct od_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

static void od_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners)
		return -ENOMEM;

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
	} else {
		dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	}

	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	dbs_data->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}
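
/*
 * Note on the threshold choice above: get_cpu_idle_time_us() returns
 * -1ULL when fine-grained idle accounting is unavailable (e.g. when NO_HZ
 * idle time accounting is not active), in which case the governor falls
 * back to the coarser 80% default; with micro-accounting it can afford
 * the tighter 95% up_threshold.
 */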

static void od_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

static void od_start(struct cpufreq_policy *policy)
{
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	ondemand_powersave_bias_init(policy);
}

static struct od_ops od_ops = {
	.powersave_bias_target = generic_powersave_bias_target,
};

static struct dbs_governor od_dbs_gov = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
	.kobj_type = { .default_groups = od_groups },
	.gov_dbs_update = od_dbs_update,
	.alloc = od_alloc,
	.free = od_free,
	.init = od_init,
	.exit = od_exit,
	.start = od_start,
};

#define CPU_FREQ_GOV_ONDEMAND	(od_dbs_gov.gov)

static void od_set_powersave_bias(unsigned int powersave_bias)
{
	unsigned int cpu;
	cpumask_var_t done;

	if (!alloc_cpumask_var(&done, GFP_KERNEL))
		return;

	default_powersave_bias = powersave_bias;
	cpumask_clear(done);

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct policy_dbs_info *policy_dbs;
		struct dbs_data *dbs_data;
		struct od_dbs_tuners *od_tuners;

		if (cpumask_test_cpu(cpu, done))
			continue;

		policy = cpufreq_cpu_get_raw(cpu);
		if (!policy || policy->governor != &CPU_FREQ_GOV_ONDEMAND)
			continue;

		policy_dbs = policy->governor_data;
		if (!policy_dbs)
			continue;

		cpumask_or(done, done, policy->cpus);

		dbs_data = policy_dbs->dbs_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	cpus_read_unlock();

	free_cpumask_var(done);
}

void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
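
/*
 * Usage sketch for the hooks above (handler body invented for
 * illustration; see drivers/cpufreq/amd_freq_sensitivity.c for the
 * in-tree user).  A replacement handler keeps the generic signature:
 *
 *	static unsigned int my_bias_target(struct cpufreq_policy *policy,
 *					   unsigned int freq_next,
 *					   unsigned int relation)
 *	{
 *		// e.g. never ask for more than 75% of the policy maximum
 *		return min(freq_next, policy->max * 3 / 4);
 *	}
 *
 *	od_register_powersave_bias_handler(my_bias_target, 250);
 *
 * The second argument seeds powersave_bias (tenths of a percent) on all
 * policies currently run by ondemand.
 */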

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &CPU_FREQ_GOV_ONDEMAND;
}
#endif

cpufreq_governor_init(CPU_FREQ_GOV_ONDEMAND);
cpufreq_governor_exit(CPU_FREQ_GOV_ONDEMAND);
v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/sched/cpufreq.h>

#include "cpufreq_ondemand.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(1)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

static struct od_ops od_ops;

static unsigned int default_powersave_bias;

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known series of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_delay_us,
 * freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index;
	unsigned int delay_hi_us;
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;

	if (!freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_next;
	}

	index = cpufreq_frequency_table_target(policy, freq_next, relation);
	freq_req = freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = cpufreq_table_find_index_h(policy, freq_avg);
	freq_lo = freq_table[index].frequency;
	index = cpufreq_table_find_index_l(policy, freq_avg);
	freq_hi = freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_lo;
	}
	delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
	delay_hi_us += (freq_hi - freq_lo) / 2;
	delay_hi_us /= freq_hi - freq_lo;
	dbs_info->freq_hi_delay_us = delay_hi_us;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
	return freq_hi;
}

static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
{
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->freq_lo = 0;
}

static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
				CPUFREQ_RELATION_H);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate we check whether the current idle time is less than
 * 20% (default).  If it is, we try to increase the frequency; otherwise,
 * we adjust the frequency proportionally to the load.
 */
static void od_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			policy_dbs->rate_mult = dbs_data->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		policy_dbs->rate_mult = 1;

		if (od_tuners->powersave_bias)
			freq_next = od_ops.powersave_bias_target(policy,
								 freq_next,
								 CPUFREQ_RELATION_L);

		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
	}
}

static unsigned int od_dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	int sample_type = dbs_info->sample_type;

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	/*
	 * OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
	 * it then.
	 */
	if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
		__cpufreq_driver_target(policy, dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
		return dbs_info->freq_lo_delay_us;
	}

	od_update(policy);

	if (dbs_info->freq_lo) {
		/* Setup SUB_SAMPLE */
		dbs_info->sample_type = OD_SUB_SAMPLE;
		return dbs_info->freq_hi_delay_us;
	}

	return dbs_data->sampling_rate * policy_dbs->rate_mult;
}

/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;

static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_data->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		/*
		 * Doing this without locking might lead to using different
		 * rate_mult values in od_update() and od_dbs_update().
		 */
		mutex_lock(&policy_dbs->update_mutex);
		policy_dbs->rate_mult = 1;
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}

static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) { /* nothing to do */
		return count;
	}
	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;

	list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
		ondemand_powersave_bias_init(policy_dbs->policy);

	return count;
}

gov_show_one_common(sampling_rate);
gov_show_one_common(up_threshold);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(io_is_busy);
gov_show_one(od, powersave_bias);

gov_attr_rw(sampling_rate);
gov_attr_rw(io_is_busy);
gov_attr_rw(up_threshold);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);

static struct attribute *od_attributes[] = {
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

/************************** sysfs end ************************/

static struct policy_dbs_info *od_alloc(void)
{
	struct od_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

static void od_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners)
		return -ENOMEM;

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
	} else {
		dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	}

	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	dbs_data->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}

static void od_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

static void od_start(struct cpufreq_policy *policy)
{
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	ondemand_powersave_bias_init(policy);
}

static struct od_ops od_ops = {
	.powersave_bias_target = generic_powersave_bias_target,
};

static struct dbs_governor od_dbs_gov = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
	.kobj_type = { .default_attrs = od_attributes },
	.gov_dbs_update = od_dbs_update,
	.alloc = od_alloc,
	.free = od_free,
	.init = od_init,
	.exit = od_exit,
	.start = od_start,
};

#define CPU_FREQ_GOV_ONDEMAND	(od_dbs_gov.gov)

static void od_set_powersave_bias(unsigned int powersave_bias)
{
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct policy_dbs_info *policy_dbs;
		struct dbs_data *dbs_data;
		struct od_dbs_tuners *od_tuners;

		if (cpumask_test_cpu(cpu, &done))
			continue;

		policy = cpufreq_cpu_get_raw(cpu);
		if (!policy || policy->governor != &CPU_FREQ_GOV_ONDEMAND)
			continue;

		policy_dbs = policy->governor_data;
		if (!policy_dbs)
			continue;

		cpumask_or(&done, &done, policy->cpus);

		dbs_data = policy_dbs->dbs_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}

void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &CPU_FREQ_GOV_ONDEMAND;
}
#endif

cpufreq_governor_init(CPU_FREQ_GOV_ONDEMAND);
cpufreq_governor_exit(CPU_FREQ_GOV_ONDEMAND);