v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/sched/cpufreq.h>

#include "cpufreq_ondemand.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(1)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

static struct od_ops od_ops;

static unsigned int default_powersave_bias;

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known CPU series by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}
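/*
 * Data-flow note: the value computed above seeds dbs_data->io_is_busy
 * in od_init() below; dbs_update() (in cpufreq_governor.c) then either
 * counts iowait as busy time or folds it into idle time when deriving
 * the load.
 */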

/*
 * Find the right frequency to set now with powersave_bias enabled.
 * Returns the freq_hi to be used right now and sets freq_hi_delay_us,
 * freq_lo and freq_lo_delay_us in the per-CPU area for averaging the
 * two frequencies.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index;
	unsigned int delay_hi_us;
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;

	if (!freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_next;
	}

	index = cpufreq_frequency_table_target(policy, freq_next, relation);
	freq_req = freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = cpufreq_table_find_index_h(policy, freq_avg);
	freq_lo = freq_table[index].frequency;
	index = cpufreq_table_find_index_l(policy, freq_avg);
	freq_hi = freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_lo;
	}
	delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
	delay_hi_us += (freq_hi - freq_lo) / 2;
	delay_hi_us /= freq_hi - freq_lo;
	dbs_info->freq_hi_delay_us = delay_hi_us;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
	return freq_hi;
}
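/*
 * Worked example with illustrative numbers (not taken from any real
 * frequency table): powersave_bias = 100 (i.e. 10%), freq_next
 * resolving to freq_req = 2000000 kHz, and a table containing 1600000
 * and 2000000:
 *
 *   freq_reduc  = 2000000 * 100 / 1000 = 200000 kHz
 *   freq_avg    = 2000000 - 200000 = 1800000 kHz
 *   freq_lo     = 1600000 kHz, freq_hi = 2000000 kHz
 *   delay_hi_us = (1800000 - 1600000) * 10000 / (2000000 - 1600000)
 *               = 5000 us	(for sampling_rate = 10000 us)
 *
 * The policy then spends 5000 us at freq_hi and 5000 us at freq_lo per
 * sampling period, averaging out to the biased 1800000 kHz target.
 */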

static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
{
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->freq_lo = 0;
}

static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
				CPUFREQ_RELATION_H);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}
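/*
 * Relation semantics in the call above: with powersave_bias set, the
 * biased target is requested with CPUFREQ_RELATION_L (lowest table
 * frequency at or above the target); otherwise the jump toward
 * policy->max uses CPUFREQ_RELATION_H (highest table frequency at or
 * below it).
 */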

/*
 * Every sampling_rate we check the current load: if it exceeds
 * up_threshold (80% by default, i.e. less than 20% idle time), jump to
 * the maximum frequency; otherwise set a frequency proportional to the
 * load.
 */
static void od_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			policy_dbs->rate_mult = dbs_data->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		policy_dbs->rate_mult = 1;

		if (od_tuners->powersave_bias)
			freq_next = od_ops.powersave_bias_target(policy,
								 freq_next,
								 CPUFREQ_RELATION_L);

		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
	}
}
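/*
 * Example of the proportional path with illustrative numbers: for
 * cpuinfo.min_freq = 800000 kHz, cpuinfo.max_freq = 2000000 kHz and a
 * measured load of 50, freq_next = 800000 + 50 * 1200000 / 100 =
 * 1400000 kHz; CPUFREQ_RELATION_C then selects the table frequency
 * closest to that value.
 */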

static unsigned int od_dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	int sample_type = dbs_info->sample_type;

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	/*
	 * OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
	 * it then.
	 */
	if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
		__cpufreq_driver_target(policy, dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
		return dbs_info->freq_lo_delay_us;
	}

	od_update(policy);

	if (dbs_info->freq_lo) {
		/* Setup SUB_SAMPLE */
		dbs_info->sample_type = OD_SUB_SAMPLE;
		return dbs_info->freq_hi_delay_us;
	}

	return dbs_data->sampling_rate * policy_dbs->rate_mult;
}
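/*
 * Sampling timeline when powersave_bias is active (sketch): a NORMAL
 * sample runs od_update(), which programs freq_hi and schedules a SUB
 * sample after freq_hi_delay_us; the SUB sample then drops to freq_lo
 * for freq_lo_delay_us before the next NORMAL sample, splitting each
 * sampling period between the two frequencies.
 */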

/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;

static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_data->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		/*
		 * Doing this without locking might lead to using different
		 * rate_mult values in od_update() and od_dbs_update().
		 */
		mutex_lock(&policy_dbs->update_mutex);
		policy_dbs->rate_mult = 1;
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}

static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) { /* nothing to do */
		return count;
	}
	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;

	list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
		ondemand_powersave_bias_init(policy_dbs->policy);

	return count;
}

gov_show_one_common(sampling_rate);
gov_show_one_common(up_threshold);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(io_is_busy);
gov_show_one(od, powersave_bias);

gov_attr_rw(sampling_rate);
gov_attr_rw(io_is_busy);
gov_attr_rw(up_threshold);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);

static struct attribute *od_attributes[] = {
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

/************************** sysfs end ************************/
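/*
 * Usage sketch (path assumes global, not per-policy, tunables): once
 * the governor is active, the attributes above appear under
 * /sys/devices/system/cpu/cpufreq/ondemand/, so e.g.
 *
 *   echo 90 > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
 *
 * lands in store_up_threshold(); values outside 1..100 are rejected
 * with -EINVAL.
 */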

static struct policy_dbs_info *od_alloc(void)
{
	struct od_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

static void od_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners)
		return -ENOMEM;

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
	} else {
		dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	}

	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	dbs_data->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}
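/*
 * Background note: get_cpu_idle_time_us() returns -1 when
 * microsecond-resolution idle accounting is unavailable (e.g. NO_HZ
 * idle tracking is not active on the CPU), which is why od_init()
 * falls back to the coarser DEF_FREQUENCY_UP_THRESHOLD in that case.
 */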

static void od_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

static void od_start(struct cpufreq_policy *policy)
{
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	ondemand_powersave_bias_init(policy);
}

static struct od_ops od_ops = {
	.powersave_bias_target = generic_powersave_bias_target,
};

static struct dbs_governor od_dbs_gov = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
	.kobj_type = { .default_attrs = od_attributes },
	.gov_dbs_update = od_dbs_update,
	.alloc = od_alloc,
	.free = od_free,
	.init = od_init,
	.exit = od_exit,
	.start = od_start,
};

#define CPU_FREQ_GOV_ONDEMAND	(od_dbs_gov.gov)

static void od_set_powersave_bias(unsigned int powersave_bias)
{
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct policy_dbs_info *policy_dbs;
		struct dbs_data *dbs_data;
		struct od_dbs_tuners *od_tuners;

		if (cpumask_test_cpu(cpu, &done))
			continue;

		policy = cpufreq_cpu_get_raw(cpu);
		if (!policy || policy->governor != &CPU_FREQ_GOV_ONDEMAND)
			continue;

		policy_dbs = policy->governor_data;
		if (!policy_dbs)
			continue;

		cpumask_or(&done, &done, policy->cpus);

		dbs_data = policy_dbs->dbs_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}

void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
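/*
 * Override note: a platform driver can install its own bias callback
 * at runtime through this pair of helpers; in the kernel tree,
 * drivers/cpufreq/amd_freq_sensitivity.c, for example, registers its
 * amd_powersave_bias_target() this way.
 */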

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &CPU_FREQ_GOV_ONDEMAND;
}
#endif

cpufreq_governor_init(CPU_FREQ_GOV_ONDEMAND);
cpufreq_governor_exit(CPU_FREQ_GOV_ONDEMAND);
 
 
 
v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/sched/cpufreq.h>

#include "cpufreq_ondemand.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(1)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

static struct od_ops od_ops;

static unsigned int default_powersave_bias;

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known CPU series by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

/*
 * Find the right frequency to set now with powersave_bias enabled.
 * Returns the freq_hi to be used right now and sets freq_hi_delay_us,
 * freq_lo and freq_lo_delay_us in the per-CPU area for averaging the
 * two frequencies.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index;
	unsigned int delay_hi_us;
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;

	if (!freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_next;
	}

	index = cpufreq_frequency_table_target(policy, freq_next, relation);
	freq_req = freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = cpufreq_table_find_index_h(policy, freq_avg);
	freq_lo = freq_table[index].frequency;
	index = cpufreq_table_find_index_l(policy, freq_avg);
	freq_hi = freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_lo;
	}
	delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
	delay_hi_us += (freq_hi - freq_lo) / 2;
	delay_hi_us /= freq_hi - freq_lo;
	dbs_info->freq_hi_delay_us = delay_hi_us;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
	return freq_hi;
}

static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
{
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->freq_lo = 0;
}

static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
				CPUFREQ_RELATION_H);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate we check the current load: if it exceeds
 * up_threshold (80% by default, i.e. less than 20% idle time), jump to
 * the maximum frequency; otherwise set a frequency proportional to the
 * load.
 */
static void od_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			policy_dbs->rate_mult = dbs_data->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		policy_dbs->rate_mult = 1;

		if (od_tuners->powersave_bias)
			freq_next = od_ops.powersave_bias_target(policy,
								 freq_next,
								 CPUFREQ_RELATION_L);

		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
	}
}

static unsigned int od_dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	int sample_type = dbs_info->sample_type;

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	/*
	 * OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
	 * it then.
	 */
	if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
		__cpufreq_driver_target(policy, dbs_info->freq_lo,
					CPUFREQ_RELATION_H);
		return dbs_info->freq_lo_delay_us;
	}

	od_update(policy);

	if (dbs_info->freq_lo) {
		/* Setup SUB_SAMPLE */
		dbs_info->sample_type = OD_SUB_SAMPLE;
		return dbs_info->freq_hi_delay_us;
	}

	return dbs_data->sampling_rate * policy_dbs->rate_mult;
}

/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;

static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_data->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		/*
		 * Doing this without locking might lead to using different
		 * rate_mult values in od_update() and od_dbs_update().
		 */
		mutex_lock(&policy_dbs->update_mutex);
		policy_dbs->rate_mult = 1;
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}

static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) { /* nothing to do */
		return count;
	}
	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;

	list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
		ondemand_powersave_bias_init(policy_dbs->policy);

	return count;
}

gov_show_one_common(sampling_rate);
gov_show_one_common(up_threshold);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(io_is_busy);
gov_show_one(od, powersave_bias);

gov_attr_rw(sampling_rate);
gov_attr_rw(io_is_busy);
gov_attr_rw(up_threshold);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);

static struct attribute *od_attributes[] = {
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

/************************** sysfs end ************************/

static struct policy_dbs_info *od_alloc(void)
{
	struct od_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

static void od_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners)
		return -ENOMEM;

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
	} else {
		dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	}

	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	dbs_data->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}

static void od_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

static void od_start(struct cpufreq_policy *policy)
{
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	ondemand_powersave_bias_init(policy);
}

static struct od_ops od_ops = {
	.powersave_bias_target = generic_powersave_bias_target,
};

static struct dbs_governor od_dbs_gov = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
	.kobj_type = { .default_attrs = od_attributes },
	.gov_dbs_update = od_dbs_update,
	.alloc = od_alloc,
	.free = od_free,
	.init = od_init,
	.exit = od_exit,
	.start = od_start,
};

#define CPU_FREQ_GOV_ONDEMAND	(&od_dbs_gov.gov)

static void od_set_powersave_bias(unsigned int powersave_bias)
{
	unsigned int cpu;
	cpumask_t done;

	default_powersave_bias = powersave_bias;
	cpumask_clear(&done);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct policy_dbs_info *policy_dbs;
		struct dbs_data *dbs_data;
		struct od_dbs_tuners *od_tuners;

		if (cpumask_test_cpu(cpu, &done))
			continue;

		policy = cpufreq_cpu_get_raw(cpu);
		if (!policy || policy->governor != CPU_FREQ_GOV_ONDEMAND)
			continue;

		policy_dbs = policy->governor_data;
		if (!policy_dbs)
			continue;

		cpumask_or(&done, &done, policy->cpus);

		dbs_data = policy_dbs->dbs_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	put_online_cpus();
}

void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(CPU_FREQ_GOV_ONDEMAND);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(CPU_FREQ_GOV_ONDEMAND);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return CPU_FREQ_GOV_ONDEMAND;
}

fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);
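/*
 * Registration note: when ondemand is the default governor it is
 * registered early via fs_initcall() so that it is available before
 * cpufreq drivers probe; otherwise ordinary module_init() ordering
 * suffices. In the v5.9 listing above, the cpufreq_governor_init()/
 * cpufreq_governor_exit() macros generate this registration
 * boilerplate instead.
 */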