v3.1
/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * dbs is used in this file as a shorthand for demand based switching.
 * It helps to keep variable names smaller and simpler.
 */

#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL) this governor will not work.
 * All times here are in us.
 */
#define MIN_SAMPLING_RATE_RATIO			(2)

static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
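
/*
 * Worked example (hypothetical numbers, a sketch of the arithmetic in
 * cpufreq_governor_dbs() below): a driver reporting
 * cpuinfo.transition_latency = 10000 ns gives latency = 10000 / 1000 =
 * 10 us, so the default sampling_rate becomes
 * max(min_sampling_rate, 10 * LATENCY_MULTIPLIER) = 10000 us, i.e. the
 * load is re-evaluated roughly every 10 ms.
 */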

static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
       .name                   = "ondemand",
       .governor               = cpufreq_governor_dbs,
       .max_transition_latency = TRANSITION_LATENCY_LIMIT,
       .owner                  = THIS_MODULE,
};

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};

struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_iowait;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	unsigned int rate_mult;
	int cpu;
	unsigned int sample_type:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int sampling_down_factor;
	unsigned int powersave_bias;
	unsigned int io_is_busy;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};

static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
							cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);

	return (cputime64_t)jiffies_to_usecs(idle_time);
}

static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}

static inline cputime64_t get_cpu_iowait_time(unsigned int cpu, cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}

/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
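
/*
 * Worked example for powersave_bias_target() (hypothetical numbers):
 * with powersave_bias = 100 (10%), a request of freq_req = 2000000 kHz
 * yields freq_avg = 1800000 kHz. If the table brackets this between
 * freq_lo = 1600000 and freq_hi = 2000000 and jiffies_total = 10, then
 * jiffies_hi = (200000 * 10 + 200000) / 400000 = 5 (integer division),
 * so the CPU spends 5 jiffies at freq_hi and 5 at freq_lo, averaging
 * roughly 1.8 GHz.
 */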

static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}

/************************** sysfs interface ************************/

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

define_one_global_ro(sampling_rate_min);

/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)              \
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(io_is_busy, io_is_busy);
show_one(up_threshold, up_threshold);
show_one(sampling_down_factor, sampling_down_factor);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
	return count;
}

static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_tuners_ins.io_is_busy = !!input;
	return count;
}

static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}
	dbs_tuners_ins.up_threshold = input;
	return count;
}

static ssize_t store_sampling_down_factor(struct kobject *a,
			struct attribute *b, const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;
	dbs_tuners_ins.sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->rate_mult = 1;
	}
	return count;
}

static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;

	}
	return count;
}

static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	return count;
}

define_one_global_rw(sampling_rate);
define_one_global_rw(io_is_busy);
define_one_global_rw(up_threshold);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};

/************************** sysfs end ************************/

static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
{
	if (dbs_tuners_ins.powersave_bias)
		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
	else if (p->cur == p->max)
		return;

	__cpufreq_driver_target(p, freq, dbs_tuners_ins.powersave_bias ?
			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate, we check if the current idle time is less
	 * than 20% (default); if it is, we try to increase the frequency.
	 * Every sampling_rate, we also look for the lowest frequency which
	 * can sustain the load while keeping idle time over 30%. If such
	 * a frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of the current frequency.
	 */

	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
		unsigned int idle_time, wall_time, iowait_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
		cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
				j_dbs_info->prev_cpu_iowait);
		j_dbs_info->prev_cpu_iowait = cur_iowait_time;

		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
					 j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		/*
		 * For the purpose of ondemand, waiting for disk IO is an
		 * indication that you're performance critical, and not that
		 * the system is actually idle. So subtract the iowait time
		 * from the cpu idle time.
		 */

		if (dbs_tuners_ins.io_is_busy && idle_time >= iowait_time)
			idle_time -= iowait_time;

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}

	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			this_dbs_info->rate_mult =
				dbs_tuners_ins.sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we focus 10 points under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	     policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		/* No longer fully busy, reset rate_mult */
		this_dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
	}
}
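
/*
 * Worked example for dbs_check_cpu() (hypothetical numbers): with
 * up_threshold = 80 and policy->cur = 1000000 kHz, a load of 85% at
 * freq_avg = 1000000 gives max_load_freq = 85000000 > 80 * 1000000,
 * so the governor jumps straight to policy->max. A load of 60%
 * (60000000 < (80 - 10) * 1000000) instead yields
 * freq_next = 60000000 / 70 = 857142 kHz, which is then rounded to a
 * table frequency with CPUFREQ_RELATION_L.
 */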

static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	int delay;

	mutex_lock(&dbs_info->timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		} else {
			/* We want all CPUs to do sampling nearly on
			 * same jiffy
			 */
			delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
				* dbs_info->rate_mult);

			if (num_online_cpus() > 1)
				delay -= jiffies % delay;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
		delay = dbs_info->freq_lo_jiffies;
	}
	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}

static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	if (num_online_cpus() > 1)
		delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}
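
/*
 * Worked example for the jiffy alignment above (hypothetical numbers):
 * with HZ = 100 and sampling_rate = 100000 us, delay =
 * usecs_to_jiffies(100000) = 10 jiffies. On SMP, "delay -= jiffies %
 * delay" shortens the first period so expiry lands on a multiple of 10
 * jiffies; at jiffies = 1003 the delay becomes 10 - 3 = 7, so all CPUs
 * end up sampling on nearly the same jiffy.
 */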

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		dbs_enable++;
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->cpu = cpu;
		this_dbs_info->rate_mult = 1;
		ondemand_powersave_bias_init_cpu(cpu);
		/*
		 * Start the timer/schedule the work when this governor
		 * is used for the first time.
		 */
		if (dbs_enable == 1) {
			unsigned int latency;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}

			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);
			dbs_tuners_ins.io_is_busy = should_io_be_busy();
		}
		mutex_unlock(&dbs_mutex);

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_timer_init(this_dbs_info);
		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		mutex_destroy(&this_dbs_info->timer_mutex);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}

static int __init cpufreq_gov_dbs_init(void)
{
	cputime64_t wall;
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, &wall);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In no_hz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		 */
		min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		min_sampling_rate =
			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
	}

	return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}


MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);

v6.9.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include <linux/sched/cpufreq.h>

#include "cpufreq_ondemand.h"

/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(100000)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MIN_FREQUENCY_UP_THRESHOLD		(1)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)

static struct od_ops od_ops;

static unsigned int default_powersave_bias;

/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, whitelist specific known (series) of CPUs by default, and
 * leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
	/*
	 * For Intel, Core 2 (model 15) and later have an efficient idle.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
			boot_cpu_data.x86 == 6 &&
			boot_cpu_data.x86_model >= 15)
		return 1;
#endif
	return 0;
}

/*
 * Find right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_delay_us,
 * freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
		unsigned int freq_next, unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index;
	unsigned int delay_hi_us;
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;

	if (!freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_next;
	}

	index = cpufreq_frequency_table_target(policy, freq_next, relation);
	freq_req = freq_table[index].frequency;
	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = cpufreq_table_find_index_h(policy, freq_avg,
					   relation & CPUFREQ_RELATION_E);
	freq_lo = freq_table[index].frequency;
	index = cpufreq_table_find_index_l(policy, freq_avg,
					   relation & CPUFREQ_RELATION_E);
	freq_hi = freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_delay_us = 0;
		return freq_lo;
	}
	delay_hi_us = (freq_avg - freq_lo) * dbs_data->sampling_rate;
	delay_hi_us += (freq_hi - freq_lo) / 2;
	delay_hi_us /= freq_hi - freq_lo;
	dbs_info->freq_hi_delay_us = delay_hi_us;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_delay_us = dbs_data->sampling_rate - delay_hi_us;
	return freq_hi;
}

static void ondemand_powersave_bias_init(struct cpufreq_policy *policy)
{
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->freq_lo = 0;
}

static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	if (od_tuners->powersave_bias)
		freq = od_ops.powersave_bias_target(policy, freq,
						    CPUFREQ_RELATION_HE);
	else if (policy->cur == policy->max)
		return;

	__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
			CPUFREQ_RELATION_LE : CPUFREQ_RELATION_HE);
}

/*
 * Every sampling_rate, we check if the current idle time is less than 20%
 * (default); if it is, we try to increase the frequency. Otherwise, we adjust
 * the frequency proportionally to the load.
 */
static void od_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int load = dbs_update(policy);

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
	if (load > dbs_data->up_threshold) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			policy_dbs->rate_mult = dbs_data->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
	} else {
		/* Calculate the next frequency proportional to load */
		unsigned int freq_next, min_f, max_f;

		min_f = policy->cpuinfo.min_freq;
		max_f = policy->cpuinfo.max_freq;
		freq_next = min_f + load * (max_f - min_f) / 100;

		/* No longer fully busy, reset rate_mult */
		policy_dbs->rate_mult = 1;

		if (od_tuners->powersave_bias)
			freq_next = od_ops.powersave_bias_target(policy,
								 freq_next,
								 CPUFREQ_RELATION_LE);

		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_CE);
	}
}

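/*
 * Worked example for od_update() (hypothetical numbers): with
 * cpuinfo.min_freq = 800000 kHz, cpuinfo.max_freq = 3000000 kHz and
 * load = 50, the proportional step gives
 * freq_next = 800000 + 50 * (3000000 - 800000) / 100 = 1900000 kHz.
 * Unlike the older ondemand above, which derived freq_next from
 * max_load_freq and the up/down thresholds, the frequency now scales
 * linearly with load.
 */
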
static unsigned int od_dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
	int sample_type = dbs_info->sample_type;

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	/*
	 * OD_SUB_SAMPLE doesn't make sense if sample_delay_ns is 0, so ignore
	 * it then.
	 */
	if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
		__cpufreq_driver_target(policy, dbs_info->freq_lo,
					CPUFREQ_RELATION_HE);
		return dbs_info->freq_lo_delay_us;
	}

	od_update(policy);

	if (dbs_info->freq_lo) {
		/* Setup SUB_SAMPLE */
		dbs_info->sample_type = OD_SUB_SAMPLE;
		return dbs_info->freq_hi_delay_us;
	}

	return dbs_data->sampling_rate * policy_dbs->rate_mult;
}
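
/*
 * Sampling cadence sketch (assuming powersave_bias is set and
 * od_update() picked a freq_lo/freq_hi pair): a NORMAL sample runs at
 * freq_hi and returns freq_hi_delay_us, then the SUB sample drops to
 * freq_lo and returns freq_lo_delay_us, so one averaging period spans
 * roughly sampling_rate microseconds in total.
 */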

/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;

static ssize_t io_is_busy_store(struct gov_attr_set *attr_set, const char *buf,
				size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	dbs_data->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	dbs_data->up_threshold = input;
	return count;
}

static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
					  const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
		return -EINVAL;

	dbs_data->sampling_down_factor = input;

	/* Reset down sampling multiplier in case it was active */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		/*
		 * Doing this without locking might lead to using different
		 * rate_mult values in od_update() and od_dbs_update().
		 */
		mutex_lock(&policy_dbs->update_mutex);
		policy_dbs->rate_mult = 1;
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}

static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
				      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == dbs_data->ignore_nice_load) { /* nothing to do */
		return count;
	}
	dbs_data->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	gov_update_cpu_data(dbs_data);

	return count;
}

static ssize_t powersave_bias_store(struct gov_attr_set *attr_set,
				    const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct policy_dbs_info *policy_dbs;
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	od_tuners->powersave_bias = input;

	list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
		ondemand_powersave_bias_init(policy_dbs->policy);

	return count;
}

gov_show_one_common(sampling_rate);
gov_show_one_common(up_threshold);
gov_show_one_common(sampling_down_factor);
gov_show_one_common(ignore_nice_load);
gov_show_one_common(io_is_busy);
gov_show_one(od, powersave_bias);

gov_attr_rw(sampling_rate);
gov_attr_rw(io_is_busy);
gov_attr_rw(up_threshold);
gov_attr_rw(sampling_down_factor);
gov_attr_rw(ignore_nice_load);
gov_attr_rw(powersave_bias);

static struct attribute *od_attrs[] = {
	&sampling_rate.attr,
	&up_threshold.attr,
	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
	NULL
};
ATTRIBUTE_GROUPS(od);

/************************** sysfs end ************************/

static struct policy_dbs_info *od_alloc(void)
{
	struct od_policy_dbs_info *dbs_info;

	dbs_info = kzalloc(sizeof(*dbs_info), GFP_KERNEL);
	return dbs_info ? &dbs_info->policy_dbs : NULL;
}

static void od_free(struct policy_dbs_info *policy_dbs)
{
	kfree(to_dbs_info(policy_dbs));
}

static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners)
		return -ENOMEM;

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
	} else {
		dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
	}

	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	dbs_data->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	dbs_data->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	return 0;
}

static void od_exit(struct dbs_data *dbs_data)
{
	kfree(dbs_data->tuners);
}

static void od_start(struct cpufreq_policy *policy)
{
	struct od_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);

	dbs_info->sample_type = OD_NORMAL_SAMPLE;
	ondemand_powersave_bias_init(policy);
}

static struct od_ops od_ops = {
	.powersave_bias_target = generic_powersave_bias_target,
};

static struct dbs_governor od_dbs_gov = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("ondemand"),
	.kobj_type = { .default_groups = od_groups },
	.gov_dbs_update = od_dbs_update,
	.alloc = od_alloc,
	.free = od_free,
	.init = od_init,
	.exit = od_exit,
	.start = od_start,
};

#define CPU_FREQ_GOV_ONDEMAND	(od_dbs_gov.gov)

static void od_set_powersave_bias(unsigned int powersave_bias)
{
	unsigned int cpu;
	cpumask_var_t done;

	if (!alloc_cpumask_var(&done, GFP_KERNEL))
		return;

	default_powersave_bias = powersave_bias;
	cpumask_clear(done);

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct policy_dbs_info *policy_dbs;
		struct dbs_data *dbs_data;
		struct od_dbs_tuners *od_tuners;

		if (cpumask_test_cpu(cpu, done))
			continue;

		policy = cpufreq_cpu_get_raw(cpu);
		if (!policy || policy->governor != &CPU_FREQ_GOV_ONDEMAND)
			continue;

		policy_dbs = policy->governor_data;
		if (!policy_dbs)
			continue;

		cpumask_or(done, done, policy->cpus);

		dbs_data = policy_dbs->dbs_data;
		od_tuners = dbs_data->tuners;
		od_tuners->powersave_bias = default_powersave_bias;
	}
	cpus_read_unlock();

	free_cpumask_var(done);
}

void od_register_powersave_bias_handler(unsigned int (*f)
		(struct cpufreq_policy *, unsigned int, unsigned int),
		unsigned int powersave_bias)
{
	od_ops.powersave_bias_target = f;
	od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

void od_unregister_powersave_bias_handler(void)
{
	od_ops.powersave_bias_target = generic_powersave_bias_target;
	od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
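
/*
 * Minimal usage sketch (hypothetical driver code, not part of this
 * file): a platform driver can swap in its own biased-target callback
 * via the exported handlers above (as e.g. amd_freq_sensitivity does
 * in-tree) and restore the generic one on exit. The names
 * my_bias_target/my_driver_* below are illustrative only.
 */
#if 0
static unsigned int my_bias_target(struct cpufreq_policy *policy,
				   unsigned int freq_next,
				   unsigned int relation)
{
	/* e.g. clamp the governor's request to the policy limits */
	return clamp_val(freq_next, policy->min, policy->max);
}

static int __init my_driver_init(void)
{
	/* take over powersave_bias handling with a 10% default bias */
	od_register_powersave_bias_handler(my_bias_target, 100);
	return 0;
}

static void __exit my_driver_exit(void)
{
	od_unregister_powersave_bias_handler();
}
#endif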

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &CPU_FREQ_GOV_ONDEMAND;
}
#endif

cpufreq_governor_init(CPU_FREQ_GOV_ONDEMAND);
cpufreq_governor_exit(CPU_FREQ_GOV_ONDEMAND);