v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL	(2 * TICK_NSEC / NSEC_PER_USEC)
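/*
 * For illustration: with HZ = 250, TICK_NSEC is 4,000,000 ns, so the minimum
 * sampling interval above evaluates to 2 * 4,000,000 / 1,000 = 8,000 us,
 * i.e. two scheduler ticks expressed in microseconds.
 */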

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/*
 * sampling_rate_store - update sampling rate effective immediately if needed.
 *
 * If new rate is smaller than the old, simply updating
 * dbs.sampling_rate might not be appropriate. For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is 10
 * ms because the user needs immediate reaction from ondemand governor, but not
 * sure if higher frequency will be required or not, then, the governor may
 * change the sampling rate too late; up to 1 second later. Thus, if we are
 * reducing the sampling rate, we need to make the new value effective
 * immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf,
			    size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int sampling_interval;
	int ret;

	ret = sscanf(buf, "%u", &sampling_interval);
	if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
		return -EINVAL;

	dbs_data->sampling_rate = sampling_interval;

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		mutex_lock(&policy_dbs->update_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(sampling_rate_store);
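/*
 * Illustrative sketch (not part of this file): a governor typically pairs
 * the store callback above with a show callback and publishes both through
 * the gov_attr_rw() helper from linux/cpufreq.h, roughly as below.  The
 * attribute table name is hypothetical.
 */
#if 0	/* example only */
static ssize_t sampling_rate_show(struct gov_attr_set *attr_set, char *buf)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);

	return sprintf(buf, "%u\n", dbs_data->sampling_rate);
}
gov_attr_rw(sampling_rate);

static struct attribute *example_dbs_attrs[] = {	/* hypothetical */
	&sampling_rate.attr,
	NULL
};
#endif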

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0, idle_periods = UINT_MAX;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily.  Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purpose of ondemand, waiting for disk IO is an indication
	 * that you're performance critical, and not that the system is actually
	 * idle, so do not add the iowait time to the CPU idle time then.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 update_time, cur_idle_time;
		unsigned int idle_time, time_elapsed;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);

			idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!time_elapsed)) {
			/*
			 * That can only happen when this function is called
			 * twice in a row with a very short interval between the
			 * calls, so the previous load value can be used then.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely((int)idle_time > 2 * sampling_rate &&
				    j_cdbs->prev_load)) {
			/*
			 * If the CPU had gone completely idle and a task has
			 * just woken up on this CPU now, it would be unfair to
			 * calculate 'load' the usual way for this elapsed
			 * time-window, because it would show near-zero load,
			 * irrespective of how CPU intensive that task actually
			 * was. This is undesirable for latency-sensitive bursty
			 * workloads.
			 *
			 * To avoid this, reuse the 'load' from the previous
			 * time-window and give this task a chance to start with
			 * a reasonably high CPU frequency. However, that
			 * shouldn't be over-done, lest we get stuck at a high
			 * load (high frequency) for too long, even when the
			 * current system load has actually dropped down, so
			 * clear prev_load to guarantee that the load will be
			 * computed again next time.
			 *
			 * Detecting this situation is easy: an unusually large
			 * 'idle_time' (as compared to the sampling rate)
			 * indicates this scenario.
			 */
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
			if (time_elapsed >= idle_time) {
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			} else {
				/*
				 * That can happen if idle_time is returned by
				 * get_cpu_idle_time_jiffy().  In that case
				 * idle_time is roughly equal to the difference
				 * between time_elapsed and "busy time" obtained
				 * from CPU statistics.  Then, the "busy time"
				 * can end up being greater than time_elapsed
				 * (for example, if jiffies_64 and the CPU
				 * statistics are updated by different CPUs),
				 * so idle_time may in fact be negative.  That
				 * means, though, that the CPU was busy all
				 * the time (on the rough average) during the
				 * last sampling interval and 100 can be
				 * returned as the load.
				 */
				load = (int)idle_time < 0 ? 100 : 0;
			}
			j_cdbs->prev_load = load;
		}

		if (unlikely((int)idle_time > 2 * sampling_rate)) {
			unsigned int periods = idle_time / sampling_rate;

			if (periods < idle_periods)
				idle_periods = periods;
		}

		if (load > max_load)
			max_load = load;
	}

	policy_dbs->idle_periods = idle_periods;

	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
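/*
 * Worked example (illustrative): with sampling_rate = 10,000 us,
 * time_elapsed = 10,000 us and idle_time = 2,500 us, the common path above
 * yields load = 100 * (10000 - 2500) / 10000 = 75.  If the CPU instead slept
 * through several periods, say idle_time = 45,000 us, then
 * idle_time > 2 * sampling_rate: the previous load is reused once (assuming
 * prev_load was non-zero, and it is cleared afterwards), and
 * idle_periods = 45000 / 10000 = 4 is recorded so a governor can scale the
 * frequency down accordingly.
 */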

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->update_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
	mutex_unlock(&policy_dbs->update_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}
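/*
 * The smp_wmb() above pairs with the smp_rmb() in dbs_update_util_handler()
 * below: once that handler observes work_in_progress == false, it also sees
 * the sample delay written under update_mutex rather than a stale value.
 */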

static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned int flags)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	if (!cpufreq_this_cpu_can_update(policy_dbs->policy))
		return;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point.  Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}
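/*
 * Illustrative note: atomic_add_unless(&policy_dbs->work_count, 1, 1)
 * increments the counter only if it is not already 1 and returns whether it
 * did, so of all the CPUs sharing a policy that pass the checks above
 * concurrently, exactly one queues the irq_work.  A minimal sketch of the
 * same gating pattern (hypothetical names):
 */
#if 0	/* example only */
static atomic_t gate = ATOMIC_INIT(0);

static void run_once_per_cycle(void)
{
	if (!atomic_add_unless(&gate, 1, 1))
		return;			/* another CPU won the race */

	/* ... exclusive work; re-open the gate with atomic_set(&gate, 0) */
}
#endif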
 
static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
					     dbs_update_util_handler);
	}
}

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

	synchronize_rcu();
}
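/*
 * Note: the synchronize_rcu() above waits for any dbs_update_util_handler()
 * invocation already running on another CPU to complete before the caller
 * proceeds to tear the policy down.
 */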
 
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->update_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->update_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}

static void cpufreq_dbs_data_release(struct kobject *kobj)
{
	struct dbs_data *dbs_data = to_dbs_data(to_gov_attr_set(kobj));
	struct dbs_governor *gov = dbs_data->gov;

	gov->exit(dbs_data);
	kfree(dbs_data);
}

int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	dbs_data->gov = gov;
	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

	ret = gov->init(dbs_data);
	if (ret)
		goto free_dbs_data;

	/*
	 * The sampling interval should not be less than the transition latency
	 * of the CPU and it also cannot be too small for dbs_update() to work
	 * correctly.
	 */
	dbs_data->sampling_rate = max_t(unsigned int,
					CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
					cpufreq_policy_transition_delay_us(policy));

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	gov->kobj_type.release = cpufreq_dbs_data_release;
	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

	kobject_put(&dbs_data->attr_set.kobj);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data);

free_dbs_data:
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);
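/*
 * Illustrative sketch (not part of this file): a governor built on this
 * framework fills in a struct dbs_governor and lets the helpers above do the
 * heavy lifting.  Field names follow cpufreq_governor.h; the callbacks are
 * hypothetical.
 */
#if 0	/* example only */
static struct dbs_governor example_dbs_gov = {
	.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("example"),
	.gov_dbs_update = example_dbs_update,	/* returns the next delay in us */
	.alloc = example_alloc,		/* allocate a policy_dbs_info container */
	.free = example_free,
	.init = example_init,		/* set up dbs_data->tuners */
	.exit = example_exit,
	.start = example_start,
};
#endif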

void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

	policy->governor_data = NULL;

	if (!count && !have_governor_per_policy())
		gov->gdbs_data = NULL;

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);

int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
		/*
		 * Make the first invocation of dbs_update() compute the load.
		 */
		j_cdbs->prev_load = 0;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_field(&kcpustat_cpu(j), CPUTIME_NICE, j);
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);

void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);

void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs;

	/* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
	mutex_lock(&gov_dbs_data_mutex);
	policy_dbs = policy->governor_data;
	if (!policy_dbs)
		goto out;

	mutex_lock(&policy_dbs->update_mutex);
	cpufreq_policy_apply_limits(policy);
	gov_update_sample_delay(policy_dbs, 0);
	mutex_unlock(&policy_dbs->update_mutex);

out:
	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
v3.15
 
/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
	if (have_governor_per_policy())
		return dbs_data->cdata->attr_group_gov_pol;
	else
		return dbs_data->cdata->attr_group_gov_sys;
}

void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpufreq_policy *policy;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (dbs_data->cdata->governor == GOV_ONDEMAND)
		ignore_nice = od_tuners->ignore_nice_load;
	else
		ignore_nice = cs_tuners->ignore_nice_load;

	policy = cdbs->cur_policy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_common_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (dbs_data->cdata->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32-bit systems
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		if (load > max_load)
			max_load = load;
	}

	dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
		unsigned int delay)
{
	struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

	mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
}

void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
		unsigned int delay, bool all_cpus)
{
	int i;

	mutex_lock(&cpufreq_governor_lock);
	if (!policy->governor_enabled)
		goto out_unlock;

	if (!all_cpus) {
		/*
		 * Use raw_smp_processor_id() to avoid preemptible warnings.
		 * We know that this is only called with all_cpus == false from
		 * works that have been queued with *_work_on() functions and
		 * those works are canceled during CPU_DOWN_PREPARE so they
		 * can't possibly run on any other CPU.
		 */
		__gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
	} else {
		for_each_cpu(i, policy->cpus)
			__gov_queue_work(i, dbs_data, delay);
	}

out_unlock:
	mutex_unlock(&cpufreq_governor_lock);
}
EXPORT_SYMBOL_GPL(gov_queue_work);

static inline void gov_cancel_work(struct dbs_data *dbs_data,
		struct cpufreq_policy *policy)
{
	struct cpu_dbs_common_info *cdbs;
	int i;

	for_each_cpu(i, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
		cancel_delayed_work_sync(&cdbs->work);
	}
}

/* Return true if the CPU load needs to be evaluated again */
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
		unsigned int sampling_rate)
{
	if (policy_is_shared(cdbs->cur_policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

		/* Do nothing if we recently have sampled */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			cdbs->time_stamp = time_now;
	}

	return true;
}
EXPORT_SYMBOL_GPL(need_load_eval);
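/*
 * Illustrative example: with sampling_rate = 10,000 us on a shared policy,
 * a timer firing 4,000 us after the recorded time_stamp sees
 * delta_us < sampling_rate / 2 and skips the evaluation, while one firing
 * 6,000 us after it refreshes time_stamp and returns true.
 */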

static void set_sampling_rate(struct dbs_data *dbs_data,
		unsigned int sampling_rate)
{
	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
		od_tuners->sampling_rate = sampling_rate;
	}
}

int cpufreq_governor_dbs(struct cpufreq_policy *policy,
		struct common_dbs_data *cdata, unsigned int event)
{
	struct dbs_data *dbs_data;
	struct od_cpu_dbs_info_s *od_dbs_info = NULL;
	struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
	struct od_ops *od_ops = NULL;
	struct od_dbs_tuners *od_tuners = NULL;
	struct cs_dbs_tuners *cs_tuners = NULL;
	struct cpu_dbs_common_info *cpu_cdbs;
	unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
	int io_busy = 0;
	int rc;

	if (have_governor_per_policy())
		dbs_data = policy->governor_data;
	else
		dbs_data = cdata->gdbs_data;

	WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(dbs_data);
		} else if (dbs_data) {
			dbs_data->usage_count++;
			policy->governor_data = dbs_data;
			return 0;
		}

		dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
		if (!dbs_data) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		dbs_data->cdata = cdata;
		dbs_data->usage_count = 1;
		rc = cdata->init(dbs_data);
		if (rc) {
			pr_err("%s: POLICY_INIT: init() failed\n", __func__);
			kfree(dbs_data);
			return rc;
		}

		if (!have_governor_per_policy())
			WARN_ON(cpufreq_get_global_kobject());

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr(dbs_data));
		if (rc) {
			cdata->exit(dbs_data);
			kfree(dbs_data);
			return rc;
		}

		policy->governor_data = dbs_data;

		/* policy latency is in ns. Convert it to us first */
		latency = policy->cpuinfo.transition_latency / 1000;
		if (latency == 0)
			latency = 1;

		/* Bring kernel and HW constraints together */
		dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
				MIN_LATENCY_MULTIPLIER * latency);
		set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
					latency * LATENCY_MULTIPLIER));

		if ((cdata->governor == GOV_CONSERVATIVE) &&
				(!policy->governor->initialized)) {
			struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;

			cpufreq_register_notifier(cs_ops->notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		if (!have_governor_per_policy())
			cdata->gdbs_data = dbs_data;

		return 0;
	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--dbs_data->usage_count) {
			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr(dbs_data));

			if (!have_governor_per_policy())
				cpufreq_put_global_kobject();

			if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
				(policy->governor->initialized == 1)) {
				struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;

				cpufreq_unregister_notifier(cs_ops->notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
			}

			cdata->exit(dbs_data);
			kfree(dbs_data);
			cdata->gdbs_data = NULL;
		}

		policy->governor_data = NULL;
		return 0;
	}

	cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		cs_tuners = dbs_data->tuners;
		cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		od_tuners = dbs_data->tuners;
		od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		od_ops = dbs_data->cdata->gov_ops;
		io_busy = od_tuners->io_is_busy;
	}

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!policy->cur)
			return -EINVAL;

		mutex_lock(&dbs_data->mutex);

		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_common_info *j_cdbs =
				dbs_data->cdata->get_cpu_cdbs(j);

			j_cdbs->cpu = j;
			j_cdbs->cur_policy = policy;
			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
					       &j_cdbs->prev_cpu_wall, io_busy);
			if (ignore_nice)
				j_cdbs->prev_cpu_nice =
					kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			mutex_init(&j_cdbs->timer_mutex);
			INIT_DEFERRABLE_WORK(&j_cdbs->work,
					     dbs_data->cdata->gov_dbs_timer);
		}

		if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
			cs_dbs_info->down_skip = 0;
			cs_dbs_info->enable = 1;
			cs_dbs_info->requested_freq = policy->cur;
		} else {
			od_dbs_info->rate_mult = 1;
			od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
			od_ops->powersave_bias_init_cpu(cpu);
		}

		mutex_unlock(&dbs_data->mutex);

		/* Initiate timer time stamp */
		cpu_cdbs->time_stamp = ktime_get();

		gov_queue_work(dbs_data, policy,
				delay_for_sampling_rate(sampling_rate), true);
		break;

	case CPUFREQ_GOV_STOP:
		if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
			cs_dbs_info->enable = 0;

		gov_cancel_work(dbs_data, policy);

		mutex_lock(&dbs_data->mutex);
		mutex_destroy(&cpu_cdbs->timer_mutex);
		cpu_cdbs->cur_policy = NULL;

		mutex_unlock(&dbs_data->mutex);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&dbs_data->mutex);
		if (!cpu_cdbs->cur_policy) {
			mutex_unlock(&dbs_data->mutex);
			break;
		}
		mutex_lock(&cpu_cdbs->timer_mutex);
		if (policy->max < cpu_cdbs->cur_policy->cur)
			__cpufreq_driver_target(cpu_cdbs->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > cpu_cdbs->cur_policy->cur)
			__cpufreq_driver_target(cpu_cdbs->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		dbs_check_cpu(dbs_data, cpu);
		mutex_unlock(&cpu_cdbs->timer_mutex);
		mutex_unlock(&dbs_data->mutex);
		break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
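/*
 * Note on the two versions above: in v3.15 a single entry point,
 * cpufreq_governor_dbs(), demultiplexes the POLICY_INIT/EXIT and
 * START/STOP/LIMITS events and drives sampling from deferrable delayed work,
 * whereas v6.8 exports one helper per operation
 * (cpufreq_dbs_governor_init/exit/start/stop/limits) and samples from the
 * scheduler's utilization-update hooks via irq_work instead of timers.
 */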