// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>

#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
	struct gov_attr_set	attr_set;
	unsigned int		rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy	*policy;

	struct sugov_tunables	*tunables;
	struct list_head	tunables_hook;

	raw_spinlock_t		update_lock;	/* For shared policies */
	u64			last_freq_update_time;
	s64			freq_update_delay_ns;
	unsigned int		next_freq;
	unsigned int		cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work		irq_work;
	struct kthread_work	work;
	struct mutex		work_lock;
	struct kthread_worker	worker;
	struct task_struct	*thread;
	bool			work_in_progress;

	bool			limits_changed;
	bool			need_freq_update;
};

struct sugov_cpu {
	struct update_util_data	update_util;
	struct sugov_policy	*sg_policy;
	unsigned int		cpu;

	bool			iowait_boost_pending;
	unsigned int		iowait_boost;
	u64			last_update;

	unsigned long		bw_dl;
	unsigned long		max;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long		saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, our
	 * sugov_fast_switch() call may not for the fast switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * For the slow switching platforms, the kthread is always scheduled on
	 * the right set of CPUs and any CPU can find the next frequency and
	 * schedule the kthread.
	 */
	if (sg_policy->policy->fast_switch_enabled &&
	    !cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	if (unlikely(sg_policy->limits_changed)) {
		sg_policy->limits_changed = false;
		sg_policy->need_freq_update = true;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}
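/*
 * Worked example of the rate limit above (illustrative values, not from this
 * file): with rate_limit_us = 500, freq_update_delay_ns = 500 * NSEC_PER_USEC
 * = 500,000 ns, so an update arriving 300 us after the previous frequency
 * update is ignored, unless limits_changed forces a re-evaluation.
 */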

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (sg_policy->next_freq == next_freq)
		return false;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}

static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
			      unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	int cpu;

	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;

	next_freq = cpufreq_driver_fast_switch(policy, next_freq);
	if (!next_freq)
		return;

	policy->cur = next_freq;

	if (trace_cpu_frequency_enabled()) {
		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(next_freq, cpu);
	}
}

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
				  unsigned int next_freq)
{
	if (!sugov_update_next_freq(sg_policy, time, next_freq))
		return;

	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 *	next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 *	next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = map_util_freq(util, freq, max);

	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}
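/*
 * Worked example (illustrative values): with frequency-invariant utilization,
 * util = 614, max = 1024 and cpuinfo.max_freq = 2000000 kHz, the raw value is
 * 1.25 * 2000000 * 614 / 1024 ~= 1499023 kHz, and cpufreq_driver_resolve_freq()
 * then selects the lowest supported frequency at or above it.
 */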

/*
 * This function computes an effective utilization for the given CPU, to be
 * used for frequency selection given the linear relation: f = u * f_max.
 *
 * The scheduler tracks the following metrics:
 *
 *	cpu_util_{cfs,rt,dl,irq}()
 *	cpu_bw_dl()
 *
 * Where the cfs,rt and dl util numbers are tracked with the same metric and
 * synchronized windows and are thus directly comparable.
 *
 * The cfs,rt,dl utilization are the running times measured with rq->clock_task
 * which excludes things like IRQ and steal-time. These latter are then accrued
 * in the irq utilization.
 *
 * The DL bandwidth number OTOH is not a measured metric but a value computed
 * based on the task model parameters and gives the minimal utilization
 * required to meet deadlines.
 */
unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
				 unsigned long max, enum schedutil_type type,
				 struct task_struct *p)
{
	unsigned long dl_util, util, irq;
	struct rq *rq = cpu_rq(cpu);

	if (!IS_BUILTIN(CONFIG_UCLAMP_TASK) &&
	    type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
		return max;
	}

	/*
	 * Early check to see if IRQ/steal time saturates the CPU, can be
	 * because of inaccuracies in how we track these -- see
	 * update_irq_load_avg().
	 */
	irq = cpu_util_irq(rq);
	if (unlikely(irq >= max))
		return max;

	/*
	 * Because the time spent on RT/DL tasks is visible as 'lost' time to
	 * CFS tasks and we use the same metric to track the effective
	 * utilization (PELT windows are synchronized) we can directly add them
	 * to obtain the CPU's actual utilization.
	 *
	 * CFS and RT utilization can be boosted or capped, depending on
	 * utilization clamp constraints requested by currently RUNNABLE
	 * tasks.
	 * When there are no CFS RUNNABLE tasks, clamps are released and
	 * frequency will be gracefully reduced with the utilization decay.
	 */
	util = util_cfs + cpu_util_rt(rq);
	if (type == FREQUENCY_UTIL)
		util = uclamp_util_with(rq, util, p);

	dl_util = cpu_util_dl(rq);

	/*
	 * For frequency selection we do not make cpu_util_dl() a permanent part
	 * of this sum because we want to use cpu_bw_dl() later on, but we need
	 * to check if the CFS+RT+DL sum is saturated (i.e. no idle time) such
	 * that we select f_max when there is no idle time.
	 *
	 * NOTE: numerical errors or stop class might cause us to not quite hit
	 * saturation when we should -- something for later.
	 */
	if (util + dl_util >= max)
		return max;

	/*
	 * OTOH, for energy computation we need the estimated running time, so
	 * include util_dl and ignore dl_bw.
	 */
	if (type == ENERGY_UTIL)
		util += dl_util;

	/*
	 * There is still idle time; further improve the number by using the
	 * irq metric. Because IRQ/steal time is hidden from the task clock we
	 * need to scale the task numbers:
	 *
	 *              max - irq
	 *   U' = irq + --------- * U
	 *                 max
	 */
	util = scale_irq_capacity(util, irq, max);
	util += irq;

	/*
	 * Bandwidth required by DEADLINE must always be granted while, for
	 * FAIR and RT, we use blocked utilization of IDLE CPUs as a mechanism
	 * to gracefully reduce the frequency when no tasks show up for longer
	 * periods of time.
	 *
	 * Ideally we would like to set bw_dl as min/guaranteed freq and util +
	 * bw_dl as requested freq. However, cpufreq is not yet ready for such
	 * an interface. So, we only do the latter for now.
	 */
	if (type == FREQUENCY_UTIL)
		util += cpu_bw_dl(rq);

	return min(max, util);
}
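/*
 * Worked example of the IRQ scaling above (illustrative values): with
 * max = 1024, irq = 256 and a task utilization U = 512, the scaled value is
 * U' = 256 + (1024 - 256) / 1024 * 512 = 256 + 384 = 640.
 */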

static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);
	unsigned long util = cpu_util_cfs(rq);
	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

	sg_cpu->max = max;
	sg_cpu->bw_dl = cpu_bw_dl(rq);

	return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a CPU is disabled once a tick has elapsed since its
 * last update. If a new IO wait boost is requested after more than a tick,
 * we restart from IOWAIT_BOOST_MIN, which improves energy efficiency by
 * ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
{
	s64 delta_ns = time - sg_cpu->last_update;

	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
		return false;

	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;

	return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned int flags)
{
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
		return;

	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
		return;

	/* Ensure boost doubles only one time at each request */
	if (sg_cpu->iowait_boost_pending)
		return;
	sg_cpu->iowait_boost_pending = true;

	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
		return;
	}

	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}
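/*
 * Example boost progression (illustrative): with SCHED_CAPACITY_SCALE = 1024,
 * IOWAIT_BOOST_MIN is 128, so four IO wakeups arriving less than a tick apart
 * yield boosts of 128, 256, 512 and 1024 (capped at full capacity).
 */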

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 * @util: the utilization to (eventually) boost
 * @max: the maximum value the utilization can be boosted to
 *
 * A CPU running a task that woke up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is instead decreased by this function,
 * each time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU that also appears to have been idle for at least one tick has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks that frequently wait on IO,
 * while being more conservative on tasks which do only sporadic IO
 * operations.
 */
static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
					unsigned long util, unsigned long max)
{
	unsigned long boost;

	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return util;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return util;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
			sg_cpu->iowait_boost = 0;
			return util;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * @util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
	return max(boost, util);
}
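/*
 * For instance (illustrative values): iowait_boost = 512 on a CPU with
 * max = 1024 converts to boost = (512 * 1024) >> 10 = 512, so a CPU whose
 * raw utilization is only 100 would still request the frequency for 512.
 */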

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
		sg_policy->limits_changed = true;
}

static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;
	bool busy;

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	/* Limits may have changed, don't skip frequency update */
	busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);

	util = sugov_get_util(sg_cpu);
	max = sg_cpu->max;
	util = sugov_iowait_apply(sg_cpu, time, util, max);
	next_f = get_next_freq(sg_policy, util, max);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	 */
	if (busy && next_f < sg_policy->next_freq) {
		next_f = sg_policy->next_freq;

		/* Reset cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = 0;
	}

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		sugov_fast_switch(sg_policy, time, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy, time, next_f);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;

		j_util = sugov_get_util(j_sg_cpu);
		j_max = j_sg_cpu->max;
		j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);

		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}
	}

	return get_next_freq(sg_policy, util, max);
}
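/*
 * The cross-multiplication above compares util/max fractions without a
 * division. Illustrative values: CPU0 with util 300 of max 1024 (~29%) and
 * CPU1 with util 200 of max 512 (~39%): since 200 * 1024 > 512 * 300, CPU1
 * wins and its (util, max) pair drives the shared policy's frequency.
 */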

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu, sg_policy);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (sg_policy->policy->fast_switch_enabled)
			sugov_fast_switch(sg_policy, time, next_f);
		else
			sugov_deferred_update(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock briefly to handle the case where
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to
	 * false here; without the lock, we could miss queueing the new
	 * update.
	 *
	 * Note: If a work was queued after the update_lock is released,
	 * sugov_work() will just be called again by kthread_work code; and the
	 * request will be processed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}
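/*
 * With per-policy tunables, this attribute is typically exposed at
 * /sys/devices/system/cpu/cpufreq/policyN/schedutil/rate_limit_us; e.g.
 * (illustrative) "echo 2000 > .../policy0/schedutil/rate_limit_us" sets a
 * 2 ms minimum interval between frequency updates for that policy.
 */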

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
	NULL
};
ATTRIBUTE_GROUPS(sugov);

static struct kobj_type sugov_tunables_ktype = {
	.default_groups = sugov_groups,
	.sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_flags	= SCHED_FLAG_SUGOV,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	=  1000000,
		.sched_deadline = 10000000,
		.sched_period	= 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->freq_update_delay_ns	= sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq		= 0;
	sg_policy->work_in_progress	= false;
	sg_policy->limits_changed	= false;
	sg_policy->need_freq_update	= false;
	sg_policy->cached_raw_freq	= 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu		= cpu;
		sg_cpu->sg_policy	= sg_policy;
	}

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->limits_changed = true;
}

struct cpufreq_governor schedutil_gov = {
	.name			= "schedutil",
	.owner			= THIS_MODULE,
	.dynamic_switching	= true,
	.init			= sugov_init,
	.exit			= sugov_exit,
	.start			= sugov_start,
	.stop			= sugov_stop,
	.limits			= sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);

#ifdef CONFIG_ENERGY_MODEL
extern bool sched_energy_update;
extern struct mutex sched_energy_mutex;

static void rebuild_sd_workfn(struct work_struct *work)
{
	mutex_lock(&sched_energy_mutex);
	sched_energy_update = true;
	rebuild_sched_domains();
	sched_energy_update = false;
	mutex_unlock(&sched_energy_mutex);
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
				   struct cpufreq_governor *old_gov)
{
	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
		/*
		 * When called from the cpufreq_register_driver() path, the
		 * cpu_hotplug_lock is already held, so use a work item to
		 * avoid nested locking in rebuild_sched_domains().
		 */
		schedule_work(&rebuild_sd_work);
	}
}
#endif