kernel/sched/cpufreq_schedutil.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * CPUFreq governor based on scheduler-provided CPU utilization data.
  4 *
  5 * Copyright (C) 2016, Intel Corporation
  6 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  7 */
  8
  9#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)
 10
 11struct sugov_tunables {
 12	struct gov_attr_set	attr_set;
 13	unsigned int		rate_limit_us;
 14};
 15
 16struct sugov_policy {
 17	struct cpufreq_policy	*policy;
 18
 19	struct sugov_tunables	*tunables;
 20	struct list_head	tunables_hook;
 21
 22	raw_spinlock_t		update_lock;
 23	u64			last_freq_update_time;
 24	s64			freq_update_delay_ns;
 25	unsigned int		next_freq;
 26	unsigned int		cached_raw_freq;
 27
 28	/* The next fields are only needed if fast switch cannot be used: */
 29	struct			irq_work irq_work;
 30	struct			kthread_work work;
 31	struct			mutex work_lock;
 32	struct			kthread_worker worker;
 33	struct task_struct	*thread;
 34	bool			work_in_progress;
 35
 36	bool			limits_changed;
 37	bool			need_freq_update;
 38};
 39
 40struct sugov_cpu {
 41	struct update_util_data	update_util;
 42	struct sugov_policy	*sg_policy;
 43	unsigned int		cpu;
 44
 45	bool			iowait_boost_pending;
 46	unsigned int		iowait_boost;
 47	u64			last_update;
 48
 49	unsigned long		util;
 50	unsigned long		bw_dl;
 51	unsigned long		max;
 52
 53	/* The field below is for single-CPU policies only: */
 54#ifdef CONFIG_NO_HZ_COMMON
 55	unsigned long		saved_idle_calls;
 56#endif
 57};
 58
 59static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
 60
 61/************************ Governor internals ***********************/
 62
 63static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 64{
 65	s64 delta_ns;
 66
 67	/*
 68	 * Since cpufreq_update_util() is called with rq->lock held for
 69	 * the @target_cpu, our per-CPU data is fully serialized.
 70	 *
 71	 * However, drivers cannot in general deal with cross-CPU
 72	 * requests, so while get_next_freq() will work, our
 73	 * sugov_update_commit() call may not for the fast switching platforms.
 74	 *
 75	 * Hence stop here for remote requests if they aren't supported
 76	 * by the hardware, as calculating the frequency is pointless if
 77	 * we cannot in fact act on it.
 78	 *
 79	 * This is needed on the slow switching platforms too to prevent CPUs
 80	 * going offline from leaving stale IRQ work items behind.
 81	 */
 82	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
 83		return false;
 84
 85	if (unlikely(sg_policy->limits_changed)) {
 86		sg_policy->limits_changed = false;
 87		sg_policy->need_freq_update = true;
 88		return true;
 89	}
 90
 91	delta_ns = time - sg_policy->last_freq_update_time;
 92
 93	return delta_ns >= sg_policy->freq_update_delay_ns;
 94}
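/*
 * Worked example (illustrative values, not taken from a specific driver):
 * with rate_limit_us = 2000, freq_update_delay_ns is 2,000,000 ns, so
 * scheduler callbacks arriving less than 2 ms after the last frequency
 * update return false here and skip the frequency computation, unless
 * limits_changed has forced need_freq_update.
 */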
 95
 96static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
 97				   unsigned int next_freq)
 98{
 99	if (sg_policy->need_freq_update)
100		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
101	else if (sg_policy->next_freq == next_freq)
102		return false;
103
104	sg_policy->next_freq = next_freq;
105	sg_policy->last_freq_update_time = time;
106
107	return true;
108}
109
110static void sugov_deferred_update(struct sugov_policy *sg_policy)
111{
112	if (!sg_policy->work_in_progress) {
113		sg_policy->work_in_progress = true;
114		irq_work_queue(&sg_policy->irq_work);
115	}
116}
117
118/**
119 * get_next_freq - Compute a new frequency for a given cpufreq policy.
120 * @sg_policy: schedutil policy object to compute the new frequency for.
121 * @util: Current CPU utilization.
122 * @max: CPU capacity.
123 *
124 * If the utilization is frequency-invariant, choose the new frequency to be
125 * proportional to it, that is
126 *
127 * next_freq = C * max_freq * util / max
128 *
129 * Otherwise, approximate the would-be frequency-invariant utilization by
130 * util_raw * (curr_freq / max_freq) which leads to
131 *
132 * next_freq = C * curr_freq * util_raw / max
133 *
134 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
135 *
136 * The lowest driver-supported frequency which is equal to or greater than the raw
137 * next_freq (as calculated above) is returned, subject to policy min/max and
138 * cpufreq driver limitations.
139 */
140static unsigned int get_next_freq(struct sugov_policy *sg_policy,
141				  unsigned long util, unsigned long max)
142{
143	struct cpufreq_policy *policy = sg_policy->policy;
144	unsigned int freq = arch_scale_freq_invariant() ?
145				policy->cpuinfo.max_freq : policy->cur;
146
147	util = map_util_perf(util);
148	freq = map_util_freq(util, freq, max);
149
150	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
151		return sg_policy->next_freq;
152
153	sg_policy->cached_raw_freq = freq;
154	return cpufreq_driver_resolve_freq(policy, freq);
155}
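/*
 * Worked example (illustrative numbers): on a frequency-invariant system
 * with cpuinfo.max_freq = 2,000,000 kHz, util = 614 and max = 1024,
 * map_util_perf() adds the 25% headroom (614 + 614/4 = 767) and
 * map_util_freq() yields 2,000,000 * 767 / 1024 ~= 1,498,000 kHz, which
 * cpufreq_driver_resolve_freq() then rounds up to the next supported OPP.
 */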
156
157static void sugov_get_util(struct sugov_cpu *sg_cpu)
158{
159	struct rq *rq = cpu_rq(sg_cpu->cpu);
160
161	sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
162	sg_cpu->bw_dl = cpu_bw_dl(rq);
163	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
164					  FREQUENCY_UTIL, NULL);
165}
166
167/**
168 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
169 * @sg_cpu: the sugov data for the CPU to boost
170 * @time: the update time from the caller
171 * @set_iowait_boost: true if an IO boost has been requested
172 *
173 * The IO wait boost of a task is disabled after a tick since the last update
174 * of a CPU. If a new IO wait boost is requested after more than a tick, then
175 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
176 * efficiency by ignoring sporadic wakeups from IO.
177 */
178static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
179			       bool set_iowait_boost)
180{
181	s64 delta_ns = time - sg_cpu->last_update;
182
183	/* Reset boost only if a tick has elapsed since last request */
184	if (delta_ns <= TICK_NSEC)
185		return false;
186
187	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
188	sg_cpu->iowait_boost_pending = set_iowait_boost;
189
190	return true;
191}
192
193/**
194 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
195 * @sg_cpu: the sugov data for the CPU to boost
196 * @time: the update time from the caller
197 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
198 *
199 * Each time a task wakes up after an IO operation, the CPU utilization can be
200 * boosted to a certain utilization which doubles at each "frequent and
201 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
202 * of the maximum OPP.
203 *
204 * To keep doubling, an IO boost has to be requested at least once per tick,
205 * otherwise we restart from the utilization of the minimum OPP.
206 */
207static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
208			       unsigned int flags)
209{
210	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;
211
212	/* Reset boost if the CPU appears to have been idle enough */
213	if (sg_cpu->iowait_boost &&
214	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
215		return;
216
217	/* Boost only tasks waking up after IO */
218	if (!set_iowait_boost)
219		return;
220
221	/* Ensure boost doubles only one time at each request */
222	if (sg_cpu->iowait_boost_pending)
223		return;
224	sg_cpu->iowait_boost_pending = true;
225
226	/* Double the boost at each request */
227	if (sg_cpu->iowait_boost) {
228		sg_cpu->iowait_boost =
229			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
230		return;
231	}
232
233	/* First wakeup after IO: start with minimum boost */
234	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
235}
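/*
 * Example of the doubling (illustrative): with SCHED_CAPACITY_SCALE = 1024
 * and IOWAIT_BOOST_MIN = 128, IO wakeups arriving within a tick of each
 * other walk iowait_boost through 128 -> 256 -> 512 -> 1024; the
 * iowait_boost_pending flag prevents a second doubling before the boost
 * has been applied, and the value saturates at SCHED_CAPACITY_SCALE.
 */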
236
237/**
238 * sugov_iowait_apply() - Apply the IO boost to a CPU.
239 * @sg_cpu: the sugov data for the cpu to boost
240 * @time: the update time from the caller
241 *
242 * A CPU running a task which has woken up after an IO operation can have its
243 * utilization boosted to speed up the completion of those IO operations.
244 * The IO boost value is increased each time a task wakes up from IO, in
245 * sugov_iowait_boost(), and it is instead decreased by this function,
246 * each time an increase has not been requested (!iowait_boost_pending).
247 *
248 * A CPU which appears to have been idle for at least one tick also has
249 * its IO boost utilization reset.
250 *
251 * This mechanism is designed to boost tasks that frequently wait on IO, while
252 * being more conservative about tasks that do only sporadic IO operations.
253 */
254static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
255{
256	unsigned long boost;
257
258	/* No boost currently required */
259	if (!sg_cpu->iowait_boost)
260		return;
261
262	/* Reset boost if the CPU appears to have been idle enough */
263	if (sugov_iowait_reset(sg_cpu, time, false))
264		return;
265
266	if (!sg_cpu->iowait_boost_pending) {
267		/*
268		 * No boost pending; reduce the boost value.
269		 */
270		sg_cpu->iowait_boost >>= 1;
271		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
272			sg_cpu->iowait_boost = 0;
273			return;
274		}
275	}
276
277	sg_cpu->iowait_boost_pending = false;
278
279	/*
280	 * sg_cpu->util is already in capacity scale; convert iowait_boost
281	 * into the same scale so we can compare.
282	 */
283	boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
284	boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
285	if (sg_cpu->util < boost)
286		sg_cpu->util = boost;
287}
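/*
 * Decay example (illustrative): with no new request pending, the boost is
 * halved on every update, e.g. 1024 -> 512 -> 256 -> 128 -> 64, and 64 is
 * below IOWAIT_BOOST_MIN so it is cleared to 0. A surviving boost of 512
 * on a CPU with max = 434 contributes (512 * 434) >> SCHED_CAPACITY_SHIFT
 * = 217 capacity units before the uclamp clamping and the comparison with
 * sg_cpu->util.
 */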
288
289#ifdef CONFIG_NO_HZ_COMMON
290static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
291{
292	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
293	bool ret = idle_calls == sg_cpu->saved_idle_calls;
294
295	sg_cpu->saved_idle_calls = idle_calls;
296	return ret;
297}
298#else
299static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
300#endif /* CONFIG_NO_HZ_COMMON */
301
302/*
303 * Make sugov_should_update_freq() ignore the rate limit when DL
304 * has increased the utilization.
305 */
306static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
307{
308	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
309		sg_cpu->sg_policy->limits_changed = true;
310}
311
312static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
313					      u64 time, unsigned int flags)
314{
315	sugov_iowait_boost(sg_cpu, time, flags);
316	sg_cpu->last_update = time;
317
318	ignore_dl_rate_limit(sg_cpu);
319
320	if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
321		return false;
322
323	sugov_get_util(sg_cpu);
324	sugov_iowait_apply(sg_cpu, time);
325
326	return true;
327}
328
329static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
330				     unsigned int flags)
331{
332	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
333	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
334	unsigned int cached_freq = sg_policy->cached_raw_freq;
335	unsigned int next_f;
336
337	if (!sugov_update_single_common(sg_cpu, time, flags))
338		return;
339
340	next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
341	/*
342	 * Do not reduce the frequency if the CPU has not been idle
343	 * recently, as the reduction is likely to be premature then.
344	 *
345	 * Except when the rq is capped by uclamp_max.
346	 */
347	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
348	    sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
349		next_f = sg_policy->next_freq;
350
351		/* Restore cached freq as next_freq has changed */
352		sg_policy->cached_raw_freq = cached_freq;
353	}
354
355	if (!sugov_update_next_freq(sg_policy, time, next_f))
356		return;
357
358	/*
359	 * This code runs under rq->lock for the target CPU, so it won't run
360	 * concurrently on two different CPUs for the same target and it is not
361	 * necessary to acquire the lock in the fast switch case.
362	 */
363	if (sg_policy->policy->fast_switch_enabled) {
364		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
365	} else {
366		raw_spin_lock(&sg_policy->update_lock);
367		sugov_deferred_update(sg_policy);
368		raw_spin_unlock(&sg_policy->update_lock);
369	}
370}
371
372static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
373				     unsigned int flags)
374{
375	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
376	unsigned long prev_util = sg_cpu->util;
377
378	/*
379	 * Fall back to the "frequency" path if frequency invariance is not
380	 * supported, because the direct mapping between the utilization and
381	 * the performance levels depends on the frequency invariance.
382	 */
383	if (!arch_scale_freq_invariant()) {
384		sugov_update_single_freq(hook, time, flags);
385		return;
386	}
387
388	if (!sugov_update_single_common(sg_cpu, time, flags))
389		return;
390
391	/*
392	 * Do not reduce the target performance level if the CPU has not been
393	 * idle recently, as the reduction is likely to be premature then.
394	 *
395	 * Except when the rq is capped by uclamp_max.
396	 */
397	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
398	    sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
399		sg_cpu->util = prev_util;
400
401	cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
402				   map_util_perf(sg_cpu->util), sg_cpu->max);
403
404	sg_cpu->sg_policy->last_freq_update_time = time;
405}
406
407static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
408{
409	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
410	struct cpufreq_policy *policy = sg_policy->policy;
411	unsigned long util = 0, max = 1;
412	unsigned int j;
413
414	for_each_cpu(j, policy->cpus) {
415		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
416		unsigned long j_util, j_max;
417
418		sugov_get_util(j_sg_cpu);
419		sugov_iowait_apply(j_sg_cpu, time);
420		j_util = j_sg_cpu->util;
421		j_max = j_sg_cpu->max;
422
423		if (j_util * max > j_max * util) {
424			util = j_util;
425			max = j_max;
426		}
427	}
428
429	return get_next_freq(sg_policy, util, max);
430}
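/*
 * Illustrative note: the cross-multiplication (j_util * max > j_max * util)
 * compares util/max ratios without a division. For example, util 300 on a
 * CPU of capacity 512 (ratio ~0.59) wins over util 400 on a CPU of
 * capacity 1024 (ratio ~0.39), so the shared frequency tracks the
 * proportionally busiest CPU in the policy.
 */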
431
432static void
433sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
434{
435	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
436	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
437	unsigned int next_f;
438
439	raw_spin_lock(&sg_policy->update_lock);
440
441	sugov_iowait_boost(sg_cpu, time, flags);
442	sg_cpu->last_update = time;
443
444	ignore_dl_rate_limit(sg_cpu);
445
446	if (sugov_should_update_freq(sg_policy, time)) {
447		next_f = sugov_next_freq_shared(sg_cpu, time);
448
449		if (!sugov_update_next_freq(sg_policy, time, next_f))
450			goto unlock;
451
452		if (sg_policy->policy->fast_switch_enabled)
453			cpufreq_driver_fast_switch(sg_policy->policy, next_f);
454		else
455			sugov_deferred_update(sg_policy);
456	}
457unlock:
458	raw_spin_unlock(&sg_policy->update_lock);
459}
460
461static void sugov_work(struct kthread_work *work)
462{
463	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
464	unsigned int freq;
465	unsigned long flags;
466
467	/*
468	 * Hold sg_policy->update_lock briefly to handle the case where
469	 * sg_policy->next_freq is read here and then updated by
470	 * sugov_deferred_update() just before work_in_progress is set to false
471	 * here; otherwise we may miss queueing the new update.
472	 *
473	 * Note: if work was queued after the update_lock was released,
474	 * sugov_work() will just be called again by the kthread_work code; the
475	 * request will be processed before the sugov thread sleeps.
476	 */
477	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
478	freq = sg_policy->next_freq;
479	sg_policy->work_in_progress = false;
480	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
481
482	mutex_lock(&sg_policy->work_lock);
483	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
484	mutex_unlock(&sg_policy->work_lock);
485}
486
487static void sugov_irq_work(struct irq_work *irq_work)
488{
489	struct sugov_policy *sg_policy;
490
491	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
492
493	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
494}
495
496/************************** sysfs interface ************************/
497
498static struct sugov_tunables *global_tunables;
499static DEFINE_MUTEX(global_tunables_lock);
500
501static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
502{
503	return container_of(attr_set, struct sugov_tunables, attr_set);
504}
505
506static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
507{
508	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
509
510	return sprintf(buf, "%u\n", tunables->rate_limit_us);
511}
512
513static ssize_t
514rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
515{
516	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
517	struct sugov_policy *sg_policy;
518	unsigned int rate_limit_us;
519
520	if (kstrtouint(buf, 10, &rate_limit_us))
521		return -EINVAL;
522
523	tunables->rate_limit_us = rate_limit_us;
524
525	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
526		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;
527
528	return count;
529}
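/*
 * Usage sketch (the exact sysfs path depends on whether the driver uses
 * per-policy tunables): writing 5000 to
 * /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us would
 * enforce a 5 ms minimum interval between frequency updates for every
 * policy attached to this tunables set.
 */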
530
531static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
532
533static struct attribute *sugov_attrs[] = {
534	&rate_limit_us.attr,
535	NULL
536};
537ATTRIBUTE_GROUPS(sugov);
538
539static void sugov_tunables_free(struct kobject *kobj)
540{
541	struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
542
543	kfree(to_sugov_tunables(attr_set));
544}
545
546static struct kobj_type sugov_tunables_ktype = {
547	.default_groups = sugov_groups,
548	.sysfs_ops = &governor_sysfs_ops,
549	.release = &sugov_tunables_free,
550};
551
552/********************** cpufreq governor interface *********************/
553
554struct cpufreq_governor schedutil_gov;
555
556static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
557{
558	struct sugov_policy *sg_policy;
559
560	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
561	if (!sg_policy)
562		return NULL;
563
564	sg_policy->policy = policy;
565	raw_spin_lock_init(&sg_policy->update_lock);
566	return sg_policy;
567}
568
569static void sugov_policy_free(struct sugov_policy *sg_policy)
570{
571	kfree(sg_policy);
572}
573
574static int sugov_kthread_create(struct sugov_policy *sg_policy)
575{
576	struct task_struct *thread;
577	struct sched_attr attr = {
578		.size		= sizeof(struct sched_attr),
579		.sched_policy	= SCHED_DEADLINE,
580		.sched_flags	= SCHED_FLAG_SUGOV,
581		.sched_nice	= 0,
582		.sched_priority	= 0,
583		/*
584		 * Fake (unused) bandwidth; workaround to "fix"
585		 * priority inheritance.
586		 */
587		.sched_runtime	=  1000000,
588		.sched_deadline = 10000000,
589		.sched_period	= 10000000,
590	};
591	struct cpufreq_policy *policy = sg_policy->policy;
592	int ret;
593
594	/* kthread only required for slow path */
595	if (policy->fast_switch_enabled)
596		return 0;
597
598	kthread_init_work(&sg_policy->work, sugov_work);
599	kthread_init_worker(&sg_policy->worker);
600	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
601				"sugov:%d",
602				cpumask_first(policy->related_cpus));
603	if (IS_ERR(thread)) {
604		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
605		return PTR_ERR(thread);
606	}
607
608	ret = sched_setattr_nocheck(thread, &attr);
609	if (ret) {
610		kthread_stop(thread);
611		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
612		return ret;
613	}
614
615	sg_policy->thread = thread;
616	kthread_bind_mask(thread, policy->related_cpus);
617	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
618	mutex_init(&sg_policy->work_lock);
619
620	wake_up_process(thread);
621
622	return 0;
623}
624
625static void sugov_kthread_stop(struct sugov_policy *sg_policy)
626{
627	/* kthread only required for slow path */
628	if (sg_policy->policy->fast_switch_enabled)
629		return;
630
631	kthread_flush_worker(&sg_policy->worker);
632	kthread_stop(sg_policy->thread);
633	mutex_destroy(&sg_policy->work_lock);
634}
635
636static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
637{
638	struct sugov_tunables *tunables;
639
640	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
641	if (tunables) {
642		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
643		if (!have_governor_per_policy())
644			global_tunables = tunables;
645	}
646	return tunables;
647}
648
649static void sugov_clear_global_tunables(void)
650{
651	if (!have_governor_per_policy())
652		global_tunables = NULL;
653}
654
655static int sugov_init(struct cpufreq_policy *policy)
656{
657	struct sugov_policy *sg_policy;
658	struct sugov_tunables *tunables;
659	int ret = 0;
660
661	/* State should be equivalent to EXIT */
662	if (policy->governor_data)
663		return -EBUSY;
664
665	cpufreq_enable_fast_switch(policy);
666
667	sg_policy = sugov_policy_alloc(policy);
668	if (!sg_policy) {
669		ret = -ENOMEM;
670		goto disable_fast_switch;
671	}
672
673	ret = sugov_kthread_create(sg_policy);
674	if (ret)
675		goto free_sg_policy;
676
677	mutex_lock(&global_tunables_lock);
678
679	if (global_tunables) {
680		if (WARN_ON(have_governor_per_policy())) {
681			ret = -EINVAL;
682			goto stop_kthread;
683		}
684		policy->governor_data = sg_policy;
685		sg_policy->tunables = global_tunables;
686
687		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
688		goto out;
689	}
690
691	tunables = sugov_tunables_alloc(sg_policy);
692	if (!tunables) {
693		ret = -ENOMEM;
694		goto stop_kthread;
695	}
696
697	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);
698
699	policy->governor_data = sg_policy;
700	sg_policy->tunables = tunables;
701
702	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
703				   get_governor_parent_kobj(policy), "%s",
704				   schedutil_gov.name);
705	if (ret)
706		goto fail;
707
708out:
709	mutex_unlock(&global_tunables_lock);
710	return 0;
711
712fail:
713	kobject_put(&tunables->attr_set.kobj);
714	policy->governor_data = NULL;
715	sugov_clear_global_tunables();
716
717stop_kthread:
718	sugov_kthread_stop(sg_policy);
719	mutex_unlock(&global_tunables_lock);
720
721free_sg_policy:
722	sugov_policy_free(sg_policy);
723
724disable_fast_switch:
725	cpufreq_disable_fast_switch(policy);
726
727	pr_err("initialization failed (error %d)\n", ret);
728	return ret;
729}
730
731static void sugov_exit(struct cpufreq_policy *policy)
732{
733	struct sugov_policy *sg_policy = policy->governor_data;
734	struct sugov_tunables *tunables = sg_policy->tunables;
735	unsigned int count;
736
737	mutex_lock(&global_tunables_lock);
738
739	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
740	policy->governor_data = NULL;
741	if (!count)
742		sugov_clear_global_tunables();
743
744	mutex_unlock(&global_tunables_lock);
745
746	sugov_kthread_stop(sg_policy);
747	sugov_policy_free(sg_policy);
748	cpufreq_disable_fast_switch(policy);
749}
750
751static int sugov_start(struct cpufreq_policy *policy)
752{
753	struct sugov_policy *sg_policy = policy->governor_data;
754	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
755	unsigned int cpu;
756
757	sg_policy->freq_update_delay_ns	= sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
758	sg_policy->last_freq_update_time	= 0;
759	sg_policy->next_freq			= 0;
760	sg_policy->work_in_progress		= false;
761	sg_policy->limits_changed		= false;
762	sg_policy->cached_raw_freq		= 0;
763
764	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
765
766	for_each_cpu(cpu, policy->cpus) {
767		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
768
769		memset(sg_cpu, 0, sizeof(*sg_cpu));
770		sg_cpu->cpu			= cpu;
771		sg_cpu->sg_policy		= sg_policy;
772	}
773
774	if (policy_is_shared(policy))
775		uu = sugov_update_shared;
776	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
777		uu = sugov_update_single_perf;
778	else
779		uu = sugov_update_single_freq;
780
781	for_each_cpu(cpu, policy->cpus) {
782		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
783
784		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
785	}
786	return 0;
787}
788
789static void sugov_stop(struct cpufreq_policy *policy)
790{
791	struct sugov_policy *sg_policy = policy->governor_data;
792	unsigned int cpu;
793
794	for_each_cpu(cpu, policy->cpus)
795		cpufreq_remove_update_util_hook(cpu);
796
797	synchronize_rcu();
798
799	if (!policy->fast_switch_enabled) {
800		irq_work_sync(&sg_policy->irq_work);
801		kthread_cancel_work_sync(&sg_policy->work);
802	}
803}
804
805static void sugov_limits(struct cpufreq_policy *policy)
806{
807	struct sugov_policy *sg_policy = policy->governor_data;
808
809	if (!policy->fast_switch_enabled) {
810		mutex_lock(&sg_policy->work_lock);
811		cpufreq_policy_apply_limits(policy);
812		mutex_unlock(&sg_policy->work_lock);
813	}
814
815	sg_policy->limits_changed = true;
816}
817
818struct cpufreq_governor schedutil_gov = {
819	.name			= "schedutil",
820	.owner			= THIS_MODULE,
821	.flags			= CPUFREQ_GOV_DYNAMIC_SWITCHING,
822	.init			= sugov_init,
823	.exit			= sugov_exit,
824	.start			= sugov_start,
825	.stop			= sugov_stop,
826	.limits			= sugov_limits,
827};
828
829#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
830struct cpufreq_governor *cpufreq_default_governor(void)
831{
832	return &schedutil_gov;
833}
834#endif
835
836cpufreq_governor_init(schedutil_gov);
837
838#ifdef CONFIG_ENERGY_MODEL
839static void rebuild_sd_workfn(struct work_struct *work)
840{
841	rebuild_sched_domains_energy();
842}
843static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
844
845/*
846 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
847 * on governor changes to make sure the scheduler knows about it.
848 */
849void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
850				  struct cpufreq_governor *old_gov)
851{
852	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
853		/*
854		 * When called from the cpufreq_register_driver() path, the
855		 * cpu_hotplug_lock is already held, so use a work item to
856		 * avoid nested locking in rebuild_sched_domains().
857		 */
858		schedule_work(&rebuild_sd_work);
859	}
860
861}
862#endif
kernel/sched/cpufreq_schedutil.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * CPUFreq governor based on scheduler-provided CPU utilization data.
  4 *
  5 * Copyright (C) 2016, Intel Corporation
  6 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  7 */
  8
  9#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)
 10
 11struct sugov_tunables {
 12	struct gov_attr_set	attr_set;
 13	unsigned int		rate_limit_us;
 14};
 15
 16struct sugov_policy {
 17	struct cpufreq_policy	*policy;
 18
 19	struct sugov_tunables	*tunables;
 20	struct list_head	tunables_hook;
 21
 22	raw_spinlock_t		update_lock;
 23	u64			last_freq_update_time;
 24	s64			freq_update_delay_ns;
 25	unsigned int		next_freq;
 26	unsigned int		cached_raw_freq;
 27
 28	/* The next fields are only needed if fast switch cannot be used: */
 29	struct			irq_work irq_work;
 30	struct			kthread_work work;
 31	struct			mutex work_lock;
 32	struct			kthread_worker worker;
 33	struct task_struct	*thread;
 34	bool			work_in_progress;
 35
 36	bool			limits_changed;
 37	bool			need_freq_update;
 38};
 39
 40struct sugov_cpu {
 41	struct update_util_data	update_util;
 42	struct sugov_policy	*sg_policy;
 43	unsigned int		cpu;
 44
 45	bool			iowait_boost_pending;
 46	unsigned int		iowait_boost;
 47	u64			last_update;
 48
 49	unsigned long		util;
 50	unsigned long		bw_min;
 51
 52	/* The field below is for single-CPU policies only: */
 53#ifdef CONFIG_NO_HZ_COMMON
 54	unsigned long		saved_idle_calls;
 55#endif
 56};
 57
 58static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
 59
 60/************************ Governor internals ***********************/
 61
 62static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 63{
 64	s64 delta_ns;
 65
 66	/*
 67	 * Since cpufreq_update_util() is called with rq->lock held for
 68	 * the @target_cpu, our per-CPU data is fully serialized.
 69	 *
 70	 * However, drivers cannot in general deal with cross-CPU
 71	 * requests, so while get_next_freq() will work, our
 72	 * sugov_update_commit() call may not for the fast switching platforms.
 73	 *
 74	 * Hence stop here for remote requests if they aren't supported
 75	 * by the hardware, as calculating the frequency is pointless if
 76	 * we cannot in fact act on it.
 77	 *
 78	 * This is needed on the slow switching platforms too to prevent CPUs
 79	 * going offline from leaving stale IRQ work items behind.
 80	 */
 81	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
 82		return false;
 83
 84	if (unlikely(sg_policy->limits_changed)) {
 85		sg_policy->limits_changed = false;
 86		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
 87		return true;
 88	}
 89
 90	delta_ns = time - sg_policy->last_freq_update_time;
 91
 92	return delta_ns >= sg_policy->freq_update_delay_ns;
 93}
 94
 95static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
 96				   unsigned int next_freq)
 97{
 98	if (sg_policy->need_freq_update)
 99		sg_policy->need_freq_update = false;
100	else if (sg_policy->next_freq == next_freq)
101		return false;
102
103	sg_policy->next_freq = next_freq;
104	sg_policy->last_freq_update_time = time;
105
106	return true;
107}
108
109static void sugov_deferred_update(struct sugov_policy *sg_policy)
110{
111	if (!sg_policy->work_in_progress) {
112		sg_policy->work_in_progress = true;
113		irq_work_queue(&sg_policy->irq_work);
114	}
115}
116
117/**
118 * get_capacity_ref_freq - get the reference frequency that has been used to
119 * correlate frequency and compute capacity for a given cpufreq policy. We use
120 * the CPU managing it for the arch_scale_freq_ref() call in the function.
121 * @policy: the cpufreq policy of the CPU in question.
122 *
123 * Return: the reference CPU frequency to compute a capacity.
124 */
125static __always_inline
126unsigned long get_capacity_ref_freq(struct cpufreq_policy *policy)
127{
128	unsigned int freq = arch_scale_freq_ref(policy->cpu);
129
130	if (freq)
131		return freq;
132
133	if (arch_scale_freq_invariant())
134		return policy->cpuinfo.max_freq;
135
136	/*
137	 * Apply a 25% margin so that we select a higher frequency than
138	 * the current one before the CPU is fully busy:
139	 */
140	return policy->cur + (policy->cur >> 2);
141}
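/*
 * Example (illustrative): with no arch-provided reference frequency and no
 * frequency invariance, policy->cur = 1,600,000 kHz gives
 * 1,600,000 + (1,600,000 >> 2) = 2,000,000 kHz, i.e. the 25% margin that
 * requests a higher OPP before the CPU becomes fully busy.
 */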
142
143/**
144 * get_next_freq - Compute a new frequency for a given cpufreq policy.
145 * @sg_policy: schedutil policy object to compute the new frequency for.
146 * @util: Current CPU utilization.
147 * @max: CPU capacity.
148 *
149 * If the utilization is frequency-invariant, choose the new frequency to be
150 * proportional to it, that is
151 *
152 * next_freq = C * max_freq * util / max
153 *
154 * Otherwise, approximate the would-be frequency-invariant utilization by
155 * util_raw * (curr_freq / max_freq) which leads to
156 *
157 * next_freq = C * curr_freq * util_raw / max
158 *
159 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
160 *
161 * The lowest driver-supported frequency which is equal to or greater than the raw
162 * next_freq (as calculated above) is returned, subject to policy min/max and
163 * cpufreq driver limitations.
164 */
165static unsigned int get_next_freq(struct sugov_policy *sg_policy,
166				  unsigned long util, unsigned long max)
167{
168	struct cpufreq_policy *policy = sg_policy->policy;
169	unsigned int freq;
170
171	freq = get_capacity_ref_freq(policy);
172	freq = map_util_freq(util, freq, max);
173
174	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
175		return sg_policy->next_freq;
176
177	sg_policy->cached_raw_freq = freq;
178	return cpufreq_driver_resolve_freq(policy, freq);
179}
180
181unsigned long sugov_effective_cpu_perf(int cpu, unsigned long actual,
182				 unsigned long min,
183				 unsigned long max)
184{
185	/* Add dvfs headroom to actual utilization */
186	actual = map_util_perf(actual);
187	/* Actually we don't need to target the max performance */
188	if (actual < max)
189		max = actual;
190
191	/*
192	 * Ensure at least minimum performance while providing more compute
193	 * capacity when possible.
194	 */
195	return max(min, max);
196}
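/*
 * Worked example (illustrative): actual = 600 becomes 750 after the DVFS
 * headroom (600 + 600/4); with min = 200 and max = 1024 the result is
 * max(200, min(750, 1024)) = 750. With actual = 900 the headroom gives
 * 1125, so max stays at 1024 and that is returned; the result never drops
 * below min.
 */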
197
198static void sugov_get_util(struct sugov_cpu *sg_cpu, unsigned long boost)
199{
200	unsigned long min, max, util = scx_cpuperf_target(sg_cpu->cpu);
201
202	if (!scx_switched_all())
203		util += cpu_util_cfs_boost(sg_cpu->cpu);
204	util = effective_cpu_util(sg_cpu->cpu, util, &min, &max);
205	util = max(util, boost);
206	sg_cpu->bw_min = min;
207	sg_cpu->util = sugov_effective_cpu_perf(sg_cpu->cpu, util, min, max);
208}
209
210/**
211 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
212 * @sg_cpu: the sugov data for the CPU to boost
213 * @time: the update time from the caller
214 * @set_iowait_boost: true if an IO boost has been requested
215 *
216 * The IO wait boost of a task is disabled after a tick since the last update
217 * of a CPU. If a new IO wait boost is requested after more than a tick, then
218 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
219 * efficiency by ignoring sporadic wakeups from IO.
220 */
221static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
222			       bool set_iowait_boost)
223{
224	s64 delta_ns = time - sg_cpu->last_update;
225
226	/* Reset boost only if a tick has elapsed since last request */
227	if (delta_ns <= TICK_NSEC)
228		return false;
229
230	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
231	sg_cpu->iowait_boost_pending = set_iowait_boost;
232
233	return true;
234}
235
236/**
237 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
238 * @sg_cpu: the sugov data for the CPU to boost
239 * @time: the update time from the caller
240 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
241 *
242 * Each time a task wakes up after an IO operation, the CPU utilization can be
243 * boosted to a certain utilization which doubles at each "frequent and
244 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
245 * of the maximum OPP.
246 *
247 * To keep doubling, an IO boost has to be requested at least once per tick,
248 * otherwise we restart from the utilization of the minimum OPP.
249 */
250static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
251			       unsigned int flags)
252{
253	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;
254
255	/* Reset boost if the CPU appears to have been idle enough */
256	if (sg_cpu->iowait_boost &&
257	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
258		return;
259
260	/* Boost only tasks waking up after IO */
261	if (!set_iowait_boost)
262		return;
263
264	/* Ensure boost doubles only one time at each request */
265	if (sg_cpu->iowait_boost_pending)
266		return;
267	sg_cpu->iowait_boost_pending = true;
268
269	/* Double the boost at each request */
270	if (sg_cpu->iowait_boost) {
271		sg_cpu->iowait_boost =
272			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
273		return;
274	}
275
276	/* First wakeup after IO: start with minimum boost */
277	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
278}
279
280/**
281 * sugov_iowait_apply() - Apply the IO boost to a CPU.
282 * @sg_cpu: the sugov data for the cpu to boost
283 * @time: the update time from the caller
284 * @max_cap: the max CPU capacity
285 *
286 * A CPU running a task which has woken up after an IO operation can have its
287 * utilization boosted to speed up the completion of those IO operations.
288 * The IO boost value is increased each time a task wakes up from IO, in
289 * sugov_iowait_boost(), and it is instead decreased by this function,
290 * each time an increase has not been requested (!iowait_boost_pending).
291 *
292 * A CPU which appears to have been idle for at least one tick also has
293 * its IO boost utilization reset.
294 *
295 * This mechanism is designed to boost tasks that frequently wait on IO, while
296 * being more conservative about tasks that do only sporadic IO operations.
297 */
298static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
299			       unsigned long max_cap)
300{
301	/* No boost currently required */
302	if (!sg_cpu->iowait_boost)
303		return 0;
304
305	/* Reset boost if the CPU appears to have been idle enough */
306	if (sugov_iowait_reset(sg_cpu, time, false))
307		return 0;
308
309	if (!sg_cpu->iowait_boost_pending) {
310		/*
311		 * No boost pending; reduce the boost value.
312		 */
313		sg_cpu->iowait_boost >>= 1;
314		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
315			sg_cpu->iowait_boost = 0;
316			return 0;
317		}
318	}
319
320	sg_cpu->iowait_boost_pending = false;
321
322	/*
323	 * sg_cpu->util is already in capacity scale; convert iowait_boost
324	 * into the same scale so we can compare.
325	 */
326	return (sg_cpu->iowait_boost * max_cap) >> SCHED_CAPACITY_SHIFT;
327}
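/*
 * Scaling example (illustrative): iowait_boost = 512 with max_cap = 1024
 * returns (512 * 1024) >> SCHED_CAPACITY_SHIFT = 512, while the same boost
 * on a little CPU with max_cap = 432 returns (512 * 432) >> 10 = 216, so
 * the boost handed to sugov_get_util() is expressed in that CPU's
 * capacity units.
 */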
328
329#ifdef CONFIG_NO_HZ_COMMON
330static bool sugov_hold_freq(struct sugov_cpu *sg_cpu)
331{
332	unsigned long idle_calls;
333	bool ret;
334
335	/*
336	 * The heuristic in this function is for the fair class. For SCX, the
337	 * performance target comes directly from the BPF scheduler. Let's just
338	 * follow it.
339	 */
340	if (scx_switched_all())
341		return false;
342
343	/* if capped by uclamp_max, always update to be in compliance */
344	if (uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)))
345		return false;
346
347	/*
348	 * Maintain the frequency if the CPU has not been idle recently, as
349	 * reduction is likely to be premature.
350	 */
351	idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
352	ret = idle_calls == sg_cpu->saved_idle_calls;
353
354	sg_cpu->saved_idle_calls = idle_calls;
355	return ret;
356}
357#else
358static inline bool sugov_hold_freq(struct sugov_cpu *sg_cpu) { return false; }
359#endif /* CONFIG_NO_HZ_COMMON */
360
361/*
362 * Make sugov_should_update_freq() ignore the rate limit when DL
363 * has increased the utilization.
364 */
365static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
366{
367	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_min)
368		sg_cpu->sg_policy->limits_changed = true;
369}
370
371static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
372					      u64 time, unsigned long max_cap,
373					      unsigned int flags)
374{
375	unsigned long boost;
376
377	sugov_iowait_boost(sg_cpu, time, flags);
378	sg_cpu->last_update = time;
379
380	ignore_dl_rate_limit(sg_cpu);
381
382	if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
383		return false;
384
385	boost = sugov_iowait_apply(sg_cpu, time, max_cap);
386	sugov_get_util(sg_cpu, boost);
387
388	return true;
389}
390
391static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
392				     unsigned int flags)
393{
394	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
395	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
396	unsigned int cached_freq = sg_policy->cached_raw_freq;
397	unsigned long max_cap;
398	unsigned int next_f;
399
400	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
401
402	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
403		return;
404
405	next_f = get_next_freq(sg_policy, sg_cpu->util, max_cap);
406
407	if (sugov_hold_freq(sg_cpu) && next_f < sg_policy->next_freq &&
408	    !sg_policy->need_freq_update) {
409		next_f = sg_policy->next_freq;
410
411		/* Restore cached freq as next_freq has changed */
412		sg_policy->cached_raw_freq = cached_freq;
413	}
414
415	if (!sugov_update_next_freq(sg_policy, time, next_f))
416		return;
417
418	/*
419	 * This code runs under rq->lock for the target CPU, so it won't run
420	 * concurrently on two different CPUs for the same target and it is not
421	 * necessary to acquire the lock in the fast switch case.
422	 */
423	if (sg_policy->policy->fast_switch_enabled) {
424		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
425	} else {
426		raw_spin_lock(&sg_policy->update_lock);
427		sugov_deferred_update(sg_policy);
428		raw_spin_unlock(&sg_policy->update_lock);
429	}
430}
431
432static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
433				     unsigned int flags)
434{
435	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
436	unsigned long prev_util = sg_cpu->util;
437	unsigned long max_cap;
438
439	/*
440	 * Fall back to the "frequency" path if frequency invariance is not
441	 * supported, because the direct mapping between the utilization and
442	 * the performance levels depends on the frequency invariance.
443	 */
444	if (!arch_scale_freq_invariant()) {
445		sugov_update_single_freq(hook, time, flags);
446		return;
447	}
448
449	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
450
451	if (!sugov_update_single_common(sg_cpu, time, max_cap, flags))
452		return;
453
454	if (sugov_hold_freq(sg_cpu) && sg_cpu->util < prev_util)
455		sg_cpu->util = prev_util;
456
457	cpufreq_driver_adjust_perf(sg_cpu->cpu, sg_cpu->bw_min,
458				   sg_cpu->util, max_cap);
459
460	sg_cpu->sg_policy->last_freq_update_time = time;
461}
462
463static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
464{
465	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
466	struct cpufreq_policy *policy = sg_policy->policy;
467	unsigned long util = 0, max_cap;
468	unsigned int j;
469
470	max_cap = arch_scale_cpu_capacity(sg_cpu->cpu);
471
472	for_each_cpu(j, policy->cpus) {
473		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
474		unsigned long boost;
475
476		boost = sugov_iowait_apply(j_sg_cpu, time, max_cap);
477		sugov_get_util(j_sg_cpu, boost);
478
479		util = max(j_sg_cpu->util, util);
480	}
481
482	return get_next_freq(sg_policy, util, max_cap);
483}
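/*
 * Illustrative note: unlike the older util/max ratio comparison, this
 * version simply takes the largest per-CPU utilization. For example,
 * per-CPU utilizations of {150, 700, 320, 90} give util = 700, and the
 * shared frequency is sized for that busiest CPU against max_cap.
 */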
484
485static void
486sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
487{
488	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
489	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
490	unsigned int next_f;
491
492	raw_spin_lock(&sg_policy->update_lock);
493
494	sugov_iowait_boost(sg_cpu, time, flags);
495	sg_cpu->last_update = time;
496
497	ignore_dl_rate_limit(sg_cpu);
498
499	if (sugov_should_update_freq(sg_policy, time)) {
500		next_f = sugov_next_freq_shared(sg_cpu, time);
501
502		if (!sugov_update_next_freq(sg_policy, time, next_f))
503			goto unlock;
504
505		if (sg_policy->policy->fast_switch_enabled)
506			cpufreq_driver_fast_switch(sg_policy->policy, next_f);
507		else
508			sugov_deferred_update(sg_policy);
509	}
510unlock:
511	raw_spin_unlock(&sg_policy->update_lock);
512}
513
514static void sugov_work(struct kthread_work *work)
515{
516	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
517	unsigned int freq;
518	unsigned long flags;
519
520	/*
521	 * Hold sg_policy->update_lock briefly to handle the case where
522	 * sg_policy->next_freq is read here and then updated by
523	 * sugov_deferred_update() just before work_in_progress is set to false
524	 * here; otherwise we may miss queueing the new update.
525	 *
526	 * Note: if work was queued after the update_lock was released,
527	 * sugov_work() will just be called again by the kthread_work code; the
528	 * request will be processed before the sugov thread sleeps.
529	 */
530	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
531	freq = sg_policy->next_freq;
532	sg_policy->work_in_progress = false;
533	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
534
535	mutex_lock(&sg_policy->work_lock);
536	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
537	mutex_unlock(&sg_policy->work_lock);
538}
539
540static void sugov_irq_work(struct irq_work *irq_work)
541{
542	struct sugov_policy *sg_policy;
543
544	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
545
546	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
547}
548
549/************************** sysfs interface ************************/
550
551static struct sugov_tunables *global_tunables;
552static DEFINE_MUTEX(global_tunables_lock);
553
554static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
555{
556	return container_of(attr_set, struct sugov_tunables, attr_set);
557}
558
559static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
560{
561	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
562
563	return sprintf(buf, "%u\n", tunables->rate_limit_us);
564}
565
566static ssize_t
567rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
568{
569	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
570	struct sugov_policy *sg_policy;
571	unsigned int rate_limit_us;
572
573	if (kstrtouint(buf, 10, &rate_limit_us))
574		return -EINVAL;
575
576	tunables->rate_limit_us = rate_limit_us;
577
578	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
579		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;
580
581	return count;
582}
583
584static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
585
586static struct attribute *sugov_attrs[] = {
587	&rate_limit_us.attr,
588	NULL
589};
590ATTRIBUTE_GROUPS(sugov);
591
592static void sugov_tunables_free(struct kobject *kobj)
593{
594	struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
595
596	kfree(to_sugov_tunables(attr_set));
597}
598
599static const struct kobj_type sugov_tunables_ktype = {
600	.default_groups = sugov_groups,
601	.sysfs_ops = &governor_sysfs_ops,
602	.release = &sugov_tunables_free,
603};
604
605/********************** cpufreq governor interface *********************/
606
607#ifdef CONFIG_ENERGY_MODEL
608static void rebuild_sd_workfn(struct work_struct *work)
609{
610	rebuild_sched_domains_energy();
611}
612
613static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
614
615/*
616 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
617 * on governor changes to make sure the scheduler knows about it.
618 */
619static void sugov_eas_rebuild_sd(void)
620{
621	/*
622	 * When called from the cpufreq_register_driver() path, the
623	 * cpu_hotplug_lock is already held, so use a work item to
624	 * avoid nested locking in rebuild_sched_domains().
625	 */
626	schedule_work(&rebuild_sd_work);
627}
628#else
629static inline void sugov_eas_rebuild_sd(void) { };
630#endif
631
632struct cpufreq_governor schedutil_gov;
633
634static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
635{
636	struct sugov_policy *sg_policy;
637
638	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
639	if (!sg_policy)
640		return NULL;
641
642	sg_policy->policy = policy;
643	raw_spin_lock_init(&sg_policy->update_lock);
644	return sg_policy;
645}
646
647static void sugov_policy_free(struct sugov_policy *sg_policy)
648{
649	kfree(sg_policy);
650}
651
652static int sugov_kthread_create(struct sugov_policy *sg_policy)
653{
654	struct task_struct *thread;
655	struct sched_attr attr = {
656		.size		= sizeof(struct sched_attr),
657		.sched_policy	= SCHED_DEADLINE,
658		.sched_flags	= SCHED_FLAG_SUGOV,
659		.sched_nice	= 0,
660		.sched_priority	= 0,
661		/*
662		 * Fake (unused) bandwidth; workaround to "fix"
663		 * priority inheritance.
664		 */
665		.sched_runtime	= NSEC_PER_MSEC,
666		.sched_deadline = 10 * NSEC_PER_MSEC,
667		.sched_period	= 10 * NSEC_PER_MSEC,
668	};
669	struct cpufreq_policy *policy = sg_policy->policy;
670	int ret;
671
672	/* kthread only required for slow path */
673	if (policy->fast_switch_enabled)
674		return 0;
675
676	kthread_init_work(&sg_policy->work, sugov_work);
677	kthread_init_worker(&sg_policy->worker);
678	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
679				"sugov:%d",
680				cpumask_first(policy->related_cpus));
681	if (IS_ERR(thread)) {
682		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
683		return PTR_ERR(thread);
684	}
685
686	ret = sched_setattr_nocheck(thread, &attr);
687	if (ret) {
688		kthread_stop(thread);
689		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
690		return ret;
691	}
692
693	sg_policy->thread = thread;
694	kthread_bind_mask(thread, policy->related_cpus);
695	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
696	mutex_init(&sg_policy->work_lock);
697
698	wake_up_process(thread);
699
700	return 0;
701}
702
703static void sugov_kthread_stop(struct sugov_policy *sg_policy)
704{
705	/* kthread only required for slow path */
706	if (sg_policy->policy->fast_switch_enabled)
707		return;
708
709	kthread_flush_worker(&sg_policy->worker);
710	kthread_stop(sg_policy->thread);
711	mutex_destroy(&sg_policy->work_lock);
712}
713
714static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
715{
716	struct sugov_tunables *tunables;
717
718	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
719	if (tunables) {
720		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
721		if (!have_governor_per_policy())
722			global_tunables = tunables;
723	}
724	return tunables;
725}
726
727static void sugov_clear_global_tunables(void)
728{
729	if (!have_governor_per_policy())
730		global_tunables = NULL;
731}
732
733static int sugov_init(struct cpufreq_policy *policy)
734{
735	struct sugov_policy *sg_policy;
736	struct sugov_tunables *tunables;
737	int ret = 0;
738
739	/* State should be equivalent to EXIT */
740	if (policy->governor_data)
741		return -EBUSY;
742
743	cpufreq_enable_fast_switch(policy);
744
745	sg_policy = sugov_policy_alloc(policy);
746	if (!sg_policy) {
747		ret = -ENOMEM;
748		goto disable_fast_switch;
749	}
750
751	ret = sugov_kthread_create(sg_policy);
752	if (ret)
753		goto free_sg_policy;
754
755	mutex_lock(&global_tunables_lock);
756
757	if (global_tunables) {
758		if (WARN_ON(have_governor_per_policy())) {
759			ret = -EINVAL;
760			goto stop_kthread;
761		}
762		policy->governor_data = sg_policy;
763		sg_policy->tunables = global_tunables;
764
765		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
766		goto out;
767	}
768
769	tunables = sugov_tunables_alloc(sg_policy);
770	if (!tunables) {
771		ret = -ENOMEM;
772		goto stop_kthread;
773	}
774
775	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);
776
777	policy->governor_data = sg_policy;
778	sg_policy->tunables = tunables;
779
780	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
781				   get_governor_parent_kobj(policy), "%s",
782				   schedutil_gov.name);
783	if (ret)
784		goto fail;
785
786out:
787	sugov_eas_rebuild_sd();
788	mutex_unlock(&global_tunables_lock);
789	return 0;
790
791fail:
792	kobject_put(&tunables->attr_set.kobj);
793	policy->governor_data = NULL;
794	sugov_clear_global_tunables();
795
796stop_kthread:
797	sugov_kthread_stop(sg_policy);
798	mutex_unlock(&global_tunables_lock);
799
800free_sg_policy:
801	sugov_policy_free(sg_policy);
802
803disable_fast_switch:
804	cpufreq_disable_fast_switch(policy);
805
806	pr_err("initialization failed (error %d)\n", ret);
807	return ret;
808}
809
810static void sugov_exit(struct cpufreq_policy *policy)
811{
812	struct sugov_policy *sg_policy = policy->governor_data;
813	struct sugov_tunables *tunables = sg_policy->tunables;
814	unsigned int count;
815
816	mutex_lock(&global_tunables_lock);
817
818	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
819	policy->governor_data = NULL;
820	if (!count)
821		sugov_clear_global_tunables();
822
823	mutex_unlock(&global_tunables_lock);
824
825	sugov_kthread_stop(sg_policy);
826	sugov_policy_free(sg_policy);
827	cpufreq_disable_fast_switch(policy);
828
829	sugov_eas_rebuild_sd();
830}
831
832static int sugov_start(struct cpufreq_policy *policy)
833{
834	struct sugov_policy *sg_policy = policy->governor_data;
835	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
836	unsigned int cpu;
837
838	sg_policy->freq_update_delay_ns	= sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
839	sg_policy->last_freq_update_time	= 0;
840	sg_policy->next_freq			= 0;
841	sg_policy->work_in_progress		= false;
842	sg_policy->limits_changed		= false;
843	sg_policy->cached_raw_freq		= 0;
844
845	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
846
847	if (policy_is_shared(policy))
848		uu = sugov_update_shared;
849	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
850		uu = sugov_update_single_perf;
851	else
852		uu = sugov_update_single_freq;
853
854	for_each_cpu(cpu, policy->cpus) {
855		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
856
857		memset(sg_cpu, 0, sizeof(*sg_cpu));
858		sg_cpu->cpu = cpu;
859		sg_cpu->sg_policy = sg_policy;
860		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
861	}
862	return 0;
863}
864
865static void sugov_stop(struct cpufreq_policy *policy)
866{
867	struct sugov_policy *sg_policy = policy->governor_data;
868	unsigned int cpu;
869
870	for_each_cpu(cpu, policy->cpus)
871		cpufreq_remove_update_util_hook(cpu);
872
873	synchronize_rcu();
874
875	if (!policy->fast_switch_enabled) {
876		irq_work_sync(&sg_policy->irq_work);
877		kthread_cancel_work_sync(&sg_policy->work);
878	}
879}
880
881static void sugov_limits(struct cpufreq_policy *policy)
882{
883	struct sugov_policy *sg_policy = policy->governor_data;
884
885	if (!policy->fast_switch_enabled) {
886		mutex_lock(&sg_policy->work_lock);
887		cpufreq_policy_apply_limits(policy);
888		mutex_unlock(&sg_policy->work_lock);
889	}
890
891	sg_policy->limits_changed = true;
892}
893
894struct cpufreq_governor schedutil_gov = {
895	.name			= "schedutil",
896	.owner			= THIS_MODULE,
897	.flags			= CPUFREQ_GOV_DYNAMIC_SWITCHING,
898	.init			= sugov_init,
899	.exit			= sugov_exit,
900	.start			= sugov_start,
901	.stop			= sugov_stop,
902	.limits			= sugov_limits,
903};
904
905#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
906struct cpufreq_governor *cpufreq_default_governor(void)
907{
908	return &schedutil_gov;
909}
910#endif
911
912cpufreq_governor_init(schedutil_gov);