#ifdef CONFIG_SMP
#include "sched-pelt.h"

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
bool update_other_load_avgs(struct rq *rq);

#ifdef CONFIG_SCHED_HW_PRESSURE
int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity);

static inline u64 hw_load_avg(struct rq *rq)
{
	return READ_ONCE(rq->avg_hw.load_avg);
}
#else
static inline int
update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 hw_load_avg(struct rq *rq)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif

#define PELT_MIN_DIVIDER	(LOAD_AVG_MAX - 1024)

static inline u32 get_pelt_divider(struct sched_avg *avg)
{
	return PELT_MIN_DIVIDER + avg->period_contrib;
}

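/*
 * A rough sense of the numbers, assuming the generated sched-pelt.h defines
 * LOAD_AVG_MAX as 47742 (the usual value for the default 32-period
 * half-life): the divider above then lies between PELT_MIN_DIVIDER == 46718
 * and 47741, depending on period_contrib, the portion of the current 1024us
 * window already accumulated.
 */
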
static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Avoid the store if the flag has already been reset */
	enqueued = avg->util_est;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

	/* Reset flag to report util_avg has been updated */
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est, enqueued);
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_pelt - rq->lost_idle_time;
}

/* The rq is idle; we can sync to clock_task */
static inline void _update_idle_rq_clock_pelt(struct rq *rq)
{
	rq->clock_pelt  = rq_clock_task(rq);

	u64_u32_store(rq->clock_idle, rq_clock(rq));
	/* Paired with smp_rmb in migrate_se_pelt_lag() */
	smp_wmb();
	u64_u32_store(rq->clock_pelt_idle, rq_clock_pelt(rq));
}

/*
 * clock_pelt scales time to reflect the effective amount of computation
 * done during the running delta time, but is then synced back to
 * clock_task when the rq is idle.
 *
 * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity  ------******---------------******---------------
 * @ half capacity ------************---------************---------
 * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 *
 */
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		_update_idle_rq_clock_pelt(rq);
		return;
	}

	/*
	 * When an rq runs at a lower compute capacity, it needs more
	 * time to do the same amount of work than it would at max
	 * capacity. To keep the signal invariant, we scale the delta
	 * to reflect how much work has really been done.
	 * Running longer steals idle time, which disturbs the load
	 * signal compared to max capacity. This stolen idle time is
	 * accounted for automatically when the rq becomes idle and
	 * the clock is synced with rq_clock_task.
	 */

	/*
	 * Scale the elapsed time to reflect the real amount of
	 * computation
	 */
	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

	rq->clock_pelt += delta;
}

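/*
 * A worked example of the scaling above, assuming cap_scale(v, cap) expands
 * to (v * cap) >> SCHED_CAPACITY_SHIFT and SCHED_CAPACITY_SHIFT == 10: on a
 * CPU whose compute capacity is 512/1024 and which currently runs at half of
 * its maximum frequency (frequency scale 512/1024), a 4000us wall-clock delta
 * is scaled to
 *
 *	4000 * 512 >> 10 = 2000		(CPU-capacity invariance)
 *	2000 * 512 >> 10 = 1000		(frequency invariance)
 *
 * so clock_pelt advances by only 1000us, the time the same work would have
 * taken at maximum capacity and frequency.
 */
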
/*
 * When the rq becomes idle, we have to check whether it has lost idle time
 * because it was fully busy. An rq is fully used when the sum of the
 * util_sum values is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
 * As an optimization, and to account for computation rounding, we don't take
 * the position in the current window (period_contrib) into account and use
 * the upper bound of util_sum to decide.
 */
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
	u32 util_sum = rq->cfs.avg.util_sum;
	util_sum += rq->avg_rt.util_sum;
	util_sum += rq->avg_dl.util_sum;

	/*
	 * Reflecting stolen time makes sense only if the idle
	 * phase would also have been present at max capacity. Once
	 * the utilization of an rq has reached the maximum value, it
	 * is considered an always-running rq without idle time to
	 * steal. In that case the potential idle time is considered
	 * lost, and we track this lost idle time relative to the
	 * rq's clock_task.
	 */
	if (util_sum >= divider)
		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;

	_update_idle_rq_clock_pelt(rq);
}

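/*
 * Putting numbers on the check above, again assuming LOAD_AVG_MAX == 47742
 * and SCHED_CAPACITY_SHIFT == 10: divider == (46718 << 10) - 47742 ==
 * 47791490. Only when the summed CFS, RT and DL util_sum reaches this bound
 * is the rq treated as always running, with no idle time left to steal, and
 * the gap between clock_task and clock_pelt recorded as lost idle time.
 */
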
#ifdef CONFIG_CFS_BANDWIDTH
static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	u64 throttled;

	if (unlikely(cfs_rq->throttle_count))
		throttled = U64_MAX;
	else
		throttled = cfs_rq->throttled_clock_pelt_time;

	u64_u32_store(cfs_rq->throttled_pelt_idle, throttled);
}

/* rq_clock_pelt() normalized against any time this cfs_rq has spent throttled */
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_pelt - cfs_rq->throttled_clock_pelt_time;

	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_pelt_time;
}
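
/*
 * A sketch of the effect, using made-up numbers: if a cfs_rq has accumulated
 * 3ms of throttled_clock_pelt_time, cfs_rq_clock_pelt() reads 3ms behind
 * rq_clock_pelt(), so PELT sees no time passing for the throttled window and
 * the group's load/util signals neither decay nor accrue while throttled.
 */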
#else
static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	return rq_clock_pelt(rq_of(cfs_rq));
}
#endif

#else

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_hw_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 hw_load_avg(struct rq *rq)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

static inline void update_idle_cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) { }
#endif