#ifdef CONFIG_SMP
#include "sched-pelt.h"
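
/*
 * PELT update helpers, implemented in kernel/sched/pelt.c. Each one
 * advances the tracked load/utilization averages of the given entity
 * or rq and typically returns non-zero when the averages were actually
 * updated, i.e. when at least one full PELT period has elapsed since
 * the previous update.
 */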
int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif

/*
 * When a task is dequeued, its estimated utilization should not be updated if
 * its util_avg has not been updated at least once.
 * This flag is used to synchronize util_avg updates with util_est updates.
 * We map this information into the LSB of the utilization saved at
 * dequeue time (i.e. util_est.dequeued).
 */
#define UTIL_AVG_UNCHANGED 0x1
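
/*
 * Concrete example: a dequeue that saves a utilization of 236 with the
 * flag set stores 236 | UTIL_AVG_UNCHANGED == 237. As long as the LSB
 * stays set, util_avg has not been refreshed since that store, so the
 * next dequeue should skip its util_est update; once the flag is
 * cleared below, util_est may follow util_avg again.
 */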

static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Avoid the store if the flag has already been set */
	enqueued = avg->util_est.enqueued;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

	/* Reset the flag to report that util_avg has been updated */
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est.enqueued, enqueued);
}
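
/*
 * cfs_se_util_change() is expected to be called from the PELT update
 * path (e.g. __update_load_avg_se() in pelt.c) once the entity's
 * util_avg has actually been recomputed; clearing the flag tells the
 * next dequeue that util_avg now carries fresh information for
 * util_est.
 */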

/*
 * The clock_pelt scales the time to reflect the effective amount of
 * computation done during the running delta time but then syncs back to
 * clock_task when the rq is idle.
 *
 * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity  ------******---------------******---------------
 * @ half capacity ------************---------************---------
 * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 *
 */
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		/* The rq is idle, we can sync to clock_task */
		rq->clock_pelt = rq_clock_task(rq);
		return;
	}

	/*
	 * When a rq runs at a lower compute capacity, it will need
	 * more time to do the same amount of work than it would at max
	 * capacity. In order to be invariant, we scale the delta to
	 * reflect how much work has really been done.
	 * Running longer results in stealing idle time that will
	 * disturb the load signal compared to max capacity. This
	 * stolen idle time will be automatically reflected when the
	 * rq becomes idle and the clock is synced with
	 * rq_clock_task.
	 */

	/*
	 * Scale the elapsed time to reflect the real amount of
	 * computation
	 */
	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

	rq->clock_pelt += delta;
}
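
/*
 * Worked example, assuming SCHED_CAPACITY_SCALE == 1024: on a CPU whose
 * arch_scale_cpu_capacity() is 512 (half the capacity of the biggest
 * CPU) and whose arch_scale_freq_capacity() is 512 (running at half of
 * its maximum frequency), a 4ms wall-clock delta advances clock_pelt by
 * only 4ms * 512/1024 * 512/1024 = 1ms, i.e. the amount of work a big
 * CPU running at full speed would have done in 1ms.
 */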

/*
 * When the rq becomes idle, we have to check if it has lost idle time
 * because it was fully busy. A rq is fully used when the /Sum util_sum
 * is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
 * For optimization and to account for rounding, we don't take into account
 * the position in the current window (period_contrib) and we use the higher
 * bound of util_sum to decide.
 */
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
	u32 util_sum = rq->cfs.avg.util_sum;
	util_sum += rq->avg_rt.util_sum;
	util_sum += rq->avg_dl.util_sum;

	/*
	 * Reflecting stolen time makes sense only if the idle
	 * phase would be present at max capacity. As soon as the
	 * utilization of a rq has reached the maximum value, it is
	 * considered as an always running rq without idle time to
	 * steal. This potential idle time is considered as lost in
	 * this case. We keep track of this lost idle time compared to
	 * rq's clock_task.
	 */
	if (util_sum >= divider)
		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
}
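
/*
 * The PELT clock actually consumed by the load-tracking code: the
 * scaled clock_pelt minus the idle time that could not really have
 * existed because the rq was fully busy (see update_idle_rq_clock_pelt()
 * above), so that time does not turn into extra decay of the signals.
 */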
static inline u64 rq_clock_pelt(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_pelt - rq->lost_idle_time;
}

#ifdef CONFIG_CFS_BANDWIDTH
/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
}
#else
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	return rq_clock_pelt(rq_of(cfs_rq));
}
#endif

#else
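/*
 * !CONFIG_SMP: per-entity load tracking is only built for SMP kernels,
 * so the update helpers degrade to no-ops returning 0 and the PELT
 * clock simply falls back to rq_clock_task().
 */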

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

#endif