#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}
#define schedstat_enabled() static_branch_unlikely(&sched_schedstats)
#define schedstat_inc(var) do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt) do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var) (var)
#define schedstat_val_or_zero(var) ((schedstat_enabled()) ? (var) : 0)
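
/*
 * Illustrative usage (a sketch, not part of this header): callers hand the
 * macro the lvalue to update, and the update compiles down to nothing unless
 * schedstats was enabled at boot (schedstats=enable) or via the
 * kernel.sched_schedstats sysctl. A scheduler path might do, for example
 * (field names shown only for illustration):
 *
 *	schedstat_inc(rq->yld_count);
 *	schedstat_set(se->statistics.wait_start, 0);
 */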

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_enabled() 0
#define schedstat_inc(var) do { } while (0)
#define schedstat_add(var, amt) do { } while (0)
#define schedstat_set(var, val) do { } while (0)
#define schedstat_val(var) 0
#define schedstat_val_or_zero(var) 0
#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this routine
 * from dequeue_task() to account for possible rq->clock skew across CPUs. The
 * delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (unlikely(sched_info_on()))
                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (t->sched_info.last_queued)
                delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
        if (unlikely(sched_info_on()))
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = rq_clock(rq) -
                        t->sched_info.last_arrival;

        rq_sched_info_depart(rq, delta);

        if (t->state == TASK_RUNNING)
                sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
                    struct task_struct *prev, struct task_struct *next)
{
        /*
         * prev now departs the cpu. It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(rq, prev);

        if (next != rq->idle)
                sched_info_arrive(rq, next);
}
static inline void
sched_info_switch(struct rq *rq,
                  struct task_struct *prev, struct task_struct *next)
{
        if (unlikely(sched_info_on()))
                __sched_info_switch(rq, prev, next);
}
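
/*
 * Illustrative call site (a sketch, not part of this header): the context
 * switch path invokes sched_info_switch() once it knows prev != next,
 * roughly along the lines of prepare_task_switch() in kernel/sched/core.c:
 *
 *	static inline void
 *	prepare_task_switch(struct rq *rq, struct task_struct *prev,
 *			    struct task_struct *next)
 *	{
 *		sched_info_switch(rq, prev, next);
 *		...
 *	}
 */
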
#else
#define sched_info_queued(rq, t) do { } while (0)
#define sched_info_reset_dequeued(t) do { } while (0)
#define sched_info_dequeued(rq, t) do { } while (0)
#define sched_info_depart(rq, t) do { } while (0)
#define sched_info_arrive(rq, next) do { } while (0)
#define sched_info_switch(rq, t, next) do { } while (0)
#endif /* CONFIG_SCHED_INFO */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk: Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        /* Check if cputimer isn't running. This is accessed without locking. */
        if (!READ_ONCE(cputimer->running))
                return false;

        /*
         * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
         * in __exit_signal(), we won't account to the signal struct further
         * cputime consumed by that task, even though the task can still be
         * ticking after __exit_signal().
         *
         * In order to keep a consistent behaviour between thread group cputime
         * and thread group cputimer accounting, let's also ignore the cputime
         * elapsing after __exit_signal() in any thread group timer running.
         *
         * This makes sure that POSIX CPU clocks and timers are synchronized, so
         * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
         * clock delta is behind the expiring timer value.
         */
        if (unlikely(!tsk->sighand))
                return false;

        return true;
}

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @cputime: Time value by which to increment the utime field of the
 *           thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
                                           cputime_t cputime)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}
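
/*
 * Illustrative call site (a sketch, not part of this header): tick-time
 * accounting feeds the thread-group totals, roughly as account_user_time()
 * in kernel/sched/cputime.c does when charging a tick's worth of user time:
 *
 *	void account_user_time(struct task_struct *p, cputime_t cputime, ...)
 *	{
 *		...
 *		account_group_user_time(p, cputime);
 *		...
 *	}
 */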

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @cputime: Time value by which to increment the stime field of the
 *           thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
                                             cputime_t cputime)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @ns: Time value by which to increment the sum_exec_runtime field
 *      of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_STATS_H
#define _KERNEL_STATS_H

#ifdef CONFIG_SCHEDSTATS

extern struct static_key_false sched_schedstats;

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}
#define schedstat_enabled() static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var) do { var++; } while (0)
#define schedstat_inc(var) do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt) do { var += (amt); } while (0)
#define schedstat_add(var, amt) do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val) do { var = (val); } while (0)
#define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var) (var)
#define schedstat_val_or_zero(var) ((schedstat_enabled()) ? (var) : 0)
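
/*
 * Illustrative usage (a sketch, not part of this header): the __schedstat_*()
 * variants skip the static-branch test, so they are meant for paths that have
 * already checked schedstat_enabled() once and want to issue several updates,
 * e.g. (field names are only for illustration):
 *
 *	if (schedstat_enabled()) {
 *		__schedstat_inc(stats->wait_count);
 *		__schedstat_add(stats->wait_sum, delta);
 *		__schedstat_set(stats->wait_start, 0);
 *	}
 *
 * The plain schedstat_*() forms embed the schedstat_enabled() check and are
 * meant for one-off updates.
 */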

void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
                               struct sched_statistics *stats);

void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
                             struct sched_statistics *stats);
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
                                    struct sched_statistics *stats);

static inline void
check_schedstat_required(void)
{
        if (schedstat_enabled())
                return;

        /* Force schedstat enabled if a dependent tracepoint is active */
        if (trace_sched_stat_wait_enabled() ||
            trace_sched_stat_sleep_enabled() ||
            trace_sched_stat_iowait_enabled() ||
            trace_sched_stat_blocked_enabled() ||
            trace_sched_stat_runtime_enabled())
                printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");
}

#else /* !CONFIG_SCHEDSTATS: */

static inline void rq_sched_info_arrive (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delta) { }
# define schedstat_enabled() 0
# define __schedstat_inc(var) do { } while (0)
# define schedstat_inc(var) do { } while (0)
# define __schedstat_add(var, amt) do { } while (0)
# define schedstat_add(var, amt) do { } while (0)
# define __schedstat_set(var, val) do { } while (0)
# define schedstat_set(var, val) do { } while (0)
# define schedstat_val(var) 0
# define schedstat_val_or_zero(var) 0

# define __update_stats_wait_start(rq, p, stats) do { } while (0)
# define __update_stats_wait_end(rq, p, stats) do { } while (0)
# define __update_stats_enqueue_sleeper(rq, p, stats) do { } while (0)
# define check_schedstat_required() do { } while (0)

#endif /* CONFIG_SCHEDSTATS */

#ifdef CONFIG_FAIR_GROUP_SCHED
struct sched_entity_stats {
        struct sched_entity     se;
        struct sched_statistics stats;
} __no_randomize_layout;
#endif

static inline struct sched_statistics *
__schedstats_from_se(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
        if (!entity_is_task(se))
                return &container_of(se, struct sched_entity_stats, se)->stats;
#endif
        return &task_of(se)->stats;
}
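
/*
 * Illustrative usage (a sketch, not part of this header): callers that only
 * hold a sched_entity resolve the statistics block through this helper before
 * touching any schedstat fields, roughly:
 *
 *	struct sched_statistics *stats = __schedstats_from_se(se);
 *
 *	if (schedstat_enabled())
 *		__schedstat_set(stats->wait_start, rq_clock(rq));
 *
 * For a task entity this resolves to task_struct::stats; for a group entity
 * it resolves to the sched_entity_stats wrapper above.
 */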

#ifdef CONFIG_PSI
void psi_task_change(struct task_struct *task, int clear, int set);
void psi_task_switch(struct task_struct *prev, struct task_struct *next,
                     bool sleep);
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev);
#else
static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
                                       struct task_struct *prev) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and migrations, where a task
 * and its runnable state are being moved between CPUs and runqueues.
 *
 * A notable case is a task whose dequeue is delayed. PSI considers
 * those sleeping, but because they are still on the runqueue they can
 * go through migration requeues. In this case, *sleeping* states need
 * to be transferred.
 */
static inline void psi_enqueue(struct task_struct *p, int flags)
{
        int clear = 0, set = 0;

        if (static_branch_likely(&psi_disabled))
                return;

        /* Same runqueue, nothing changed for psi */
        if (flags & ENQUEUE_RESTORE)
                return;

        /* psi_sched_switch() will handle the flags */
        if (task_on_cpu(task_rq(p), p))
                return;

        if (p->se.sched_delayed) {
                /* CPU migration of "sleeping" task */
                SCHED_WARN_ON(!(flags & ENQUEUE_MIGRATED));
                if (p->in_memstall)
                        set |= TSK_MEMSTALL;
                if (p->in_iowait)
                        set |= TSK_IOWAIT;
        } else if (flags & ENQUEUE_MIGRATED) {
                /* CPU migration of runnable task */
                set = TSK_RUNNING;
                if (p->in_memstall)
                        set |= TSK_MEMSTALL | TSK_MEMSTALL_RUNNING;
        } else {
                /* Wakeup of new or sleeping task */
                if (p->in_iowait)
                        clear |= TSK_IOWAIT;
                set = TSK_RUNNING;
                if (p->in_memstall)
                        set |= TSK_MEMSTALL_RUNNING;
        }

        psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, int flags)
{
        if (static_branch_likely(&psi_disabled))
                return;

        /* Same runqueue, nothing changed for psi */
        if (flags & DEQUEUE_SAVE)
                return;

        /*
         * A voluntary sleep is a dequeue followed by a task switch. To
         * avoid walking all ancestors twice, psi_task_switch() handles
         * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
         * Do nothing here.
         */
        if (flags & DEQUEUE_SLEEP)
                return;

        /*
         * When migrating a task to another CPU, clear all psi
         * state. The enqueue callback above will work it out.
         */
        psi_task_change(p, p->psi_flags, 0);
}
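
/*
 * Illustrative call sites (a sketch, not part of this header): the core
 * enqueue/dequeue paths in kernel/sched/core.c invoke these hooks with the
 * enqueue/dequeue flags they were given, roughly:
 *
 *	enqueue_task(rq, p, flags)  ->  psi_enqueue(p, flags);
 *	dequeue_task(rq, p, flags)  ->  psi_dequeue(p, flags);
 *
 * so the ENQUEUE_RESTORE/DEQUEUE_SAVE and migration cases above are decided
 * entirely from those flags plus the task's current state.
 */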

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
        if (static_branch_likely(&psi_disabled))
                return;
        /*
         * Is the task being migrated during a wakeup? Make sure to
         * deregister its sleep-persistent psi states from the old
         * queue, and let psi_enqueue() know it has to requeue.
         */
        if (unlikely(p->psi_flags)) {
                struct rq_flags rf;
                struct rq *rq;

                rq = __task_rq_lock(p, &rf);
                psi_task_change(p, p->psi_flags, 0);
                __task_rq_unlock(rq, &rf);
        }
}

static inline void psi_sched_switch(struct task_struct *prev,
                                    struct task_struct *next,
                                    bool sleep)
{
        if (static_branch_likely(&psi_disabled))
                return;

        psi_task_switch(prev, next, sleep);
}

#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool migrate) {}
static inline void psi_dequeue(struct task_struct *p, bool migrate) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_sched_switch(struct task_struct *prev,
                                    struct task_struct *next,
                                    bool sleep) {}
static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
                                       struct task_struct *prev) {}
#endif /* CONFIG_PSI */

#ifdef CONFIG_SCHED_INFO
/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this routine
 * from dequeue_task() to account for possible rq->clock skew across CPUs. The
 * delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = 0;

        if (!t->sched_info.last_queued)
                return;

        delta = rq_clock(rq) - t->sched_info.last_queued;
        t->sched_info.last_queued = 0;
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeue(rq, delta);
}
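
/*
 * Worked example of the skew argument above (illustrative numbers only):
 * suppose a task is queued on CPU0 at rq_clock 1000 and dequeued there at
 * 1400 for a migration; those 400 of run_delay are computed entirely from
 * CPU0's clock. After the move, last_queued is re-stamped from CPU1's rq
 * clock by sched_info_enqueue(), so the delay accumulated on CPU1 is again
 * measured against a single clock. A constant offset between the two rq
 * clocks therefore never enters a delta, which is what "annul the skew"
 * means.
 */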

/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its time-slice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
        unsigned long long now, delta = 0;

        if (!t->sched_info.last_queued)
                return;

        now = rq_clock(rq);
        delta = now - t->sched_info.last_queued;
        t->sched_info.last_queued = 0;
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(rq, delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set. It's assumed that
 * sched_info_dequeue() will clear that stamp when appropriate.
 */
static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
{
        if (!t->sched_info.last_queued)
                t->sched_info.last_queued = rq_clock(rq);
}
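
/*
 * Illustrative pairing (a sketch, not part of this header): enqueue_task()
 * and dequeue_task() in kernel/sched/core.c call these hooks only when a
 * task really joins or leaves the runqueue, roughly:
 *
 *	if (!(flags & ENQUEUE_RESTORE))
 *		sched_info_enqueue(rq, p);
 *	...
 *	if (!(flags & DEQUEUE_SAVE))
 *		sched_info_dequeue(rq, p);
 *
 * so save/restore requeues (e.g. priority changes) do not disturb a pending
 * last_queued stamp.
 */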

/*
 * Called when a process ceases being the active-running process involuntarily
 * due, typically, to expiring its time slice (this may also be called when
 * switching to the idle task). Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_enqueue() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

        rq_sched_info_depart(rq, delta);

        if (task_is_running(t))
                sched_info_enqueue(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
        /*
         * prev now departs the CPU. It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(rq, prev);

        if (next != rq->idle)
                sched_info_arrive(rq, next);
}

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_enqueue(rq, t) do { } while (0)
# define sched_info_dequeue(rq, t) do { } while (0)
# define sched_info_switch(rq, t, next) do { } while (0)
#endif /* CONFIG_SCHED_INFO */

#endif /* _KERNEL_STATS_H */