#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta;
                rq->rq_sched_info.pcount++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
        if (rq)
                rq->rq_sched_info.run_delay += delta;
}
#define schedstat_enabled()             static_branch_unlikely(&sched_schedstats)
#define schedstat_inc(var)              do { if (schedstat_enabled()) { var++; } } while (0)
#define schedstat_add(var, amt)         do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define schedstat_set(var, val)         do { if (schedstat_enabled()) { var = (val); } } while (0)
#define schedstat_val(var)              (var)
#define schedstat_val_or_zero(var)      ((schedstat_enabled()) ? (var) : 0)

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_enabled()             0
#define schedstat_inc(var)              do { } while (0)
#define schedstat_add(var, amt)         do { } while (0)
#define schedstat_set(var, val)         do { } while (0)
#define schedstat_val(var)              0
#define schedstat_val_or_zero(var)      0
#endif /* CONFIG_SCHEDSTATS */
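
/*
 * Usage sketch (illustrative; these call sites are examples, not part of
 * this header): callers pass the statistic lvalue directly, e.g.
 *
 *      schedstat_inc(rq->yld_count);
 *      schedstat_add(rq->rq_sched_info.run_delay, delta);
 *      schedstat_set(p->se.statistics.wait_start, rq_clock(rq));
 *
 * With CONFIG_SCHEDSTATS=n the macros compile away entirely; with it
 * enabled, schedstat_enabled() is a static branch, so schedstats that are
 * disabled at runtime cost a single patched jump per site.
 */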

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs; the delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (unlikely(sched_info_on()))
                if (t->sched_info.last_queued)
                        delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;

        rq_sched_info_dequeued(rq, delta);
}

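/*
 * Worked example (hypothetical numbers, purely illustrative): a task is
 * queued on CPU0 at rq_clock = 1000 and dequeued for migration at 1400,
 * so this routine adds 400 to run_delay using only CPU0's clock. It is
 * then re-queued on CPU1 at rq_clock = 9000 (CPU1's clock may be skewed)
 * and finally picked at 9250, and sched_info_arrive() adds another 250.
 * Each delta is measured on a single CPU's clock, so cross-CPU skew never
 * enters the 650 total.
 */
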
/*
 * Called when a task finally hits the CPU. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
        unsigned long long now = rq_clock(rq), delta = 0;

        if (t->sched_info.last_queued)
                delta = now - t->sched_info.last_queued;
        sched_info_reset_dequeued(t);
        t->sched_info.run_delay += delta;
        t->sched_info.last_arrival = now;
        t->sched_info.pcount++;

        rq_sched_info_arrive(rq, delta);
}

/*
 * This function is called from enqueue_task() (and from sched_info_depart()
 * below, when a preempted task remains runnable), but only updates the
 * timestamp if it is not already set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
        if (unlikely(sched_info_on()))
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = rq_clock(rq);
}

/*
 * Called when a process ceases being the active-running process
 * involuntarily due, typically, to expiring its time slice (this may
 * also be called when switching to the idle task). Now we can calculate
 * how long we ran. Also, if the process is still in the TASK_RUNNING
 * state, call sched_info_queued() to mark that it has now again started
 * waiting on the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
        unsigned long long delta = rq_clock(rq) -
                        t->sched_info.last_arrival;

        rq_sched_info_depart(rq, delta);

        if (t->state == TASK_RUNNING)
                sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq,
                    struct task_struct *prev, struct task_struct *next)
{
        /*
         * prev now departs the CPU. It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(rq, prev);

        if (next != rq->idle)
                sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq,
                  struct task_struct *prev, struct task_struct *next)
{
        if (unlikely(sched_info_on()))
                __sched_info_switch(rq, prev, next);
}
#else /* !CONFIG_SCHED_INFO */
#define sched_info_queued(rq, t)                do { } while (0)
#define sched_info_reset_dequeued(t)            do { } while (0)
#define sched_info_dequeued(rq, t)              do { } while (0)
#define sched_info_depart(rq, t)                do { } while (0)
#define sched_info_arrive(rq, next)             do { } while (0)
#define sched_info_switch(rq, t, next)          do { } while (0)
#endif /* CONFIG_SCHED_INFO */
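
/*
 * Call-site sketch (abridged and illustrative, not part of this header):
 * the core scheduler invokes sched_info_switch() once per context switch,
 * guaranteeing prev != next, roughly as in kernel/sched/core.c:
 *
 *      static inline void
 *      prepare_task_switch(struct rq *rq, struct task_struct *prev,
 *                          struct task_struct *next)
 *      {
 *              sched_info_switch(rq, prev, next);
 *              ...
 *      }
 *
 * The sched_info_on() test keeps this a near-no-op unless schedstats or
 * task delay accounting is enabled.
 */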

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick. None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * cputimer_running - return true if cputimer is running
 *
 * @tsk: Pointer to target task.
 */
static inline bool cputimer_running(struct task_struct *tsk)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        /* Check if cputimer isn't running. This is accessed without locking. */
        if (!READ_ONCE(cputimer->running))
                return false;

        /*
         * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
         * in __exit_signal(), we no longer account further cputime consumed
         * by that task to the signal struct, even though the task can still
         * be ticking after __exit_signal().
         *
         * In order to keep a consistent behaviour between thread group cputime
         * and thread group cputimer accounting, let's also ignore the cputime
         * elapsing after __exit_signal() in any thread group timer running.
         *
         * This makes sure that POSIX CPU clocks and timers are synchronized, so
         * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
         * clock delta is behind the expiring timer value.
         */
        if (unlikely(!tsk->sighand))
                return false;

        return true;
}
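
/*
 * Informational note (not part of the upstream header): cputimer->running
 * is managed by the POSIX CPU timer code, which sets it when a thread-group
 * timer is armed and clears it once none remain. A racy lockless read here
 * is tolerable because a stale value costs at most one tick's worth of
 * group accounting around the transition.
 */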

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:     Pointer to task structure.
 * @cputime: Time value by which to increment the utime field of the
 *           thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
                                           cputime_t cputime)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.utime);
}


/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:     Pointer to task structure.
 * @cputime: Time value by which to increment the stime field of the
 *           thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
                                             cputime_t cputime)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        atomic64_add(cputime, &cputimer->cputime_atomic.stime);
}


/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk: Pointer to task structure.
 * @ns:  Time value by which to increment the sum_exec_runtime field
 *       of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
                                              unsigned long long ns)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

        if (!cputimer_running(tsk))
                return;

        atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
}
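
/*
 * Usage sketch (illustrative; exact call sites vary by kernel version):
 * the tick-accounting and update_curr() paths feed these helpers, e.g.
 *
 *      account_user_time(p, cputime, cputime_scaled);
 *              -> account_group_user_time(p, cputime);
 *      account_system_time(p, hardirq_offset, cputime, cputime_scaled);
 *              -> account_group_system_time(p, cputime);
 *      update_curr(cfs_rq);
 *              -> account_group_exec_runtime(curtask, delta_exec);
 *
 * so the thread group's cputime_atomic totals stay in step with per-task
 * accounting whenever a group-wide POSIX CPU timer is armed.
 */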