kernel/sched/stats.h (v3.15)
 
  1
  2#ifdef CONFIG_SCHEDSTATS
  3
  4/*
  5 * Expects runqueue lock to be held for atomicity of update
  6 */
  7static inline void
  8rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
  9{
 10	if (rq) {
 11		rq->rq_sched_info.run_delay += delta;
 12		rq->rq_sched_info.pcount++;
 13	}
 14}
 15
 16/*
 17 * Expects runqueue lock to be held for atomicity of update
 18 */
 19static inline void
 20rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 21{
 22	if (rq)
 23		rq->rq_cpu_time += delta;
 24}
 25
 26static inline void
 27rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 28{
 29	if (rq)
 30		rq->rq_sched_info.run_delay += delta;
 31}
 32# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
 33# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
 34# define schedstat_set(var, val)	do { var = (val); } while (0)
 35#else /* !CONFIG_SCHEDSTATS */
 36static inline void
 37rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
 38{}
 39static inline void
 40rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 41{}
 42static inline void
 43rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 44{}
 45# define schedstat_inc(rq, field)	do { } while (0)
 46# define schedstat_add(rq, field, amt)	do { } while (0)
 47# define schedstat_set(var, val)	do { } while (0)
 48#endif
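
A minimal usage sketch (not part of the file): in this v3.15 API the schedstat_* macros take the runqueue and a bare field name. The helper name below is made up for illustration; yld_count is assumed to be one of the CONFIG_SCHEDSTATS-only counters in struct rq.

static inline void example_note_yield(struct rq *rq)
{
	/* expands to rq->yld_count++ with CONFIG_SCHEDSTATS, to nothing without it */
	schedstat_inc(rq, yld_count);
}
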
 49
 50#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 51static inline void sched_info_reset_dequeued(struct task_struct *t)
 52{
 53	t->sched_info.last_queued = 0;
 54}
 55
 56/*
 57 * We are interested in knowing how long it was from the *first* time a
 58 * task was queued to the time that it finally hit a cpu; we call this routine
 59 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 60 * delta taken on each cpu would annul the skew.
 61 */
 62static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
 63{
 64	unsigned long long now = rq_clock(rq), delta = 0;
 65
 66	if (unlikely(sched_info_on()))
 67		if (t->sched_info.last_queued)
 68			delta = now - t->sched_info.last_queued;
 69	sched_info_reset_dequeued(t);
 70	t->sched_info.run_delay += delta;
 71
 72	rq_sched_info_dequeued(rq, delta);
 73}
 74
 75/*
 76 * Called when a task finally hits the cpu.  We can now calculate how
 77 * long it was waiting to run.  We also note when it began so that we
 78 * can keep stats on how long its timeslice is.
 79 */
 80static void sched_info_arrive(struct rq *rq, struct task_struct *t)
 81{
 82	unsigned long long now = rq_clock(rq), delta = 0;
 83
 84	if (t->sched_info.last_queued)
 85		delta = now - t->sched_info.last_queued;
 86	sched_info_reset_dequeued(t);
 87	t->sched_info.run_delay += delta;
 88	t->sched_info.last_arrival = now;
 89	t->sched_info.pcount++;
 90
 91	rq_sched_info_arrive(rq, delta);
 92}
 93
 94/*
 95 * This function is only called from enqueue_task(), and it updates
 96 * the timestamp only if it is not already set.  It's assumed that
 97 * sched_info_dequeued() will clear that stamp when appropriate.
 98 */
 99static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
100{
101	if (unlikely(sched_info_on()))
102		if (!t->sched_info.last_queued)
103			t->sched_info.last_queued = rq_clock(rq);
104}
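
A simplified sketch (not part of the file) of how the two hooks above pair up in kernel/sched/core.c around this version: enqueue_task() stamps last_queued through sched_info_queued(), and dequeue_task() folds the accumulated wait into run_delay through sched_info_dequeued(). The example_* wrappers are stand-ins; the real callers also invoke the sched_class methods, elided here.

static inline void example_enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_queued(rq, p);	/* record first time queued, if not already stamped */
	/* p->sched_class->enqueue_task(rq, p, flags); */
}

static inline void example_dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	update_rq_clock(rq);
	sched_info_dequeued(rq, p);	/* wait accumulated so far goes into run_delay */
	/* p->sched_class->dequeue_task(rq, p, flags); */
}
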
105
106/*
107 * Called when a process ceases being the active-running process involuntarily
108 * due, typically, to expiring its time slice (this may also be called when
109 * switching to the idle task).  Now we can calculate how long we ran.
110 * Also, if the process is still in the TASK_RUNNING state, call
111 * sched_info_queued() to mark that it has now again started waiting on
112 * the runqueue.
113 */
114static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
115{
116	unsigned long long delta = rq_clock(rq) -
117					t->sched_info.last_arrival;
118
119	rq_sched_info_depart(rq, delta);
120
121	if (t->state == TASK_RUNNING)
122		sched_info_queued(rq, t);
123}
124
125/*
126 * Called when tasks are switched involuntarily due, typically, to expiring
127 * their time slice.  (This may also be called when switching to or from
128 * the idle task.)  We are only called when prev != next.
129 */
130static inline void
131__sched_info_switch(struct rq *rq,
132		    struct task_struct *prev, struct task_struct *next)
133{
134	/*
135	 * prev now departs the cpu.  It's not interesting to record
136	 * stats about how efficient we were at scheduling the idle
137	 * process, however.
138	 */
139	if (prev != rq->idle)
140		sched_info_depart(rq, prev);
141
142	if (next != rq->idle)
143		sched_info_arrive(rq, next);
144}
145static inline void
146sched_info_switch(struct rq *rq,
147		  struct task_struct *prev, struct task_struct *next)
148{
149	if (unlikely(sched_info_on()))
150		__sched_info_switch(rq, prev, next);
151}
152#else
153#define sched_info_queued(rq, t)		do { } while (0)
154#define sched_info_reset_dequeued(t)	do { } while (0)
155#define sched_info_dequeued(rq, t)		do { } while (0)
156#define sched_info_depart(rq, t)		do { } while (0)
157#define sched_info_arrive(rq, next)		do { } while (0)
158#define sched_info_switch(rq, t, next)		do { } while (0)
159#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
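
A rough sketch (not part of the file): sched_info_switch() is driven from the context-switch path in kernel/sched/core.c, which only calls it when prev != next. The function below is a trimmed-down stand-in for that caller, with the other switch preparation elided.

static inline void
example_prepare_task_switch(struct rq *rq, struct task_struct *prev,
			    struct task_struct *next)
{
	sched_info_switch(rq, prev, next);	/* prev departs the CPU, next arrives */
	/* perf events, preempt notifiers, arch hooks elided */
}
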
160
161/*
162 * The following are functions that support scheduler-internal time accounting.
163 * These functions are generally called at the timer tick.  None of this depends
164 * on CONFIG_SCHEDSTATS.
165 */
166
167/**
168 * cputimer_running - return true if cputimer is running
169 *
170 * @tsk:	Pointer to target task.
171 */
172static inline bool cputimer_running(struct task_struct *tsk)
173
174{
175	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
176
177	if (!cputimer->running)
178		return false;
179
180	/*
181	 * After we flush the task's sum_exec_runtime to sig->sum_sched_runtime
182	 * in __exit_signal(), we won't account to the signal struct further
183	 * cputime consumed by that task, even though the task can still be
184	 * ticking after __exit_signal().
185	 *
186	 * In order to keep a consistent behaviour between thread group cputime
187 * and thread group cputimer accounting, let's also ignore the cputime
188 * elapsing after __exit_signal() in any running thread group timer.
189	 *
190	 * This makes sure that POSIX CPU clocks and timers are synchronized, so
191	 * that a POSIX CPU timer won't expire while the corresponding POSIX CPU
192	 * clock delta is behind the expiring timer value.
193	 */
194	if (unlikely(!tsk->sighand))
195		return false;
196
197	return true;
198}
199
200/**
201 * account_group_user_time - Maintain utime for a thread group.
202 *
203 * @tsk:	Pointer to task structure.
204 * @cputime:	Time value by which to increment the utime field of the
205 *		thread_group_cputime structure.
206 *
207 * If thread group time is being maintained, get the structure for the
208 * running CPU and update the utime field there.
209 */
210static inline void account_group_user_time(struct task_struct *tsk,
211					   cputime_t cputime)
212{
213	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
214
215	if (!cputimer_running(tsk))
216		return;
217
218	raw_spin_lock(&cputimer->lock);
219	cputimer->cputime.utime += cputime;
220	raw_spin_unlock(&cputimer->lock);
221}
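
An illustrative sketch (not part of the file): the tick-time cputime code charges user time both to the task itself and, through the helper above, to the thread group cputimer. The function below is a minimal stand-in for that caller, not the real kernel/sched/cputime.c code.

static inline void example_account_user_tick(struct task_struct *p, cputime_t cputime)
{
	p->utime += cputime;			/* per-task accounting */
	account_group_user_time(p, cputime);	/* thread-group cputimer, only if running */
}
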
222
223/**
224 * account_group_system_time - Maintain stime for a thread group.
225 *
226 * @tsk:	Pointer to task structure.
227 * @cputime:	Time value by which to increment the stime field of the
228 *		thread_group_cputime structure.
229 *
230 * If thread group time is being maintained, get the structure for the
231 * running CPU and update the stime field there.
232 */
233static inline void account_group_system_time(struct task_struct *tsk,
234					     cputime_t cputime)
235{
236	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
237
238	if (!cputimer_running(tsk))
239		return;
240
241	raw_spin_lock(&cputimer->lock);
242	cputimer->cputime.stime += cputime;
243	raw_spin_unlock(&cputimer->lock);
244}
245
246/**
247 * account_group_exec_runtime - Maintain exec runtime for a thread group.
248 *
249 * @tsk:	Pointer to task structure.
250 * @ns:		Time value by which to increment the sum_exec_runtime field
251 *		of the thread_group_cputime structure.
252 *
253 * If thread group time is being maintained, get the structure for the
254 * running CPU and update the sum_exec_runtime field there.
255 */
256static inline void account_group_exec_runtime(struct task_struct *tsk,
257					      unsigned long long ns)
258{
259	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
260
261	if (!cputimer_running(tsk))
262		return;
263
264	raw_spin_lock(&cputimer->lock);
265	cputimer->cputime.sum_exec_runtime += ns;
266	raw_spin_unlock(&cputimer->lock);
267}
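
One more sketch (not part of the file): sum_exec_runtime flows in from the scheduler's update_curr() path rather than from the tick directly. The stand-in below assumes delta_exec has already been computed from the rq clock.

static inline void example_charge_exec_time(struct task_struct *curtask,
					    unsigned long long delta_exec)
{
	curtask->se.sum_exec_runtime += delta_exec;	/* per-entity accounting, simplified */
	account_group_exec_runtime(curtask, delta_exec);
}
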
kernel/sched/stats.h (v5.14.15)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2
  3#ifdef CONFIG_SCHEDSTATS
  4
  5/*
  6 * Expects runqueue lock to be held for atomicity of update
  7 */
  8static inline void
  9rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
 10{
 11	if (rq) {
 12		rq->rq_sched_info.run_delay += delta;
 13		rq->rq_sched_info.pcount++;
 14	}
 15}
 16
 17/*
 18 * Expects runqueue lock to be held for atomicity of update
 19 */
 20static inline void
 21rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 22{
 23	if (rq)
 24		rq->rq_cpu_time += delta;
 25}
 26
 27static inline void
 28rq_sched_info_dequeue(struct rq *rq, unsigned long long delta)
 29{
 30	if (rq)
 31		rq->rq_sched_info.run_delay += delta;
 32}
 33#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
 34#define __schedstat_inc(var)		do { var++; } while (0)
 35#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
 36#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
 37#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
 38#define __schedstat_set(var, val)	do { var = (val); } while (0)
 39#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
 40#define   schedstat_val(var)		(var)
 41#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)
 42
 43#else /* !CONFIG_SCHEDSTATS: */
 44static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
 45static inline void rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) { }
 46static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
 47# define   schedstat_enabled()		0
 48# define __schedstat_inc(var)		do { } while (0)
 49# define   schedstat_inc(var)		do { } while (0)
 50# define __schedstat_add(var, amt)	do { } while (0)
 51# define   schedstat_add(var, amt)	do { } while (0)
 52# define __schedstat_set(var, val)	do { } while (0)
 53# define   schedstat_set(var, val)	do { } while (0)
 54# define   schedstat_val(var)		0
 55# define   schedstat_val_or_zero(var)	0
 56#endif /* CONFIG_SCHEDSTATS */
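
A usage sketch (not part of the file): unlike the v3.15 (rq, field) form, these macros now take a plain lvalue, the guarded variants are gated by the sched_schedstats static branch, and the double-underscore forms skip the check for paths that have already tested schedstat_enabled(). The helper below simply mirrors what rq_sched_info_arrive() does, using fields shown earlier in this file.

static inline void example_account_arrival(struct rq *rq, unsigned long long delta)
{
	if (!schedstat_enabled())
		return;
	__schedstat_add(rq->rq_sched_info.run_delay, delta);
	__schedstat_inc(rq->rq_sched_info.pcount);
}
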
 57
 58#ifdef CONFIG_PSI
 59/*
 60 * PSI tracks state that persists across sleeps, such as iowaits and
 61 * memory stalls. As a result, it has to distinguish between sleeps,
 62 * where a task's runnable state changes, and requeues, where a task
 63 * and its state are being moved between CPUs and runqueues.
 64 */
 65static inline void psi_enqueue(struct task_struct *p, bool wakeup)
 66{
 67	int clear = 0, set = TSK_RUNNING;
 68
 69	if (static_branch_likely(&psi_disabled))
 70		return;
 71
 72	if (!wakeup || p->sched_psi_wake_requeue) {
 73		if (p->in_memstall)
 74			set |= TSK_MEMSTALL;
 75		if (p->sched_psi_wake_requeue)
 76			p->sched_psi_wake_requeue = 0;
 77	} else {
 78		if (p->in_iowait)
 79			clear |= TSK_IOWAIT;
 80	}
 81
 82	psi_task_change(p, clear, set);
 83}
 84
 85static inline void psi_dequeue(struct task_struct *p, bool sleep)
 86{
 87	int clear = TSK_RUNNING;
 88
 89	if (static_branch_likely(&psi_disabled))
 90		return;
 91
 92	/*
 93	 * A voluntary sleep is a dequeue followed by a task switch. To
 94	 * avoid walking all ancestors twice, psi_task_switch() handles
 95	 * TSK_RUNNING and TSK_IOWAIT for us when it moves TSK_ONCPU.
 96	 * Do nothing here.
 97	 */
 98	if (sleep)
 99		return;
100
101	if (p->in_memstall)
102		clear |= TSK_MEMSTALL;
103
104	psi_task_change(p, clear, 0);
105}
106
107static inline void psi_ttwu_dequeue(struct task_struct *p)
108{
109	if (static_branch_likely(&psi_disabled))
110		return;
111	/*
112	 * Is the task being migrated during a wakeup? Make sure to
113	 * deregister its sleep-persistent psi states from the old
114	 * queue, and let psi_enqueue() know it has to requeue.
115	 */
116	if (unlikely(p->in_iowait || p->in_memstall)) {
117		struct rq_flags rf;
118		struct rq *rq;
119		int clear = 0;
120
121		if (p->in_iowait)
122			clear |= TSK_IOWAIT;
123		if (p->in_memstall)
124			clear |= TSK_MEMSTALL;
125
126		rq = __task_rq_lock(p, &rf);
127		psi_task_change(p, clear, 0);
128		p->sched_psi_wake_requeue = 1;
129		__task_rq_unlock(rq, &rf);
130	}
131}
132
133static inline void psi_sched_switch(struct task_struct *prev,
134				    struct task_struct *next,
135				    bool sleep)
136{
137	if (static_branch_likely(&psi_disabled))
138		return;
139
140	psi_task_switch(prev, next, sleep);
141}
142
143#else /* CONFIG_PSI */
144static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
145static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
146static inline void psi_ttwu_dequeue(struct task_struct *p) {}
147static inline void psi_sched_switch(struct task_struct *prev,
148				    struct task_struct *next,
149				    bool sleep) {}
150#endif /* CONFIG_PSI */
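
A simplified sketch (not part of the file): enqueue_task() and dequeue_task() in kernel/sched/core.c pass the wakeup/sleep distinction down to the PSI hooks above via the ENQUEUE_WAKEUP and DEQUEUE_SLEEP flags; everything else those functions do is elided here.

static inline void example_core_enqueue(struct rq *rq, struct task_struct *p, int flags)
{
	psi_enqueue(p, flags & ENQUEUE_WAKEUP);	/* true wakeup vs. plain requeue */
	/* sched_class enqueue, sched_info_enqueue() etc. elided */
}

static inline void example_core_dequeue(struct rq *rq, struct task_struct *p, int flags)
{
	psi_dequeue(p, flags & DEQUEUE_SLEEP);	/* voluntary sleep vs. migration/requeue */
	/* ... */
}
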
151
152#ifdef CONFIG_SCHED_INFO
153/*
154 * We are interested in knowing how long it was from the *first* time a
155 * task was queued to the time that it finally hit a CPU; we call this routine
156 * from dequeue_task() to account for possible rq->clock skew across CPUs. The
157 * delta taken on each CPU would annul the skew.
158 */
159static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
160{
161	unsigned long long delta = 0;
162
163	if (!t->sched_info.last_queued)
164		return;
165
166	delta = rq_clock(rq) - t->sched_info.last_queued;
167	t->sched_info.last_queued = 0;
168	t->sched_info.run_delay += delta;
169
170	rq_sched_info_dequeue(rq, delta);
171}
172
173/*
174 * Called when a task finally hits the CPU.  We can now calculate how
175 * long it was waiting to run.  We also note when it began so that we
176 * can keep stats on how long its timeslice is.
177 */
178static void sched_info_arrive(struct rq *rq, struct task_struct *t)
179{
180	unsigned long long now, delta = 0;
181
182	if (!t->sched_info.last_queued)
183		return;
184
185	now = rq_clock(rq);
186	delta = now - t->sched_info.last_queued;
187	t->sched_info.last_queued = 0;
188	t->sched_info.run_delay += delta;
189	t->sched_info.last_arrival = now;
190	t->sched_info.pcount++;
191
192	rq_sched_info_arrive(rq, delta);
193}
194
195/*
196 * This function is only called from enqueue_task(), and it updates
197 * the timestamp only if it is not already set.  It's assumed that
198 * sched_info_dequeue() will clear that stamp when appropriate.
199 */
200static inline void sched_info_enqueue(struct rq *rq, struct task_struct *t)
201{
202	if (!t->sched_info.last_queued)
203		t->sched_info.last_queued = rq_clock(rq);
204}
205
206/*
207 * Called when a process ceases being the active-running process involuntarily
208 * due, typically, to expiring its time slice (this may also be called when
209 * switching to the idle task).  Now we can calculate how long we ran.
210 * Also, if the process is still in the TASK_RUNNING state, call
211 * sched_info_enqueue() to mark that it has now again started waiting on
212 * the runqueue.
213 */
214static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
215{
216	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;
217
218	rq_sched_info_depart(rq, delta);
219
220	if (task_is_running(t))
221		sched_info_enqueue(rq, t);
222}
223
224/*
225 * Called when tasks are switched involuntarily due, typically, to expiring
226 * their time slice.  (This may also be called when switching to or from
227 * the idle task.)  We are only called when prev != next.
228 */
229static inline void
230sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
231{
232	/*
233	 * prev now departs the CPU.  It's not interesting to record
234	 * stats about how efficient we were at scheduling the idle
235	 * process, however.
236	 */
237	if (prev != rq->idle)
238		sched_info_depart(rq, prev);
239
240	if (next != rq->idle)
241		sched_info_arrive(rq, next);
242}
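
A rough sketch (not part of the file): in this version sched_info_switch() is called from prepare_task_switch(), and the PSI switch hook from the CONFIG_PSI block above runs in __schedule() once it is known whether prev is going to sleep. The stand-in below only shows the pairing; prev_sleeps is a hypothetical parameter standing in for whatever test the real caller uses.

static inline void
example_switch_bookkeeping(struct rq *rq, struct task_struct *prev,
			   struct task_struct *next, bool prev_sleeps)
{
	sched_info_switch(rq, prev, next);		/* depart/arrive accounting above */
	psi_sched_switch(prev, next, prev_sleeps);	/* PSI state handover */
}
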
243
244#else /* !CONFIG_SCHED_INFO: */
245# define sched_info_enqueue(rq, t)	do { } while (0)
246# define sched_info_dequeue(rq, t)	do { } while (0)
247# define sched_info_switch(rq, t, next)	do { } while (0)
248#endif /* CONFIG_SCHED_INFO */
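
An illustrative userspace sketch (not part of the file), assuming a kernel built with CONFIG_SCHED_INFO: the per-task run_delay and pcount maintained above are exported through /proc/<pid>/schedstat alongside the time spent on-CPU, and delay-accounting tools read them roughly like this.

#include <stdio.h>

int main(void)
{
	unsigned long long on_cpu, run_delay, pcount;
	FILE *f = fopen("/proc/self/schedstat", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu %llu %llu", &on_cpu, &run_delay, &pcount) == 3)
		printf("on_cpu=%lluns run_delay=%lluns timeslices=%llu\n",
		       on_cpu, run_delay, pcount);
	fclose(f);
	return 0;
}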