v5.4
/* SPDX-License-Identifier: GPL-2.0 */

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
#define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
#define __schedstat_inc(var)		do { var++; } while (0)
#define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
#define __schedstat_add(var, amt)	do { var += (amt); } while (0)
#define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
#define __schedstat_set(var, val)	do { var = (val); } while (0)
#define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
#define   schedstat_val(var)		(var)
#define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)

#else /* !CONFIG_SCHEDSTATS: */
static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
# define   schedstat_enabled()		0
# define __schedstat_inc(var)		do { } while (0)
# define   schedstat_inc(var)		do { } while (0)
# define __schedstat_add(var, amt)	do { } while (0)
# define   schedstat_add(var, amt)	do { } while (0)
# define __schedstat_set(var, val)	do { } while (0)
# define   schedstat_set(var, val)	do { } while (0)
# define   schedstat_val(var)		0
# define   schedstat_val_or_zero(var)	0
#endif /* CONFIG_SCHEDSTATS */
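
/*
 * Illustrative sketch, not part of this file: how a scheduler path
 * typically uses these macros.  schedstat_inc() folds the static-key
 * check into the macro and suits one-off updates; the __schedstat_*
 * variants skip the check, for callers that have already tested
 * schedstat_enabled() once.  The function below and the exact fields
 * touched are shown for illustration only.
 */
#if 0	/* example only */
static void example_account_wakeup(struct rq *rq, struct task_struct *p)
{
	/* One-off update: the enabled check is inside the macro. */
	schedstat_inc(rq->yld_count);

	/* Batched updates: test the static key once, then use __schedstat_*. */
	if (schedstat_enabled()) {
		__schedstat_inc(p->se.statistics.nr_wakeups);
		__schedstat_add(p->se.statistics.wait_sum, 1000ULL);
	}
}
#endif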

#ifdef CONFIG_PSI
/*
 * PSI tracks state that persists across sleeps, such as iowaits and
 * memory stalls. As a result, it has to distinguish between sleeps,
 * where a task's runnable state changes, and requeues, where a task
 * and its state are being moved between CPUs and runqueues.
 */
static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
	int clear = 0, set = TSK_RUNNING;

	if (static_branch_likely(&psi_disabled))
		return;

	if (!wakeup || p->sched_psi_wake_requeue) {
		if (p->flags & PF_MEMSTALL)
			set |= TSK_MEMSTALL;
		if (p->sched_psi_wake_requeue)
			p->sched_psi_wake_requeue = 0;
	} else {
		if (p->in_iowait)
			clear |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
	int clear = TSK_RUNNING, set = 0;

	if (static_branch_likely(&psi_disabled))
		return;

	if (!sleep) {
		if (p->flags & PF_MEMSTALL)
			clear |= TSK_MEMSTALL;
	} else {
		if (p->in_iowait)
			set |= TSK_IOWAIT;
	}

	psi_task_change(p, clear, set);
}

static inline void psi_ttwu_dequeue(struct task_struct *p)
{
	if (static_branch_likely(&psi_disabled))
		return;
	/*
	 * Is the task being migrated during a wakeup? Make sure to
	 * deregister its sleep-persistent psi states from the old
	 * queue, and let psi_enqueue() know it has to requeue.
	 */
	if (unlikely(p->in_iowait || (p->flags & PF_MEMSTALL))) {
		struct rq_flags rf;
		struct rq *rq;
		int clear = 0;

		if (p->in_iowait)
			clear |= TSK_IOWAIT;
		if (p->flags & PF_MEMSTALL)
			clear |= TSK_MEMSTALL;

		rq = __task_rq_lock(p, &rf);
		psi_task_change(p, clear, 0);
		p->sched_psi_wake_requeue = 1;
		__task_rq_unlock(rq, &rf);
	}
}
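
/*
 * Illustrative sketch, not part of this file: try_to_wake_up() invokes
 * this hook when it decides to place the waking task on a different
 * CPU.  Simplified; WF_MIGRATED and set_task_cpu() are names from the
 * core scheduler, and the surrounding wakeup code is elided.
 */
#if 0	/* example only */
	/* inside try_to_wake_up(), after a target cpu has been chosen: */
	if (task_cpu(p) != cpu) {
		wake_flags |= WF_MIGRATED;
		psi_ttwu_dequeue(p);	/* clear psi state on the old rq */
		set_task_cpu(p, cpu);
	}
#endif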

static inline void psi_task_tick(struct rq *rq)
{
	if (static_branch_likely(&psi_disabled))
		return;

	if (unlikely(rq->curr->flags & PF_MEMSTALL))
		psi_memstall_tick(rq->curr, cpu_of(rq));
}
#else /* CONFIG_PSI */
static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
static inline void psi_ttwu_dequeue(struct task_struct *p) {}
static inline void psi_task_tick(struct rq *rq) {}
#endif /* CONFIG_PSI */
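
/*
 * Illustrative sketch, not part of this file: how the core scheduler
 * pairs these hooks.  Simplified from enqueue_task()/dequeue_task();
 * the ENQUEUE_WAKEUP/DEQUEUE_SLEEP flags are real, but the bodies are
 * reduced to the PSI calls only.
 */
#if 0	/* example only */
static void example_enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
	/* A wakeup starts a new runnable episode; a requeue does not. */
	psi_enqueue(p, flags & ENQUEUE_WAKEUP);
	/* ... p->sched_class->enqueue_task(rq, p, flags) ... */
}

static void example_dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
	/* Only a sleep ends the runnable episode; migration keeps it. */
	psi_dequeue(p, flags & DEQUEUE_SLEEP);
	/* ... p->sched_class->dequeue_task(rq, p, flags) ... */
}
#endif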

#ifdef CONFIG_SCHED_INFO
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a CPU. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across CPUs; the delta taken on each CPU would annul the skew.
 */
static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (sched_info_on()) {
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	}
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(rq, delta);
}

/*
 * Called when a task finally hits the CPU.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct rq *rq, struct task_struct *t)
{
	unsigned long long now = rq_clock(rq), delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(rq, delta);
}
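
/*
 * Worked example with illustrative numbers: a task is enqueued when
 * rq_clock() == 100000 ns, so last_queued = 100000.  It first gets the
 * CPU when rq_clock() == 160000 ns: sched_info_arrive() computes
 * delta = 160000 - 100000 = 60000 ns, adds it to run_delay, records
 * last_arrival = 160000, and bumps pcount.  If the task had instead
 * been dequeued for migration, sched_info_dequeued() would have folded
 * the partial wait into run_delay using the old CPU's clock and zeroed
 * last_queued, so the re-enqueue restarts the stamp on the new CPU's
 * clock and any absolute offset between the two clocks cancels out.
 */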

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
{
	if (sched_info_on()) {
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = rq_clock(rq);
	}
}

/*
 * Called when a process ceases being the active-running process
 * involuntarily, typically because it expired its time slice (this may
 * also be called when switching to the idle task).  Now we can
 * calculate how long we ran.  Also, if the process is still in the
 * TASK_RUNNING state, call sched_info_queued() to mark that it has now
 * again started waiting on the runqueue.
 */
static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
{
	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

	rq_sched_info_depart(rq, delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(rq, t);
}

/*
 * Called when tasks are switched involuntarily, typically because they
 * expired their time slice.  (This may also be called when switching
 * to or from the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	/*
	 * prev now departs the CPU.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(rq, prev);

	if (next != rq->idle)
		sched_info_arrive(rq, next);
}

static inline void
sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
{
	if (sched_info_on())
		__sched_info_switch(rq, prev, next);
}
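
/*
 * Illustrative sketch, not part of this file: the context-switch path
 * calls sched_info_switch() once prev and next are both known.  In the
 * real kernel the call sits in prepare_task_switch(); the function
 * below is a simplified, hypothetical stand-in with the surrounding
 * switch machinery elided.
 */
#if 0	/* example only */
static void example_context_switch(struct rq *rq,
				   struct task_struct *prev,
				   struct task_struct *next)
{
	/* prev departs, next arrives; the idle task is filtered inside. */
	sched_info_switch(rq, prev, next);
	/* ... switch_to(prev, next, prev) ... */
}
#endif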

#else /* !CONFIG_SCHED_INFO: */
# define sched_info_queued(rq, t)	do { } while (0)
# define sched_info_reset_dequeued(t)	do { } while (0)
# define sched_info_dequeued(rq, t)	do { } while (0)
# define sched_info_depart(rq, t)	do { } while (0)
# define sched_info_arrive(rq, next)	do { } while (0)
# define sched_info_switch(rq, t, next)	do { } while (0)
#endif /* CONFIG_SCHED_INFO */
v3.5.6
 

#ifdef CONFIG_SCHEDSTATS

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}

static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
# define schedstat_set(var, val)	do { var = (val); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
# define schedstat_set(var, val)	do { } while (0)
#endif
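
/*
 * Illustrative sketch, not part of this file: in this older kernel the
 * schedstat macros took the stats-holding object explicitly, i.e.
 * schedstat_inc(rq, field) rather than the later schedstat_inc(rq->field)
 * form, and there was no static-key gate.  A hypothetical caller, with
 * the field name shown for illustration only:
 */
#if 0	/* example only */
static void example_yield(struct rq *rq)
{
	schedstat_inc(rq, yld_count);	/* expands to (rq)->yld_count++ */
}
#endif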

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * We are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu. We call this
 * routine from dequeue_task() to account for possible rq->clock skew
 * across cpus; the delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}

/*
 * This function is only called from enqueue_task(), and it only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}

/*
 * Called when tasks are switched involuntarily, typically because they
 * expired their time slice.  (This may also be called when switching
 * to or from the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}

static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
#else /* !CONFIG_SCHEDSTATS && !CONFIG_TASK_DELAY_ACCT */
#define sched_info_queued(t)		do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(t)		do { } while (0)
#define sched_info_switch(t, next)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.utime += cputime;
	raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.stime += cputime;
	raw_spin_unlock(&cputimer->lock);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	raw_spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	raw_spin_unlock(&cputimer->lock);
}
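
/*
 * Illustrative sketch, not part of this file: these helpers are called
 * from the timer-tick accounting path (in this era, the real callers
 * live in account_user_time()/account_system_time()).  The function
 * below is a simplified, hypothetical caller:
 */
#if 0	/* example only */
static void example_account_tick(struct task_struct *p, cputime_t cputime,
				 int user_tick)
{
	if (user_tick)
		account_group_user_time(p, cputime);	/* fold into group utime */
	else
		account_group_system_time(p, cputime);	/* fold into group stime */
}
#endif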