v4.17
  1/*
  2 * Simple CPU accounting cgroup controller
  3 */
  4#include "sched.h"
  5
  6#ifdef CONFIG_IRQ_TIME_ACCOUNTING
  7
  8/*
  9 * There are no locks covering percpu hardirq/softirq time.
 10 * They are only modified in vtime_account, on corresponding CPU
 11 * with interrupts disabled. So, writes are safe.
 12 * They are read and saved off onto struct rq in update_rq_clock().
 13 * This may result in other CPU reading this CPU's irq time and can
 14 * race with irq/vtime_account on this CPU. We would either get old
 15 * or new value with a side effect of accounting a slice of irq time to wrong
 16 * task when irq is in progress while we read rq->clock. That is a worthy
 17 * compromise in place of having locks on each irq in account_system_time.
 18 */
 19DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
 20
 21static int sched_clock_irqtime;
 22
 23void enable_sched_clock_irqtime(void)
 24{
 25	sched_clock_irqtime = 1;
 26}
 27
 28void disable_sched_clock_irqtime(void)
 29{
 30	sched_clock_irqtime = 0;
 31}
 32
 33static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
 34				  enum cpu_usage_stat idx)
 35{
 36	u64 *cpustat = kcpustat_this_cpu->cpustat;
 37
 38	u64_stats_update_begin(&irqtime->sync);
 39	cpustat[idx] += delta;
 40	irqtime->total += delta;
 41	irqtime->tick_delta += delta;
 42	u64_stats_update_end(&irqtime->sync);
 43}
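
The locking comment above covers the write side; readers retry on the same u64_stats seqcount (the in-tree reader, irq_time_read() in kernel/sched/sched.h, has this shape). A minimal reader sketch, with a hypothetical helper name:

static u64 irqtime_read_total(int cpu)
{
	struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
	unsigned int seq;
	u64 total;

	do {
		/* On 32-bit kernels the seqcount forces a retry if a writer raced with us. */
		seq = u64_stats_fetch_begin(&irqtime->sync);
		total = irqtime->total;
	} while (u64_stats_fetch_retry(&irqtime->sync, seq));

	return total;
}
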
 44
 45/*
 46 * Called before incrementing preempt_count on {soft,}irq_enter
 47 * and before decrementing preempt_count on {soft,}irq_exit.
 48 */
 49void irqtime_account_irq(struct task_struct *curr)
 50{
  51	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
 52	s64 delta;
 53	int cpu;
 54
 55	if (!sched_clock_irqtime)
 56		return;
 57
 58	cpu = smp_processor_id();
 59	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
  60	irqtime->irq_start_time += delta;
 61
 62	/*
 63	 * We do not account for softirq time from ksoftirqd here.
 64	 * We want to continue accounting softirq time to ksoftirqd thread
 65	 * in that case, so as not to confuse scheduler with a special task
  66	 * that does not consume any time, but still wants to run.
 67	 */
 68	if (hardirq_count())
 69		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
 70	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
 71		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
 72}
 73EXPORT_SYMBOL_GPL(irqtime_account_irq);
 74
 75static u64 irqtime_tick_accounted(u64 maxtime)
 76{
 77	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
 78	u64 delta;
 79
 80	delta = min(irqtime->tick_delta, maxtime);
 81	irqtime->tick_delta -= delta;
 82
 83	return delta;
 84}
 85
 86#else /* CONFIG_IRQ_TIME_ACCOUNTING */
 87
 88#define sched_clock_irqtime	(0)
 89
 90static u64 irqtime_tick_accounted(u64 dummy)
 91{
 92	return 0;
 93}
 94
 95#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
 96
 97static inline void task_group_account_field(struct task_struct *p, int index,
 98					    u64 tmp)
 99{
100	/*
101	 * Since all updates are sure to touch the root cgroup, we
102	 * get ourselves ahead and touch it first. If the root cgroup
103	 * is the only cgroup, then nothing else should be necessary.
104	 *
105	 */
106	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);
107
108	cgroup_account_cputime_field(p, index, tmp);
109}
110
111/*
112 * Account user CPU time to a process.
113 * @p: the process that the CPU time gets accounted to
114 * @cputime: the CPU time spent in user space since the last update
115 */
116void account_user_time(struct task_struct *p, u64 cputime)
117{
118	int index;
119
120	/* Add user time to process. */
121	p->utime += cputime;
122	account_group_user_time(p, cputime);
123
124	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
125
126	/* Add user time to cpustat. */
127	task_group_account_field(p, index, cputime);
128
129	/* Account for user time used */
130	acct_account_cputime(p);
131}
132
133/*
134 * Account guest CPU time to a process.
135 * @p: the process that the CPU time gets accounted to
136 * @cputime: the CPU time spent in virtual machine since the last update
137 */
138void account_guest_time(struct task_struct *p, u64 cputime)
139{
140	u64 *cpustat = kcpustat_this_cpu->cpustat;
141
142	/* Add guest time to process. */
143	p->utime += cputime;
144	account_group_user_time(p, cputime);
145	p->gtime += cputime;
146
147	/* Add guest time to cpustat. */
148	if (task_nice(p) > 0) {
149		cpustat[CPUTIME_NICE] += cputime;
150		cpustat[CPUTIME_GUEST_NICE] += cputime;
151	} else {
152		cpustat[CPUTIME_USER] += cputime;
153		cpustat[CPUTIME_GUEST] += cputime;
154	}
155}
156
157/*
158 * Account system CPU time to a process and desired cpustat field
159 * @p: the process that the CPU time gets accounted to
160 * @cputime: the CPU time spent in kernel space since the last update
161 * @index: pointer to cpustat field that has to be updated
162 */
163void account_system_index_time(struct task_struct *p,
164			       u64 cputime, enum cpu_usage_stat index)
165{
166	/* Add system time to process. */
167	p->stime += cputime;
168	account_group_system_time(p, cputime);
169
170	/* Add system time to cpustat. */
171	task_group_account_field(p, index, cputime);
172
173	/* Account for system time used */
174	acct_account_cputime(p);
175}
176
177/*
178 * Account system CPU time to a process.
179 * @p: the process that the CPU time gets accounted to
180 * @hardirq_offset: the offset to subtract from hardirq_count()
181 * @cputime: the CPU time spent in kernel space since the last update
182 */
183void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
184{
185	int index;
186
187	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
188		account_guest_time(p, cputime);
189		return;
190	}
191
192	if (hardirq_count() - hardirq_offset)
193		index = CPUTIME_IRQ;
194	else if (in_serving_softirq())
195		index = CPUTIME_SOFTIRQ;
196	else
197		index = CPUTIME_SYSTEM;
198
199	account_system_index_time(p, cputime, index);
200}
201
202/*
203 * Account for involuntary wait time.
204 * @cputime: the CPU time spent in involuntary wait
205 */
206void account_steal_time(u64 cputime)
207{
208	u64 *cpustat = kcpustat_this_cpu->cpustat;
209
210	cpustat[CPUTIME_STEAL] += cputime;
211}
212
213/*
214 * Account for idle time.
215 * @cputime: the CPU time spent in idle wait
216 */
217void account_idle_time(u64 cputime)
218{
219	u64 *cpustat = kcpustat_this_cpu->cpustat;
220	struct rq *rq = this_rq();
221
222	if (atomic_read(&rq->nr_iowait) > 0)
223		cpustat[CPUTIME_IOWAIT] += cputime;
224	else
225		cpustat[CPUTIME_IDLE] += cputime;
226}
 227
228/*
229 * When a guest is interrupted for a longer amount of time, missed clock
230 * ticks are not redelivered later. Due to that, this function may on
231 * occasion account more time than the calling functions think elapsed.
232 */
233static __always_inline u64 steal_account_process_time(u64 maxtime)
234{
235#ifdef CONFIG_PARAVIRT
236	if (static_key_false(&paravirt_steal_enabled)) {
237		u64 steal;
238
239		steal = paravirt_steal_clock(smp_processor_id());
240		steal -= this_rq()->prev_steal_time;
241		steal = min(steal, maxtime);
242		account_steal_time(steal);
243		this_rq()->prev_steal_time += steal;
244
245		return steal;
246	}
247#endif
248	return 0;
249}
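
A worked example: if paravirt_steal_clock() reports 12 ms of lifetime steal for this vCPU and prev_steal_time holds 4 ms, the new delta is 8 ms; with maxtime = ULONG_MAX (as passed from account_process_tick() and account_idle_ticks() below) all 8 ms are accounted as steal, and prev_steal_time advances to 12 ms, so the next call only sees steal accrued after that point.
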
250
251/*
252 * Account how much elapsed time was spent in steal, irq, or softirq time.
253 */
254static inline u64 account_other_time(u64 max)
255{
256	u64 accounted;
257
258	lockdep_assert_irqs_disabled();
259
260	accounted = steal_account_process_time(max);
261
262	if (accounted < max)
263		accounted += irqtime_tick_accounted(max - accounted);
264
265	return accounted;
266}
267
268#ifdef CONFIG_64BIT
269static inline u64 read_sum_exec_runtime(struct task_struct *t)
270{
271	return t->se.sum_exec_runtime;
272}
273#else
274static u64 read_sum_exec_runtime(struct task_struct *t)
275{
276	u64 ns;
277	struct rq_flags rf;
278	struct rq *rq;
279
280	rq = task_rq_lock(t, &rf);
281	ns = t->se.sum_exec_runtime;
282	task_rq_unlock(rq, t, &rf);
283
284	return ns;
285}
286#endif
287
288/*
289 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
290 * tasks (sum on group iteration) belonging to @tsk's group.
291 */
292void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
293{
294	struct signal_struct *sig = tsk->signal;
295	u64 utime, stime;
296	struct task_struct *t;
297	unsigned int seq, nextseq;
298	unsigned long flags;
299
300	/*
301	 * Update current task runtime to account pending time since last
302	 * scheduler action or thread_group_cputime() call. This thread group
303	 * might have other running tasks on different CPUs, but updating
 304	 * their runtime can affect syscall performance, so we skip accounting
305	 * those pending times and rely only on values updated on tick or
306	 * other scheduler action.
307	 */
308	if (same_thread_group(current, tsk))
309		(void) task_sched_runtime(current);
310
311	rcu_read_lock();
312	/* Attempt a lockless read on the first round. */
313	nextseq = 0;
314	do {
315		seq = nextseq;
316		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
317		times->utime = sig->utime;
318		times->stime = sig->stime;
319		times->sum_exec_runtime = sig->sum_sched_runtime;
320
321		for_each_thread(tsk, t) {
322			task_cputime(t, &utime, &stime);
323			times->utime += utime;
324			times->stime += stime;
325			times->sum_exec_runtime += read_sum_exec_runtime(t);
326		}
327		/* If lockless access failed, take the lock. */
328		nextseq = 1;
329	} while (need_seqretry(&sig->stats_lock, seq));
330	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
331	rcu_read_unlock();
332}
333
334#ifdef CONFIG_IRQ_TIME_ACCOUNTING
335/*
336 * Account a tick to a process and cpustat
337 * @p: the process that the CPU time gets accounted to
338 * @user_tick: is the tick from userspace
339 * @rq: the pointer to rq
340 *
341 * Tick demultiplexing follows the order
342 * - pending hardirq update
343 * - pending softirq update
344 * - user_time
345 * - idle_time
346 * - system time
347 *   - check for guest_time
348 *   - else account as system_time
349 *
350 * Check for hardirq is done both for system and user time as there is
351 * no timer going off while we are on hardirq and hence we may never get an
352 * opportunity to update it solely in system time.
353 * p->stime and friends are only updated on system time and not on irq
354 * softirq as those do not count in task exec_runtime any more.
355 */
356static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
357					 struct rq *rq, int ticks)
358{
359	u64 other, cputime = TICK_NSEC * ticks;
360
361	/*
362	 * When returning from idle, many ticks can get accounted at
363	 * once, including some ticks of steal, irq, and softirq time.
364	 * Subtract those ticks from the amount of time accounted to
365	 * idle, or potentially user or system time. Due to rounding,
366	 * other time can exceed ticks occasionally.
367	 */
368	other = account_other_time(ULONG_MAX);
369	if (other >= cputime)
370		return;
371
372	cputime -= other;
373
374	if (this_cpu_ksoftirqd() == p) {
375		/*
 376		 * ksoftirqd time does not get accounted in cpu_softirq_time.
377		 * So, we have to handle it separately here.
378		 * Also, p->stime needs to be updated for ksoftirqd.
379		 */
380		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
381	} else if (user_tick) {
382		account_user_time(p, cputime);
383	} else if (p == rq->idle) {
384		account_idle_time(cputime);
385	} else if (p->flags & PF_VCPU) { /* System time or guest time */
386		account_guest_time(p, cputime);
387	} else {
388		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
389	}
390}
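
A worked example of the subtraction above: returning from a long idle stretch with ticks = 10 on an HZ=250 kernel gives cputime = 10 * TICK_NSEC = 40 ms; if account_other_time() reports 6 ms of steal plus 2 ms of hardirq/softirq time already charged through irqtime_account_delta(), only the remaining 32 ms falls through to the ksoftirqd/user/idle/guest/system branches. (HZ=250 is only an example configuration.)
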
391
392static void irqtime_account_idle_ticks(int ticks)
393{
394	struct rq *rq = this_rq();
395
396	irqtime_account_process_tick(current, 0, rq, ticks);
397}
398#else /* CONFIG_IRQ_TIME_ACCOUNTING */
399static inline void irqtime_account_idle_ticks(int ticks) { }
400static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
401						struct rq *rq, int nr_ticks) { }
402#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
403
404/*
405 * Use precise platform statistics if available:
406 */
407#ifdef CONFIG_VIRT_CPU_ACCOUNTING
408# ifndef __ARCH_HAS_VTIME_TASK_SWITCH
409void vtime_common_task_switch(struct task_struct *prev)
410{
411	if (is_idle_task(prev))
412		vtime_account_idle(prev);
413	else
414		vtime_account_system(prev);
415
416	vtime_flush(prev);
417	arch_vtime_task_switch(prev);
418}
419# endif
420#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
421
422
423#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
424/*
425 * Archs that account the whole time spent in the idle task
426 * (outside irq) as idle time can rely on this and just implement
427 * vtime_account_system() and vtime_account_idle(). Archs that
428 * have other meaning of the idle time (s390 only includes the
429 * time spent by the CPU when it's in low power mode) must override
430 * vtime_account().
431 */
432#ifndef __ARCH_HAS_VTIME_ACCOUNT
433void vtime_account_irq_enter(struct task_struct *tsk)
434{
 435	if (!in_interrupt() && is_idle_task(tsk))
436		vtime_account_idle(tsk);
437	else
 438		vtime_account_system(tsk);
439}
440EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
441#endif /* __ARCH_HAS_VTIME_ACCOUNT */
442
443void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
444		    u64 *ut, u64 *st)
445{
446	*ut = curr->utime;
447	*st = curr->stime;
448}
449
450void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
451{
452	*ut = p->utime;
453	*st = p->stime;
454}
455EXPORT_SYMBOL_GPL(task_cputime_adjusted);
456
457void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
458{
459	struct task_cputime cputime;
460
461	thread_group_cputime(p, &cputime);
462
463	*ut = cputime.utime;
464	*st = cputime.stime;
465}
466
467#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
468
469/*
470 * Account a single tick of CPU time.
471 * @p: the process that the CPU time gets accounted to
472 * @user_tick: indicates if the tick is a user or a system tick
473 */
474void account_process_tick(struct task_struct *p, int user_tick)
475{
476	u64 cputime, steal;
477	struct rq *rq = this_rq();
478
479	if (vtime_accounting_cpu_enabled())
480		return;
481
482	if (sched_clock_irqtime) {
483		irqtime_account_process_tick(p, user_tick, rq, 1);
484		return;
485	}
486
487	cputime = TICK_NSEC;
488	steal = steal_account_process_time(ULONG_MAX);
489
490	if (steal >= cputime)
491		return;
492
493	cputime -= steal;
494
495	if (user_tick)
496		account_user_time(p, cputime);
497	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
498		account_system_time(p, HARDIRQ_OFFSET, cputime);
499	else
500		account_idle_time(cputime);
501}
502
503/*
504 * Account multiple ticks of idle time.
505 * @ticks: number of stolen ticks
506 */
507void account_idle_ticks(unsigned long ticks)
508{
509	u64 cputime, steal;
510
511	if (sched_clock_irqtime) {
512		irqtime_account_idle_ticks(ticks);
513		return;
514	}
515
516	cputime = ticks * TICK_NSEC;
517	steal = steal_account_process_time(ULONG_MAX);
518
519	if (steal >= cputime)
520		return;
521
522	cputime -= steal;
523	account_idle_time(cputime);
524}
525
526/*
527 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 528 * losing precision when the numbers are big.
529 */
530static u64 scale_stime(u64 stime, u64 rtime, u64 total)
531{
532	u64 scaled;
533
534	for (;;) {
535		/* Make sure "rtime" is the bigger of stime/rtime */
536		if (stime > rtime)
537			swap(rtime, stime);
538
539		/* Make sure 'total' fits in 32 bits */
540		if (total >> 32)
541			goto drop_precision;
542
543		/* Does rtime (and thus stime) fit in 32 bits? */
544		if (!(rtime >> 32))
545			break;
546
547		/* Can we just balance rtime/stime rather than dropping bits? */
548		if (stime >> 31)
549			goto drop_precision;
550
551		/* We can grow stime and shrink rtime and try to make them both fit */
552		stime <<= 1;
553		rtime >>= 1;
554		continue;
555
556drop_precision:
557		/* We drop from rtime, it has more bits than stime */
558		rtime >>= 1;
559		total >>= 1;
560	}
561
562	/*
563	 * Make sure gcc understands that this is a 32x32->64 multiply,
564	 * followed by a 64/32->64 divide.
565	 */
566	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
567	return scaled;
568}
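
To see how little precision the bit-dropping loop above costs, it can be compared against an exact 128-bit computation in userspace. The sketch below is illustrative only: scale_stime_demo() and the sample figures are invented, div_u64() becomes a plain 64-bit divide, and unsigned __int128 assumes GCC or Clang.

#include <stdint.h>
#include <stdio.h>

/* Userspace copy of the scale_stime() loop above. */
static uint64_t scale_stime_demo(uint64_t stime, uint64_t rtime, uint64_t total)
{
	for (;;) {
		if (stime > rtime) {			/* keep rtime the bigger one */
			uint64_t tmp = stime;
			stime = rtime;
			rtime = tmp;
		}
		if (total >> 32)			/* 'total' must fit in 32 bits */
			goto drop_precision;
		if (!(rtime >> 32))			/* rtime (and stime) already fit? */
			break;
		if (stime >> 31)			/* cannot rebalance, drop bits instead */
			goto drop_precision;
		stime <<= 1;				/* grow stime, shrink rtime */
		rtime >>= 1;
		continue;
drop_precision:
		rtime >>= 1;				/* rtime has more bits than stime */
		total >>= 1;
	}
	return (uint64_t)(uint32_t)stime * (uint32_t)rtime / (uint32_t)total;
}

int main(void)
{
	uint64_t stime = 3ULL * 3600 * 1000000000;	/* 3 h of tick-sampled system time (ns) */
	uint64_t utime = 1ULL * 3600 * 1000000000;	/* 1 h of tick-sampled user time (ns) */
	uint64_t rtime = 5ULL * 3600 * 1000000000;	/* 5 h of sched_clock() runtime (ns) */

	uint64_t approx = scale_stime_demo(stime, rtime, stime + utime);
	uint64_t exact  = (uint64_t)((unsigned __int128)stime * rtime / (stime + utime));

	printf("approx = %llu ns, exact = %llu ns\n",
	       (unsigned long long)approx, (unsigned long long)exact);
	return 0;
}
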
569
570/*
571 * Adjust tick based cputime random precision against scheduler runtime
572 * accounting.
573 *
 574 * Tick based cputime accounting depends on random scheduling timeslices of a
575 * task to be interrupted or not by the timer.  Depending on these
576 * circumstances, the number of these interrupts may be over or
577 * under-optimistic, matching the real user and system cputime with a variable
578 * precision.
579 *
580 * Fix this by scaling these tick based values against the total runtime
581 * accounted by the CFS scheduler.
582 *
583 * This code provides the following guarantees:
584 *
585 *   stime + utime == rtime
586 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
587 *
588 * Assuming that rtime_i+1 >= rtime_i.
589 */
590void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
591		    u64 *ut, u64 *st)
592{
593	u64 rtime, stime, utime;
594	unsigned long flags;
595
596	/* Serialize concurrent callers such that we can honour our guarantees */
597	raw_spin_lock_irqsave(&prev->lock, flags);
598	rtime = curr->sum_exec_runtime;
599
600	/*
601	 * This is possible under two circumstances:
602	 *  - rtime isn't monotonic after all (a bug);
603	 *  - we got reordered by the lock.
604	 *
605	 * In both cases this acts as a filter such that the rest of the code
606	 * can assume it is monotonic regardless of anything else.
607	 */
608	if (prev->stime + prev->utime >= rtime)
609		goto out;
610
611	stime = curr->stime;
612	utime = curr->utime;
613
614	/*
615	 * If either stime or utime are 0, assume all runtime is userspace.
 616	 * Once a task gets some ticks, the monotonicity code at 'update:'
617	 * will ensure things converge to the observed ratio.
618	 */
619	if (stime == 0) {
620		utime = rtime;
621		goto update;
622	}
623
624	if (utime == 0) {
625		stime = rtime;
626		goto update;
627	}
628
 629	stime = scale_stime(stime, rtime, stime + utime);
630
631update:
632	/*
633	 * Make sure stime doesn't go backwards; this preserves monotonicity
634	 * for utime because rtime is monotonic.
635	 *
636	 *  utime_i+1 = rtime_i+1 - stime_i
637	 *            = rtime_i+1 - (rtime_i - utime_i)
638	 *            = (rtime_i+1 - rtime_i) + utime_i
639	 *            >= utime_i
640	 */
641	if (stime < prev->stime)
642		stime = prev->stime;
643	utime = rtime - stime;
644
645	/*
646	 * Make sure utime doesn't go backwards; this still preserves
647	 * monotonicity for stime, analogous argument to above.
648	 */
649	if (utime < prev->utime) {
650		utime = prev->utime;
651		stime = rtime - utime;
652	}
653
654	prev->stime = stime;
655	prev->utime = utime;
656out:
657	*ut = prev->utime;
658	*st = prev->stime;
659	raw_spin_unlock_irqrestore(&prev->lock, flags);
660}
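
The clamping above can be exercised with a small userspace simulation: raw tick-based samples whose user/system split wobbles still yield adjusted values that never decrease and always sum to rtime. A rough sketch with simplified stand-in structures (made-up names and numbers; the scale_stime() step is omitted, so the raw split is used directly):

#include <stdint.h>
#include <stdio.h>

struct prev { uint64_t utime, stime; };		/* stand-in for struct prev_cputime */
struct curr { uint64_t utime, stime, rtime; };	/* stand-in for struct task_cputime */

/* Same monotonicity clamping as cputime_adjust(), minus locking and scaling. */
static void adjust(const struct curr *c, struct prev *p, uint64_t *ut, uint64_t *st)
{
	uint64_t rtime = c->rtime, stime, utime;

	if (p->stime + p->utime >= rtime)
		goto out;

	stime = c->stime;	/* pretend the scaled split equals the raw tick split */
	utime = c->utime;

	if (stime < p->stime)	/* stime must not go backwards */
		stime = p->stime;
	utime = rtime - stime;

	if (utime < p->utime) {	/* neither must utime */
		utime = p->utime;
		stime = rtime - utime;
	}

	p->stime = stime;
	p->utime = utime;
out:
	*ut = p->utime;
	*st = p->stime;
}

int main(void)
{
	struct prev p = { 0, 0 };
	/* rtime grows monotonically, the raw utime/stime split wobbles. */
	struct curr samples[] = {
		{ 40, 60, 100 }, { 40, 80, 130 }, { 90, 50, 150 }, { 90, 90, 200 },
	};

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t ut, st;

		adjust(&samples[i], &p, &ut, &st);
		printf("rtime=%3llu -> utime=%3llu stime=%3llu\n",
		       (unsigned long long)samples[i].rtime,
		       (unsigned long long)ut, (unsigned long long)st);
	}
	return 0;
}
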
661
662void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
663{
664	struct task_cputime cputime = {
665		.sum_exec_runtime = p->se.sum_exec_runtime,
666	};
667
 668	task_cputime(p, &cputime.utime, &cputime.stime);
669	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
670}
671EXPORT_SYMBOL_GPL(task_cputime_adjusted);
672
673void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
674{
675	struct task_cputime cputime;
676
677	thread_group_cputime(p, &cputime);
678	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
679}
680#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
681
682#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
683static u64 vtime_delta(struct vtime *vtime)
684{
685	unsigned long long clock;
686
687	clock = sched_clock();
688	if (clock < vtime->starttime)
689		return 0;
690
691	return clock - vtime->starttime;
692}
693
694static u64 get_vtime_delta(struct vtime *vtime)
695{
696	u64 delta = vtime_delta(vtime);
697	u64 other;
698
699	/*
700	 * Unlike tick based timing, vtime based timing never has lost
701	 * ticks, and no need for steal time accounting to make up for
702	 * lost ticks. Vtime accounts a rounded version of actual
703	 * elapsed time. Limit account_other_time to prevent rounding
704	 * errors from causing elapsed vtime to go negative.
705	 */
706	other = account_other_time(delta);
707	WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
708	vtime->starttime += delta;
709
710	return delta - other;
711}
712
713static void __vtime_account_system(struct task_struct *tsk,
714				   struct vtime *vtime)
715{
716	vtime->stime += get_vtime_delta(vtime);
717	if (vtime->stime >= TICK_NSEC) {
718		account_system_time(tsk, irq_count(), vtime->stime);
719		vtime->stime = 0;
720	}
721}
722
723static void vtime_account_guest(struct task_struct *tsk,
724				struct vtime *vtime)
725{
726	vtime->gtime += get_vtime_delta(vtime);
727	if (vtime->gtime >= TICK_NSEC) {
728		account_guest_time(tsk, vtime->gtime);
729		vtime->gtime = 0;
730	}
731}
732
 733void vtime_account_system(struct task_struct *tsk)
734{
735	struct vtime *vtime = &tsk->vtime;
736
737	if (!vtime_delta(vtime))
738		return;
739
740	write_seqcount_begin(&vtime->seqcount);
741	/* We might have scheduled out from guest path */
742	if (current->flags & PF_VCPU)
743		vtime_account_guest(tsk, vtime);
744	else
745		__vtime_account_system(tsk, vtime);
746	write_seqcount_end(&vtime->seqcount);
747}
748
749void vtime_user_enter(struct task_struct *tsk)
750{
751	struct vtime *vtime = &tsk->vtime;
752
753	write_seqcount_begin(&vtime->seqcount);
754	__vtime_account_system(tsk, vtime);
755	vtime->state = VTIME_USER;
756	write_seqcount_end(&vtime->seqcount);
757}
758
759void vtime_user_exit(struct task_struct *tsk)
760{
761	struct vtime *vtime = &tsk->vtime;
762
763	write_seqcount_begin(&vtime->seqcount);
764	vtime->utime += get_vtime_delta(vtime);
765	if (vtime->utime >= TICK_NSEC) {
766		account_user_time(tsk, vtime->utime);
767		vtime->utime = 0;
768	}
769	vtime->state = VTIME_SYS;
770	write_seqcount_end(&vtime->seqcount);
771}
772
773void vtime_guest_enter(struct task_struct *tsk)
774{
775	struct vtime *vtime = &tsk->vtime;
776	/*
777	 * The flags must be updated under the lock with
778	 * the vtime_starttime flush and update.
779	 * That enforces a right ordering and update sequence
780	 * synchronization against the reader (task_gtime())
781	 * that can thus safely catch up with a tickless delta.
782	 */
783	write_seqcount_begin(&vtime->seqcount);
784	__vtime_account_system(tsk, vtime);
 785	current->flags |= PF_VCPU;
786	write_seqcount_end(&vtime->seqcount);
787}
788EXPORT_SYMBOL_GPL(vtime_guest_enter);
789
790void vtime_guest_exit(struct task_struct *tsk)
791{
792	struct vtime *vtime = &tsk->vtime;
793
794	write_seqcount_begin(&vtime->seqcount);
795	vtime_account_guest(tsk, vtime);
 796	current->flags &= ~PF_VCPU;
797	write_seqcount_end(&vtime->seqcount);
798}
799EXPORT_SYMBOL_GPL(vtime_guest_exit);
800
801void vtime_account_idle(struct task_struct *tsk)
802{
803	account_idle_time(get_vtime_delta(&tsk->vtime));
804}
805
806void arch_vtime_task_switch(struct task_struct *prev)
807{
808	struct vtime *vtime = &prev->vtime;
809
 810	write_seqcount_begin(&vtime->seqcount);
 811	vtime->state = VTIME_INACTIVE;
812	write_seqcount_end(&vtime->seqcount);
813
814	vtime = &current->vtime;
815
816	write_seqcount_begin(&vtime->seqcount);
 817	vtime->state = VTIME_SYS;
 818	vtime->starttime = sched_clock();
819	write_seqcount_end(&vtime->seqcount);
820}
821
822void vtime_init_idle(struct task_struct *t, int cpu)
823{
824	struct vtime *vtime = &t->vtime;
825	unsigned long flags;
826
827	local_irq_save(flags);
828	write_seqcount_begin(&vtime->seqcount);
829	vtime->state = VTIME_SYS;
 830	vtime->starttime = sched_clock();
831	write_seqcount_end(&vtime->seqcount);
832	local_irq_restore(flags);
833}
834
835u64 task_gtime(struct task_struct *t)
836{
837	struct vtime *vtime = &t->vtime;
838	unsigned int seq;
839	u64 gtime;
840
841	if (!vtime_accounting_enabled())
842		return t->gtime;
843
844	do {
845		seq = read_seqcount_begin(&vtime->seqcount);
846
847		gtime = t->gtime;
848		if (vtime->state == VTIME_SYS && t->flags & PF_VCPU)
849			gtime += vtime->gtime + vtime_delta(vtime);
850
851	} while (read_seqcount_retry(&vtime->seqcount, seq));
852
853	return gtime;
854}
855
856/*
857 * Fetch cputime raw values from fields of task_struct and
858 * add up the pending nohz execution time since the last
859 * cputime snapshot.
860 */
861void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
862{
863	struct vtime *vtime = &t->vtime;
864	unsigned int seq;
 865	u64 delta;
866
867	if (!vtime_accounting_enabled()) {
868		*utime = t->utime;
869		*stime = t->stime;
870		return;
871	}
872
 873	do {
874		seq = read_seqcount_begin(&vtime->seqcount);
875
876		*utime = t->utime;
877		*stime = t->stime;
878
879		/* Task is sleeping, nothing to add */
880		if (vtime->state == VTIME_INACTIVE || is_idle_task(t))
881			continue;
 882
883		delta = vtime_delta(vtime);
884
885		/*
886		 * Task runs either in user or kernel space, add pending nohz time to
887		 * the right place.
888		 */
889		if (vtime->state == VTIME_USER || t->flags & PF_VCPU)
890			*utime += vtime->utime + delta;
891		else if (vtime->state == VTIME_SYS)
 892			*stime += vtime->stime + delta;
 893	} while (read_seqcount_retry(&vtime->seqcount, seq));
 894}
895#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Simple CPU accounting cgroup controller
   4 */
   5
   6#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
   7 #include <asm/cputime.h>
   8#endif
   9
  10#ifdef CONFIG_IRQ_TIME_ACCOUNTING
  11
  12/*
  13 * There are no locks covering percpu hardirq/softirq time.
  14 * They are only modified in vtime_account, on corresponding CPU
  15 * with interrupts disabled. So, writes are safe.
  16 * They are read and saved off onto struct rq in update_rq_clock().
  17 * This may result in other CPU reading this CPU's IRQ time and can
  18 * race with irq/vtime_account on this CPU. We would either get old
  19 * or new value with a side effect of accounting a slice of IRQ time to wrong
  20 * task when IRQ is in progress while we read rq->clock. That is a worthy
  21 * compromise in place of having locks on each IRQ in account_system_time.
  22 */
  23DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
  24
  25static int sched_clock_irqtime;
  26
  27void enable_sched_clock_irqtime(void)
  28{
  29	sched_clock_irqtime = 1;
  30}
  31
  32void disable_sched_clock_irqtime(void)
  33{
  34	sched_clock_irqtime = 0;
  35}
  36
  37static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
  38				  enum cpu_usage_stat idx)
  39{
  40	u64 *cpustat = kcpustat_this_cpu->cpustat;
  41
  42	u64_stats_update_begin(&irqtime->sync);
  43	cpustat[idx] += delta;
  44	irqtime->total += delta;
  45	irqtime->tick_delta += delta;
  46	u64_stats_update_end(&irqtime->sync);
  47}
  48
  49/*
  50 * Called after incrementing preempt_count on {soft,}irq_enter
  51 * and before decrementing preempt_count on {soft,}irq_exit.
  52 */
  53void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
  54{
  55	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
  56	unsigned int pc;
  57	s64 delta;
  58	int cpu;
  59
  60	if (!sched_clock_irqtime)
  61		return;
  62
  63	cpu = smp_processor_id();
  64	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
  65	irqtime->irq_start_time += delta;
  66	pc = irq_count() - offset;
  67
  68	/*
  69	 * We do not account for softirq time from ksoftirqd here.
  70	 * We want to continue accounting softirq time to ksoftirqd thread
  71	 * in that case, so as not to confuse scheduler with a special task
   72	 * that does not consume any time, but still wants to run.
  73	 */
  74	if (pc & HARDIRQ_MASK)
  75		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
  76	else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
  77		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
  78}
  79
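
Compared with the v4.17 version above, the hardirq/softirq test here decodes irq_count() - offset directly, where offset is the caller's own pending contribution to preempt_count. A standalone illustration of that decode (the *_OFFSET/*_MASK values mirror include/linux/preempt.h on common configurations; the scenario and program are invented):

#include <stdio.h>

#define SOFTIRQ_OFFSET	(1U << 8)	/* softirq count lives in bits 8-15 */
#define HARDIRQ_OFFSET	(1U << 16)	/* hardirq count lives in bits 16-19 */
#define HARDIRQ_MASK	(0xfU << 16)

int main(void)
{
	/* A hardirq (e.g. the tick) arrived while a softirq was being served. */
	unsigned int irq_count = HARDIRQ_OFFSET + SOFTIRQ_OFFSET;
	/* The hardirq entry path passes its own increment as 'offset'... */
	unsigned int pc = irq_count - HARDIRQ_OFFSET;

	/* ...so the elapsed delta is charged to the context that was running before. */
	if (pc & HARDIRQ_MASK)
		printf("charge delta to CPUTIME_IRQ\n");
	else if (pc & SOFTIRQ_OFFSET)
		printf("charge delta to CPUTIME_SOFTIRQ\n");	/* taken in this scenario */
	else
		printf("charge delta to the task (or idle)\n");
	return 0;
}
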
  80static u64 irqtime_tick_accounted(u64 maxtime)
  81{
  82	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
  83	u64 delta;
  84
  85	delta = min(irqtime->tick_delta, maxtime);
  86	irqtime->tick_delta -= delta;
  87
  88	return delta;
  89}
  90
  91#else /* CONFIG_IRQ_TIME_ACCOUNTING */
  92
  93#define sched_clock_irqtime	(0)
  94
  95static u64 irqtime_tick_accounted(u64 dummy)
  96{
  97	return 0;
  98}
  99
 100#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
 101
 102static inline void task_group_account_field(struct task_struct *p, int index,
 103					    u64 tmp)
 104{
 105	/*
 106	 * Since all updates are sure to touch the root cgroup, we
 107	 * get ourselves ahead and touch it first. If the root cgroup
 108	 * is the only cgroup, then nothing else should be necessary.
 109	 *
 110	 */
 111	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);
 112
 113	cgroup_account_cputime_field(p, index, tmp);
 114}
 115
 116/*
 117 * Account user CPU time to a process.
 118 * @p: the process that the CPU time gets accounted to
 119 * @cputime: the CPU time spent in user space since the last update
 120 */
 121void account_user_time(struct task_struct *p, u64 cputime)
 122{
 123	int index;
 124
 125	/* Add user time to process. */
 126	p->utime += cputime;
 127	account_group_user_time(p, cputime);
 128
 129	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
 130
 131	/* Add user time to cpustat. */
 132	task_group_account_field(p, index, cputime);
 133
 134	/* Account for user time used */
 135	acct_account_cputime(p);
 136}
 137
 138/*
 139 * Account guest CPU time to a process.
 140 * @p: the process that the CPU time gets accounted to
 141 * @cputime: the CPU time spent in virtual machine since the last update
 142 */
 143void account_guest_time(struct task_struct *p, u64 cputime)
 144{
 145	u64 *cpustat = kcpustat_this_cpu->cpustat;
 146
 147	/* Add guest time to process. */
 148	p->utime += cputime;
 149	account_group_user_time(p, cputime);
 150	p->gtime += cputime;
 151
 152	/* Add guest time to cpustat. */
 153	if (task_nice(p) > 0) {
 154		task_group_account_field(p, CPUTIME_NICE, cputime);
 155		cpustat[CPUTIME_GUEST_NICE] += cputime;
 156	} else {
 157		task_group_account_field(p, CPUTIME_USER, cputime);
 158		cpustat[CPUTIME_GUEST] += cputime;
 159	}
 160}
 161
 162/*
 163 * Account system CPU time to a process and desired cpustat field
 164 * @p: the process that the CPU time gets accounted to
 165 * @cputime: the CPU time spent in kernel space since the last update
 166 * @index: pointer to cpustat field that has to be updated
 167 */
 168void account_system_index_time(struct task_struct *p,
 169			       u64 cputime, enum cpu_usage_stat index)
 170{
 171	/* Add system time to process. */
 172	p->stime += cputime;
 173	account_group_system_time(p, cputime);
 174
 175	/* Add system time to cpustat. */
 176	task_group_account_field(p, index, cputime);
 177
 178	/* Account for system time used */
 179	acct_account_cputime(p);
 180}
 181
 182/*
 183 * Account system CPU time to a process.
 184 * @p: the process that the CPU time gets accounted to
 185 * @hardirq_offset: the offset to subtract from hardirq_count()
 186 * @cputime: the CPU time spent in kernel space since the last update
 187 */
 188void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
 189{
 190	int index;
 191
 192	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
 193		account_guest_time(p, cputime);
 194		return;
 195	}
 196
 197	if (hardirq_count() - hardirq_offset)
 198		index = CPUTIME_IRQ;
 199	else if (in_serving_softirq())
 200		index = CPUTIME_SOFTIRQ;
 201	else
 202		index = CPUTIME_SYSTEM;
 203
 204	account_system_index_time(p, cputime, index);
 205}
 206
 207/*
 208 * Account for involuntary wait time.
 209 * @cputime: the CPU time spent in involuntary wait
 210 */
 211void account_steal_time(u64 cputime)
 212{
 213	u64 *cpustat = kcpustat_this_cpu->cpustat;
 214
 215	cpustat[CPUTIME_STEAL] += cputime;
 216}
 217
 218/*
 219 * Account for idle time.
 220 * @cputime: the CPU time spent in idle wait
 221 */
 222void account_idle_time(u64 cputime)
 223{
 224	u64 *cpustat = kcpustat_this_cpu->cpustat;
 225	struct rq *rq = this_rq();
 226
 227	if (atomic_read(&rq->nr_iowait) > 0)
 228		cpustat[CPUTIME_IOWAIT] += cputime;
 229	else
 230		cpustat[CPUTIME_IDLE] += cputime;
 231}
 232
 233
 234#ifdef CONFIG_SCHED_CORE
 235/*
 236 * Account for forceidle time due to core scheduling.
 237 *
 238 * REQUIRES: schedstat is enabled.
 239 */
 240void __account_forceidle_time(struct task_struct *p, u64 delta)
 241{
 242	__schedstat_add(p->stats.core_forceidle_sum, delta);
 243
 244	task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
 245}
 246#endif
 247
 248/*
 249 * When a guest is interrupted for a longer amount of time, missed clock
 250 * ticks are not redelivered later. Due to that, this function may on
 251 * occasion account more time than the calling functions think elapsed.
 252 */
 253static __always_inline u64 steal_account_process_time(u64 maxtime)
 254{
 255#ifdef CONFIG_PARAVIRT
 256	if (static_key_false(&paravirt_steal_enabled)) {
 257		u64 steal;
 258
 259		steal = paravirt_steal_clock(smp_processor_id());
 260		steal -= this_rq()->prev_steal_time;
 261		steal = min(steal, maxtime);
 262		account_steal_time(steal);
 263		this_rq()->prev_steal_time += steal;
 264
 265		return steal;
 266	}
 267#endif
 268	return 0;
 269}
 270
 271/*
 272 * Account how much elapsed time was spent in steal, IRQ, or softirq time.
 273 */
 274static inline u64 account_other_time(u64 max)
 275{
 276	u64 accounted;
 277
 278	lockdep_assert_irqs_disabled();
 279
 280	accounted = steal_account_process_time(max);
 281
 282	if (accounted < max)
 283		accounted += irqtime_tick_accounted(max - accounted);
 284
 285	return accounted;
 286}
 287
 288#ifdef CONFIG_64BIT
 289static inline u64 read_sum_exec_runtime(struct task_struct *t)
 290{
 291	return t->se.sum_exec_runtime;
 292}
 293#else
 294static u64 read_sum_exec_runtime(struct task_struct *t)
 295{
 296	u64 ns;
 297	struct rq_flags rf;
 298	struct rq *rq;
 299
 300	rq = task_rq_lock(t, &rf);
 301	ns = t->se.sum_exec_runtime;
 302	task_rq_unlock(rq, t, &rf);
 303
 304	return ns;
 305}
 306#endif
 307
 308/*
 309 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 310 * tasks (sum on group iteration) belonging to @tsk's group.
 311 */
 312void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 313{
 314	struct signal_struct *sig = tsk->signal;
 315	u64 utime, stime;
 316	struct task_struct *t;
 317	unsigned int seq, nextseq;
 318	unsigned long flags;
 319
 320	/*
 321	 * Update current task runtime to account pending time since last
 322	 * scheduler action or thread_group_cputime() call. This thread group
 323	 * might have other running tasks on different CPUs, but updating
  324	 * their runtime can affect syscall performance, so we skip accounting
 325	 * those pending times and rely only on values updated on tick or
 326	 * other scheduler action.
 327	 */
 328	if (same_thread_group(current, tsk))
 329		(void) task_sched_runtime(current);
 330
 331	rcu_read_lock();
 332	/* Attempt a lockless read on the first round. */
 333	nextseq = 0;
 334	do {
 335		seq = nextseq;
 336		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
 337		times->utime = sig->utime;
 338		times->stime = sig->stime;
 339		times->sum_exec_runtime = sig->sum_sched_runtime;
 340
 341		for_each_thread(tsk, t) {
 342			task_cputime(t, &utime, &stime);
 343			times->utime += utime;
 344			times->stime += stime;
 345			times->sum_exec_runtime += read_sum_exec_runtime(t);
 346		}
 347		/* If lockless access failed, take the lock. */
 348		nextseq = 1;
 349	} while (need_seqretry(&sig->stats_lock, seq));
 350	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
 351	rcu_read_unlock();
 352}
 353
 354#ifdef CONFIG_IRQ_TIME_ACCOUNTING
 355/*
 356 * Account a tick to a process and cpustat
 357 * @p: the process that the CPU time gets accounted to
 358 * @user_tick: is the tick from userspace
 359 * @rq: the pointer to rq
 360 *
 361 * Tick demultiplexing follows the order
 362 * - pending hardirq update
 363 * - pending softirq update
 364 * - user_time
 365 * - idle_time
 366 * - system time
 367 *   - check for guest_time
 368 *   - else account as system_time
 369 *
 370 * Check for hardirq is done both for system and user time as there is
 371 * no timer going off while we are on hardirq and hence we may never get an
 372 * opportunity to update it solely in system time.
 373 * p->stime and friends are only updated on system time and not on IRQ
 374 * softirq as those do not count in task exec_runtime any more.
 375 */
 376static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 377					 int ticks)
 378{
 379	u64 other, cputime = TICK_NSEC * ticks;
 380
 381	/*
 382	 * When returning from idle, many ticks can get accounted at
 383	 * once, including some ticks of steal, IRQ, and softirq time.
 384	 * Subtract those ticks from the amount of time accounted to
 385	 * idle, or potentially user or system time. Due to rounding,
 386	 * other time can exceed ticks occasionally.
 387	 */
 388	other = account_other_time(ULONG_MAX);
 389	if (other >= cputime)
 390		return;
 391
 392	cputime -= other;
 393
 394	if (this_cpu_ksoftirqd() == p) {
 395		/*
  396		 * ksoftirqd time does not get accounted in cpu_softirq_time.
 397		 * So, we have to handle it separately here.
 398		 * Also, p->stime needs to be updated for ksoftirqd.
 399		 */
 400		account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
 401	} else if (user_tick) {
 402		account_user_time(p, cputime);
 403	} else if (p == this_rq()->idle) {
 404		account_idle_time(cputime);
 405	} else if (p->flags & PF_VCPU) { /* System time or guest time */
 406		account_guest_time(p, cputime);
 407	} else {
 408		account_system_index_time(p, cputime, CPUTIME_SYSTEM);
 409	}
 410}
 411
 412static void irqtime_account_idle_ticks(int ticks)
 413{
 414	irqtime_account_process_tick(current, 0, ticks);
 415}
 416#else /* CONFIG_IRQ_TIME_ACCOUNTING */
 417static inline void irqtime_account_idle_ticks(int ticks) { }
 418static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 419						int nr_ticks) { }
 420#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 421
 422/*
 423 * Use precise platform statistics if available:
 424 */
 425#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 426
 427void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
 428{
 429	unsigned int pc = irq_count() - offset;
 430
 431	if (pc & HARDIRQ_OFFSET) {
 432		vtime_account_hardirq(tsk);
 433	} else if (pc & SOFTIRQ_OFFSET) {
 434		vtime_account_softirq(tsk);
 435	} else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
 436		   is_idle_task(tsk)) {
 437		vtime_account_idle(tsk);
 438	} else {
 439		vtime_account_kernel(tsk);
 440	}
 441}
 442
 443void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
 444		    u64 *ut, u64 *st)
 445{
 446	*ut = curr->utime;
 447	*st = curr->stime;
 448}
 449
 450void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 451{
 452	*ut = p->utime;
 453	*st = p->stime;
 454}
 455EXPORT_SYMBOL_GPL(task_cputime_adjusted);
 456
 457void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 458{
 459	struct task_cputime cputime;
 460
 461	thread_group_cputime(p, &cputime);
 462
 463	*ut = cputime.utime;
 464	*st = cputime.stime;
 465}
 466
 467#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
 468
 469/*
 470 * Account a single tick of CPU time.
 471 * @p: the process that the CPU time gets accounted to
 472 * @user_tick: indicates if the tick is a user or a system tick
 473 */
 474void account_process_tick(struct task_struct *p, int user_tick)
 475{
 476	u64 cputime, steal;
 477
 478	if (vtime_accounting_enabled_this_cpu())
 479		return;
 480
 481	if (sched_clock_irqtime) {
 482		irqtime_account_process_tick(p, user_tick, 1);
 483		return;
 484	}
 485
 486	cputime = TICK_NSEC;
 487	steal = steal_account_process_time(ULONG_MAX);
 488
 489	if (steal >= cputime)
 490		return;
 491
 492	cputime -= steal;
 493
 494	if (user_tick)
 495		account_user_time(p, cputime);
 496	else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
 497		account_system_time(p, HARDIRQ_OFFSET, cputime);
 498	else
 499		account_idle_time(cputime);
 500}
 501
 502/*
 503 * Account multiple ticks of idle time.
 504 * @ticks: number of stolen ticks
 505 */
 506void account_idle_ticks(unsigned long ticks)
 507{
 508	u64 cputime, steal;
 509
 510	if (sched_clock_irqtime) {
 511		irqtime_account_idle_ticks(ticks);
 512		return;
 513	}
 514
 515	cputime = ticks * TICK_NSEC;
 516	steal = steal_account_process_time(ULONG_MAX);
 517
 518	if (steal >= cputime)
 519		return;
 520
 521	cputime -= steal;
 522	account_idle_time(cputime);
 523}
 524
 525/*
 526 * Adjust tick based cputime random precision against scheduler runtime
 527 * accounting.
 528 *
 529 * Tick based cputime accounting depends on random scheduling timeslices of a
 530 * task to be interrupted or not by the timer.  Depending on these
 531 * circumstances, the number of these interrupts may be over or
 532 * under-optimistic, matching the real user and system cputime with a variable
 533 * precision.
 534 *
 535 * Fix this by scaling these tick based values against the total runtime
 536 * accounted by the CFS scheduler.
 537 *
 538 * This code provides the following guarantees:
 539 *
 540 *   stime + utime == rtime
 541 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 542 *
 543 * Assuming that rtime_i+1 >= rtime_i.
 544 */
 545void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
 546		    u64 *ut, u64 *st)
 547{
 548	u64 rtime, stime, utime;
 549	unsigned long flags;
 550
 551	/* Serialize concurrent callers such that we can honour our guarantees */
 552	raw_spin_lock_irqsave(&prev->lock, flags);
 553	rtime = curr->sum_exec_runtime;
 554
 555	/*
 556	 * This is possible under two circumstances:
 557	 *  - rtime isn't monotonic after all (a bug);
 558	 *  - we got reordered by the lock.
 559	 *
 560	 * In both cases this acts as a filter such that the rest of the code
 561	 * can assume it is monotonic regardless of anything else.
 562	 */
 563	if (prev->stime + prev->utime >= rtime)
 564		goto out;
 565
 566	stime = curr->stime;
 567	utime = curr->utime;
 568
 569	/*
 570	 * If either stime or utime are 0, assume all runtime is userspace.
 571	 * Once a task gets some ticks, the monotonicity code at 'update:'
 572	 * will ensure things converge to the observed ratio.
 573	 */
 574	if (stime == 0) {
 575		utime = rtime;
 576		goto update;
 577	}
 578
 579	if (utime == 0) {
 580		stime = rtime;
 581		goto update;
 582	}
 583
 584	stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
 585	/*
 586	 * Because mul_u64_u64_div_u64() can approximate on some
 587	 * architectures; enforce the constraint that: a*b/(b+c) <= a.
 588	 */
 589	if (unlikely(stime > rtime))
 590		stime = rtime;
 591
 592update:
 593	/*
 594	 * Make sure stime doesn't go backwards; this preserves monotonicity
 595	 * for utime because rtime is monotonic.
 596	 *
 597	 *  utime_i+1 = rtime_i+1 - stime_i
 598	 *            = rtime_i+1 - (rtime_i - utime_i)
 599	 *            = (rtime_i+1 - rtime_i) + utime_i
 600	 *            >= utime_i
 601	 */
 602	if (stime < prev->stime)
 603		stime = prev->stime;
 604	utime = rtime - stime;
 605
 606	/*
 607	 * Make sure utime doesn't go backwards; this still preserves
 608	 * monotonicity for stime, analogous argument to above.
 609	 */
 610	if (utime < prev->utime) {
 611		utime = prev->utime;
 612		stime = rtime - utime;
 613	}
 614
 615	prev->stime = stime;
 616	prev->utime = utime;
 617out:
 618	*ut = prev->utime;
 619	*st = prev->stime;
 620	raw_spin_unlock_irqrestore(&prev->lock, flags);
 621}
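
The open-coded scale_stime() loop from older kernels is gone here: mul_u64_u64_div_u64() computes stime * rtime / (stime + utime) over a full 128-bit intermediate where the architecture provides one. On compilers with a 128-bit integer type, the exact result it targets is simply the following userspace sketch (scale_exact() is a made-up name):

#include <stdint.h>

/* Exact stime * rtime / (stime + utime) via a 128-bit intermediate. */
static uint64_t scale_exact(uint64_t stime, uint64_t rtime, uint64_t utime)
{
	unsigned __int128 prod = (unsigned __int128)stime * rtime;

	return (uint64_t)(prod / (stime + utime));
}

In exact arithmetic stime * rtime / (stime + utime) <= rtime (the a*b/(b+c) <= a constraint from the comment, with a = rtime), so the stime > rtime clamp above only fires when an architecture-specific implementation over-approximates.
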
 622
 623void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 624{
 625	struct task_cputime cputime = {
 626		.sum_exec_runtime = p->se.sum_exec_runtime,
 627	};
 628
 629	if (task_cputime(p, &cputime.utime, &cputime.stime))
 630		cputime.sum_exec_runtime = task_sched_runtime(p);
 631	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
 632}
 633EXPORT_SYMBOL_GPL(task_cputime_adjusted);
 634
 635void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
 636{
 637	struct task_cputime cputime;
 638
 639	thread_group_cputime(p, &cputime);
 640	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
 641}
 642#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 643
 644#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 645static u64 vtime_delta(struct vtime *vtime)
 646{
 647	unsigned long long clock;
 648
 649	clock = sched_clock();
 650	if (clock < vtime->starttime)
 651		return 0;
 652
 653	return clock - vtime->starttime;
 654}
 655
 656static u64 get_vtime_delta(struct vtime *vtime)
 657{
 658	u64 delta = vtime_delta(vtime);
 659	u64 other;
 660
 661	/*
 662	 * Unlike tick based timing, vtime based timing never has lost
 663	 * ticks, and no need for steal time accounting to make up for
 664	 * lost ticks. Vtime accounts a rounded version of actual
 665	 * elapsed time. Limit account_other_time to prevent rounding
 666	 * errors from causing elapsed vtime to go negative.
 667	 */
 668	other = account_other_time(delta);
 669	WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
 670	vtime->starttime += delta;
 671
 672	return delta - other;
 673}
 674
 675static void vtime_account_system(struct task_struct *tsk,
 676				 struct vtime *vtime)
 677{
 678	vtime->stime += get_vtime_delta(vtime);
 679	if (vtime->stime >= TICK_NSEC) {
 680		account_system_time(tsk, irq_count(), vtime->stime);
 681		vtime->stime = 0;
 682	}
 683}
 684
 685static void vtime_account_guest(struct task_struct *tsk,
 686				struct vtime *vtime)
 687{
 688	vtime->gtime += get_vtime_delta(vtime);
 689	if (vtime->gtime >= TICK_NSEC) {
 690		account_guest_time(tsk, vtime->gtime);
 691		vtime->gtime = 0;
 692	}
 693}
 694
 695static void __vtime_account_kernel(struct task_struct *tsk,
 696				   struct vtime *vtime)
 697{
 698	/* We might have scheduled out from guest path */
 699	if (vtime->state == VTIME_GUEST)
 700		vtime_account_guest(tsk, vtime);
 701	else
 702		vtime_account_system(tsk, vtime);
 703}
 704
 705void vtime_account_kernel(struct task_struct *tsk)
 706{
 707	struct vtime *vtime = &tsk->vtime;
 708
 709	if (!vtime_delta(vtime))
 710		return;
 711
 712	write_seqcount_begin(&vtime->seqcount);
 713	__vtime_account_kernel(tsk, vtime);
 714	write_seqcount_end(&vtime->seqcount);
 715}
 716
 717void vtime_user_enter(struct task_struct *tsk)
 718{
 719	struct vtime *vtime = &tsk->vtime;
 720
 721	write_seqcount_begin(&vtime->seqcount);
 722	vtime_account_system(tsk, vtime);
 723	vtime->state = VTIME_USER;
 724	write_seqcount_end(&vtime->seqcount);
 725}
 726
 727void vtime_user_exit(struct task_struct *tsk)
 728{
 729	struct vtime *vtime = &tsk->vtime;
 730
 731	write_seqcount_begin(&vtime->seqcount);
 732	vtime->utime += get_vtime_delta(vtime);
 733	if (vtime->utime >= TICK_NSEC) {
 734		account_user_time(tsk, vtime->utime);
 735		vtime->utime = 0;
 736	}
 737	vtime->state = VTIME_SYS;
 738	write_seqcount_end(&vtime->seqcount);
 739}
 740
 741void vtime_guest_enter(struct task_struct *tsk)
 742{
 743	struct vtime *vtime = &tsk->vtime;
 744	/*
 745	 * The flags must be updated under the lock with
 746	 * the vtime_starttime flush and update.
 747	 * That enforces a right ordering and update sequence
 748	 * synchronization against the reader (task_gtime())
 749	 * that can thus safely catch up with a tickless delta.
 750	 */
 751	write_seqcount_begin(&vtime->seqcount);
 752	vtime_account_system(tsk, vtime);
 753	tsk->flags |= PF_VCPU;
 754	vtime->state = VTIME_GUEST;
 755	write_seqcount_end(&vtime->seqcount);
 756}
 757EXPORT_SYMBOL_GPL(vtime_guest_enter);
 758
 759void vtime_guest_exit(struct task_struct *tsk)
 760{
 761	struct vtime *vtime = &tsk->vtime;
 762
 763	write_seqcount_begin(&vtime->seqcount);
 764	vtime_account_guest(tsk, vtime);
 765	tsk->flags &= ~PF_VCPU;
 766	vtime->state = VTIME_SYS;
 767	write_seqcount_end(&vtime->seqcount);
 768}
 769EXPORT_SYMBOL_GPL(vtime_guest_exit);
 770
 771void vtime_account_idle(struct task_struct *tsk)
 772{
 773	account_idle_time(get_vtime_delta(&tsk->vtime));
 774}
 775
 776void vtime_task_switch_generic(struct task_struct *prev)
 777{
 778	struct vtime *vtime = &prev->vtime;
 779
 780	write_seqcount_begin(&vtime->seqcount);
 781	if (vtime->state == VTIME_IDLE)
 782		vtime_account_idle(prev);
 783	else
 784		__vtime_account_kernel(prev, vtime);
 785	vtime->state = VTIME_INACTIVE;
 786	vtime->cpu = -1;
 787	write_seqcount_end(&vtime->seqcount);
 788
 789	vtime = &current->vtime;
 790
 791	write_seqcount_begin(&vtime->seqcount);
 792	if (is_idle_task(current))
 793		vtime->state = VTIME_IDLE;
 794	else if (current->flags & PF_VCPU)
 795		vtime->state = VTIME_GUEST;
 796	else
 797		vtime->state = VTIME_SYS;
 798	vtime->starttime = sched_clock();
 799	vtime->cpu = smp_processor_id();
 800	write_seqcount_end(&vtime->seqcount);
 801}
 802
 803void vtime_init_idle(struct task_struct *t, int cpu)
 804{
 805	struct vtime *vtime = &t->vtime;
 806	unsigned long flags;
 807
 808	local_irq_save(flags);
 809	write_seqcount_begin(&vtime->seqcount);
 810	vtime->state = VTIME_IDLE;
 811	vtime->starttime = sched_clock();
 812	vtime->cpu = cpu;
 813	write_seqcount_end(&vtime->seqcount);
 814	local_irq_restore(flags);
 815}
 816
 817u64 task_gtime(struct task_struct *t)
 818{
 819	struct vtime *vtime = &t->vtime;
 820	unsigned int seq;
 821	u64 gtime;
 822
 823	if (!vtime_accounting_enabled())
 824		return t->gtime;
 825
 826	do {
 827		seq = read_seqcount_begin(&vtime->seqcount);
 828
 829		gtime = t->gtime;
 830		if (vtime->state == VTIME_GUEST)
 831			gtime += vtime->gtime + vtime_delta(vtime);
 832
 833	} while (read_seqcount_retry(&vtime->seqcount, seq));
 834
 835	return gtime;
 836}
 837
 838/*
 839 * Fetch cputime raw values from fields of task_struct and
 840 * add up the pending nohz execution time since the last
 841 * cputime snapshot.
 842 */
 843bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
 844{
 845	struct vtime *vtime = &t->vtime;
 846	unsigned int seq;
 847	u64 delta;
 848	int ret;
 849
 850	if (!vtime_accounting_enabled()) {
 851		*utime = t->utime;
 852		*stime = t->stime;
 853		return false;
 854	}
 855
 856	do {
 857		ret = false;
 858		seq = read_seqcount_begin(&vtime->seqcount);
 859
 860		*utime = t->utime;
 861		*stime = t->stime;
 862
 863		/* Task is sleeping or idle, nothing to add */
 864		if (vtime->state < VTIME_SYS)
 865			continue;
 866
 867		ret = true;
 868		delta = vtime_delta(vtime);
 869
 870		/*
 871		 * Task runs either in user (including guest) or kernel space,
 872		 * add pending nohz time to the right place.
 873		 */
 874		if (vtime->state == VTIME_SYS)
 875			*stime += vtime->stime + delta;
 876		else
 877			*utime += vtime->utime + delta;
 878	} while (read_seqcount_retry(&vtime->seqcount, seq));
 879
 880	return ret;
 881}
 882
 883static int vtime_state_fetch(struct vtime *vtime, int cpu)
 884{
 885	int state = READ_ONCE(vtime->state);
 886
 887	/*
 888	 * We raced against a context switch, fetch the
 889	 * kcpustat task again.
 890	 */
 891	if (vtime->cpu != cpu && vtime->cpu != -1)
 892		return -EAGAIN;
 893
 894	/*
 895	 * Two possible things here:
 896	 * 1) We are seeing the scheduling out task (prev) or any past one.
 897	 * 2) We are seeing the scheduling in task (next) but it hasn't
 898	 *    passed though vtime_task_switch() yet so the pending
 899	 *    cputime of the prev task may not be flushed yet.
 900	 *
 901	 * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
 902	 */
 903	if (state == VTIME_INACTIVE)
 904		return -EAGAIN;
 905
 906	return state;
 907}
 908
 909static u64 kcpustat_user_vtime(struct vtime *vtime)
 910{
 911	if (vtime->state == VTIME_USER)
 912		return vtime->utime + vtime_delta(vtime);
 913	else if (vtime->state == VTIME_GUEST)
 914		return vtime->gtime + vtime_delta(vtime);
 915	return 0;
 916}
 917
 918static int kcpustat_field_vtime(u64 *cpustat,
 919				struct task_struct *tsk,
 920				enum cpu_usage_stat usage,
 921				int cpu, u64 *val)
 922{
 923	struct vtime *vtime = &tsk->vtime;
 924	unsigned int seq;
 925
 926	do {
 927		int state;
 928
 929		seq = read_seqcount_begin(&vtime->seqcount);
 930
 931		state = vtime_state_fetch(vtime, cpu);
 932		if (state < 0)
 933			return state;
 934
 935		*val = cpustat[usage];
 936
 937		/*
 938		 * Nice VS unnice cputime accounting may be inaccurate if
 939		 * the nice value has changed since the last vtime update.
 940		 * But proper fix would involve interrupting target on nice
 941		 * updates which is a no go on nohz_full (although the scheduler
 942		 * may still interrupt the target if rescheduling is needed...)
 943		 */
 944		switch (usage) {
 945		case CPUTIME_SYSTEM:
 946			if (state == VTIME_SYS)
 947				*val += vtime->stime + vtime_delta(vtime);
 948			break;
 949		case CPUTIME_USER:
 950			if (task_nice(tsk) <= 0)
 951				*val += kcpustat_user_vtime(vtime);
 952			break;
 953		case CPUTIME_NICE:
 954			if (task_nice(tsk) > 0)
 955				*val += kcpustat_user_vtime(vtime);
 956			break;
 957		case CPUTIME_GUEST:
 958			if (state == VTIME_GUEST && task_nice(tsk) <= 0)
 959				*val += vtime->gtime + vtime_delta(vtime);
 960			break;
 961		case CPUTIME_GUEST_NICE:
 962			if (state == VTIME_GUEST && task_nice(tsk) > 0)
 963				*val += vtime->gtime + vtime_delta(vtime);
 964			break;
 965		default:
 966			break;
 967		}
 968	} while (read_seqcount_retry(&vtime->seqcount, seq));
 969
 970	return 0;
 971}
 972
 973u64 kcpustat_field(struct kernel_cpustat *kcpustat,
 974		   enum cpu_usage_stat usage, int cpu)
 975{
 976	u64 *cpustat = kcpustat->cpustat;
 977	u64 val = cpustat[usage];
 978	struct rq *rq;
 979	int err;
 980
 981	if (!vtime_accounting_enabled_cpu(cpu))
 982		return val;
 983
 984	rq = cpu_rq(cpu);
 985
 986	for (;;) {
 987		struct task_struct *curr;
 988
 989		rcu_read_lock();
 990		curr = rcu_dereference(rq->curr);
 991		if (WARN_ON_ONCE(!curr)) {
 992			rcu_read_unlock();
 993			return cpustat[usage];
 994		}
 995
 996		err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
 997		rcu_read_unlock();
 998
 999		if (!err)
1000			return val;
1001
1002		cpu_relax();
1003	}
1004}
1005EXPORT_SYMBOL_GPL(kcpustat_field);
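
kcpustat_field() lets nohz_full-aware readers, such as the /proc/stat code, fetch a single cpustat field with the pending vtime of the CPU's current task folded in. A hypothetical one-line consumer, using the kcpustat_cpu() per-CPU accessor from linux/kernel_stat.h:

	/* Read CPU "cpu"'s idle time, in nanoseconds, with pending vtime included. */
	u64 idle_ns = kcpustat_field(&kcpustat_cpu(cpu), CPUTIME_IDLE, cpu);
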
1006
1007static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
1008				    const struct kernel_cpustat *src,
1009				    struct task_struct *tsk, int cpu)
1010{
1011	struct vtime *vtime = &tsk->vtime;
1012	unsigned int seq;
1013
1014	do {
1015		u64 *cpustat;
1016		u64 delta;
1017		int state;
1018
1019		seq = read_seqcount_begin(&vtime->seqcount);
1020
1021		state = vtime_state_fetch(vtime, cpu);
1022		if (state < 0)
1023			return state;
1024
1025		*dst = *src;
1026		cpustat = dst->cpustat;
1027
1028		/* Task is sleeping, dead or idle, nothing to add */
1029		if (state < VTIME_SYS)
1030			continue;
1031
1032		delta = vtime_delta(vtime);
1033
1034		/*
1035		 * Task runs either in user (including guest) or kernel space,
1036		 * add pending nohz time to the right place.
1037		 */
1038		if (state == VTIME_SYS) {
1039			cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
1040		} else if (state == VTIME_USER) {
1041			if (task_nice(tsk) > 0)
1042				cpustat[CPUTIME_NICE] += vtime->utime + delta;
1043			else
1044				cpustat[CPUTIME_USER] += vtime->utime + delta;
1045		} else {
1046			WARN_ON_ONCE(state != VTIME_GUEST);
1047			if (task_nice(tsk) > 0) {
1048				cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
1049				cpustat[CPUTIME_NICE] += vtime->gtime + delta;
1050			} else {
1051				cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
1052				cpustat[CPUTIME_USER] += vtime->gtime + delta;
1053			}
1054		}
1055	} while (read_seqcount_retry(&vtime->seqcount, seq));
1056
1057	return 0;
1058}
1059
1060void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
1061{
1062	const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
1063	struct rq *rq;
1064	int err;
1065
1066	if (!vtime_accounting_enabled_cpu(cpu)) {
1067		*dst = *src;
1068		return;
1069	}
1070
1071	rq = cpu_rq(cpu);
1072
1073	for (;;) {
1074		struct task_struct *curr;
1075
1076		rcu_read_lock();
1077		curr = rcu_dereference(rq->curr);
1078		if (WARN_ON_ONCE(!curr)) {
1079			rcu_read_unlock();
1080			*dst = *src;
1081			return;
1082		}
1083
1084		err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
1085		rcu_read_unlock();
1086
1087		if (!err)
1088			return;
1089
1090		cpu_relax();
1091	}
1092}
1093EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);
1094
1095#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */