1#include <linux/export.h>
2#include <linux/sched.h>
3#include <linux/tsacct_kern.h>
4#include <linux/kernel_stat.h>
5#include <linux/static_key.h>
6#include <linux/context_tracking.h>
7#include "sched.h"
8
9
10#ifdef CONFIG_IRQ_TIME_ACCOUNTING
11
12/*
13 * There are no locks covering percpu hardirq/softirq time.
14 * They are only modified in vtime_account, on corresponding CPU
15 * with interrupts disabled. So, writes are safe.
16 * They are read and saved off onto struct rq in update_rq_clock().
17 * This may result in another CPU reading this CPU's irq time and can
18 * race with irq/vtime_account on this CPU. We would either get the old
19 * or the new value with a side effect of accounting a slice of irq time to
20 * the wrong task when an irq is in progress while we read rq->clock. That is a worthy
21 * compromise in place of having locks on each irq in account_system_time.
22 */
23DEFINE_PER_CPU(u64, cpu_hardirq_time);
24DEFINE_PER_CPU(u64, cpu_softirq_time);
25
26static DEFINE_PER_CPU(u64, irq_start_time);
27static int sched_clock_irqtime;
28
29void enable_sched_clock_irqtime(void)
30{
31 sched_clock_irqtime = 1;
32}
33
34void disable_sched_clock_irqtime(void)
35{
36 sched_clock_irqtime = 0;
37}
38
39#ifndef CONFIG_64BIT
40DEFINE_PER_CPU(seqcount_t, irq_time_seq);
41#endif /* CONFIG_64BIT */
42
43/*
44 * Called before incrementing preempt_count on {soft,}irq_enter
45 * and before decrementing preempt_count on {soft,}irq_exit.
46 */
47void irqtime_account_irq(struct task_struct *curr)
48{
49 unsigned long flags;
50 s64 delta;
51 int cpu;
52
53 if (!sched_clock_irqtime)
54 return;
55
56 local_irq_save(flags);
57
58 cpu = smp_processor_id();
59 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
60 __this_cpu_add(irq_start_time, delta);
61
62 irq_time_write_begin();
63 /*
64 * We do not account for softirq time from ksoftirqd here.
65 * We want to continue accounting softirq time to the ksoftirqd thread
66 * in that case, so as not to confuse the scheduler with a special task
67 * that does not consume any time but still wants to run.
68 */
69 if (hardirq_count())
70 __this_cpu_add(cpu_hardirq_time, delta);
71 else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
72 __this_cpu_add(cpu_softirq_time, delta);
73
74 irq_time_write_end();
75 local_irq_restore(flags);
76}
77EXPORT_SYMBOL_GPL(irqtime_account_irq);
78
79static int irqtime_account_hi_update(void)
80{
81 u64 *cpustat = kcpustat_this_cpu->cpustat;
82 unsigned long flags;
83 u64 latest_ns;
84 int ret = 0;
85
86 local_irq_save(flags);
87 latest_ns = this_cpu_read(cpu_hardirq_time);
88 if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
89 ret = 1;
90 local_irq_restore(flags);
91 return ret;
92}
93
94static int irqtime_account_si_update(void)
95{
96 u64 *cpustat = kcpustat_this_cpu->cpustat;
97 unsigned long flags;
98 u64 latest_ns;
99 int ret = 0;
100
101 local_irq_save(flags);
102 latest_ns = this_cpu_read(cpu_softirq_time);
103 if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
104 ret = 1;
105 local_irq_restore(flags);
106 return ret;
107}
108
109#else /* CONFIG_IRQ_TIME_ACCOUNTING */
110
111#define sched_clock_irqtime (0)
112
113#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
114
115static inline void task_group_account_field(struct task_struct *p, int index,
116 u64 tmp)
117{
118 /*
119 * Since all updates are sure to touch the root cgroup, we
120 * get ourselves ahead and touch it first. If the root cgroup
121 * is the only cgroup, then nothing else should be necessary.
122 *
123 */
124 __this_cpu_add(kernel_cpustat.cpustat[index], tmp);
125
126 cpuacct_account_field(p, index, tmp);
127}
128
129/*
130 * Account user cpu time to a process.
131 * @p: the process that the cpu time gets accounted to
132 * @cputime: the cpu time spent in user space since the last update
133 * @cputime_scaled: cputime scaled by cpu frequency
134 */
135void account_user_time(struct task_struct *p, cputime_t cputime,
136 cputime_t cputime_scaled)
137{
138 int index;
139
140 /* Add user time to process. */
141 p->utime += cputime;
142 p->utimescaled += cputime_scaled;
143 account_group_user_time(p, cputime);
144
145 index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
146
147 /* Add user time to cpustat. */
148 task_group_account_field(p, index, (__force u64) cputime);
149
150 /* Account for user time used */
151 acct_account_cputime(p);
152}
153
154/*
155 * Account guest cpu time to a process.
156 * @p: the process that the cpu time gets accounted to
157 * @cputime: the cpu time spent in virtual machine since the last update
158 * @cputime_scaled: cputime scaled by cpu frequency
159 */
160static void account_guest_time(struct task_struct *p, cputime_t cputime,
161 cputime_t cputime_scaled)
162{
163 u64 *cpustat = kcpustat_this_cpu->cpustat;
164
165 /* Add guest time to process. */
166 p->utime += cputime;
167 p->utimescaled += cputime_scaled;
168 account_group_user_time(p, cputime);
169 p->gtime += cputime;
170
171 /* Add guest time to cpustat. */
172 if (task_nice(p) > 0) {
173 cpustat[CPUTIME_NICE] += (__force u64) cputime;
174 cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
175 } else {
176 cpustat[CPUTIME_USER] += (__force u64) cputime;
177 cpustat[CPUTIME_GUEST] += (__force u64) cputime;
178 }
179}
180
181/*
182 * Account system cpu time to a process and desired cpustat field
183 * @p: the process that the cpu time gets accounted to
184 * @cputime: the cpu time spent in kernel space since the last update
185 * @cputime_scaled: cputime scaled by cpu frequency
186 * @index: cpustat field that has to be updated
187 */
188static inline
189void __account_system_time(struct task_struct *p, cputime_t cputime,
190 cputime_t cputime_scaled, int index)
191{
192 /* Add system time to process. */
193 p->stime += cputime;
194 p->stimescaled += cputime_scaled;
195 account_group_system_time(p, cputime);
196
197 /* Add system time to cpustat. */
198 task_group_account_field(p, index, (__force u64) cputime);
199
200 /* Account for system time used */
201 acct_account_cputime(p);
202}
203
204/*
205 * Account system cpu time to a process.
206 * @p: the process that the cpu time gets accounted to
207 * @hardirq_offset: the offset to subtract from hardirq_count()
208 * @cputime: the cpu time spent in kernel space since the last update
209 * @cputime_scaled: cputime scaled by cpu frequency
210 */
211void account_system_time(struct task_struct *p, int hardirq_offset,
212 cputime_t cputime, cputime_t cputime_scaled)
213{
214 int index;
215
216 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
217 account_guest_time(p, cputime, cputime_scaled);
218 return;
219 }
220
221 if (hardirq_count() - hardirq_offset)
222 index = CPUTIME_IRQ;
223 else if (in_serving_softirq())
224 index = CPUTIME_SOFTIRQ;
225 else
226 index = CPUTIME_SYSTEM;
227
228 __account_system_time(p, cputime, cputime_scaled, index);
229}
230
231/*
232 * Account for involuntary wait time.
233 * @cputime: the cpu time spent in involuntary wait
234 */
235void account_steal_time(cputime_t cputime)
236{
237 u64 *cpustat = kcpustat_this_cpu->cpustat;
238
239 cpustat[CPUTIME_STEAL] += (__force u64) cputime;
240}
241
242/*
243 * Account for idle time.
244 * @cputime: the cpu time spent in idle wait
245 */
246void account_idle_time(cputime_t cputime)
247{
248 u64 *cpustat = kcpustat_this_cpu->cpustat;
249 struct rq *rq = this_rq();
250
251 if (atomic_read(&rq->nr_iowait) > 0)
252 cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
253 else
254 cpustat[CPUTIME_IDLE] += (__force u64) cputime;
255}
256
257static __always_inline bool steal_account_process_tick(void)
258{
259#ifdef CONFIG_PARAVIRT
260 if (static_key_false(&paravirt_steal_enabled)) {
261 u64 steal;
262 cputime_t steal_ct;
263
264 steal = paravirt_steal_clock(smp_processor_id());
265 steal -= this_rq()->prev_steal_time;
266
267 /*
268 * cputime_t may be less precise than nsecs (e.g. if it's
269 * based on jiffies). Let's cast the result to cputime
270 * granularity and account the rest on the next rounds.
271 */
272 steal_ct = nsecs_to_cputime(steal);
273 this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
274
275 account_steal_time(steal_ct);
276 return steal_ct;
277 }
278#endif
279 return false;
280}
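
/*
 * Illustration (assuming a jiffies-based cputime_t with HZ=100, i.e. a
 * granularity of 10,000,000 ns): if the hypervisor reports 25,000,000 ns
 * of new steal time, nsecs_to_cputime() rounds that down to 2 jiffies,
 * so 20,000,000 ns are accounted now and prev_steal_time advances by the
 * same amount.  The remaining 5,000,000 ns stay pending and are picked
 * up on a later round once more steal time has accumulated.
 */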
281
282/*
283 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
284 * tasks (sum on group iteration) belonging to @tsk's group.
285 */
286void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
287{
288 struct signal_struct *sig = tsk->signal;
289 cputime_t utime, stime;
290 struct task_struct *t;
291
292 times->utime = sig->utime;
293 times->stime = sig->stime;
294 times->sum_exec_runtime = sig->sum_sched_runtime;
295
296 rcu_read_lock();
297 /* make sure we can trust tsk->thread_group list */
298 if (!likely(pid_alive(tsk)))
299 goto out;
300
301 t = tsk;
302 do {
303 task_cputime(t, &utime, &stime);
304 times->utime += utime;
305 times->stime += stime;
306 times->sum_exec_runtime += task_sched_runtime(t);
307 } while_each_thread(tsk, t);
308out:
309 rcu_read_unlock();
310}
311
312#ifdef CONFIG_IRQ_TIME_ACCOUNTING
313/*
314 * Account a tick to a process and cpustat
315 * @p: the process that the cpu time gets accounted to
316 * @user_tick: is the tick from userspace
317 * @rq: the pointer to rq
318 *
319 * Tick demultiplexing follows the order
320 * - pending hardirq update
321 * - pending softirq update
322 * - user_time
323 * - idle_time
324 * - system time
325 * - check for guest_time
326 * - else account as system_time
327 *
328 * Check for hardirq is done both for system and user time as there is
329 * no timer going off while we are on hardirq and hence we may never get an
330 * opportunity to update it solely in system time.
331 * p->stime and friends are only updated on system time and not on irq
332 * softirq as those do not count in task exec_runtime any more.
333 */
334static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
335 struct rq *rq, int ticks)
336{
337 cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
338 u64 cputime = (__force u64) cputime_one_jiffy;
339 u64 *cpustat = kcpustat_this_cpu->cpustat;
340
341 if (steal_account_process_tick())
342 return;
343
344 cputime *= ticks;
345 scaled *= ticks;
346
347 if (irqtime_account_hi_update()) {
348 cpustat[CPUTIME_IRQ] += cputime;
349 } else if (irqtime_account_si_update()) {
350 cpustat[CPUTIME_SOFTIRQ] += cputime;
351 } else if (this_cpu_ksoftirqd() == p) {
352 /*
353 * ksoftirqd time does not get accounted in cpu_softirq_time.
354 * So, we have to handle it separately here.
355 * Also, p->stime needs to be updated for ksoftirqd.
356 */
357 __account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
358 } else if (user_tick) {
359 account_user_time(p, cputime, scaled);
360 } else if (p == rq->idle) {
361 account_idle_time(cputime);
362 } else if (p->flags & PF_VCPU) { /* System time or guest time */
363 account_guest_time(p, cputime, scaled);
364 } else {
365 __account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
366 }
367}
368
369static void irqtime_account_idle_ticks(int ticks)
370{
371 struct rq *rq = this_rq();
372
373 irqtime_account_process_tick(current, 0, rq, ticks);
374}
375#else /* CONFIG_IRQ_TIME_ACCOUNTING */
376static inline void irqtime_account_idle_ticks(int ticks) {}
377static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
378 struct rq *rq, int nr_ticks) {}
379#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
380
381/*
382 * Use precise platform statistics if available:
383 */
384#ifdef CONFIG_VIRT_CPU_ACCOUNTING
385
386#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
387void vtime_common_task_switch(struct task_struct *prev)
388{
389 if (is_idle_task(prev))
390 vtime_account_idle(prev);
391 else
392 vtime_account_system(prev);
393
394#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
395 vtime_account_user(prev);
396#endif
397 arch_vtime_task_switch(prev);
398}
399#endif
400
401/*
402 * Archs that account the whole time spent in the idle task
403 * (outside irq) as idle time can rely on this and just implement
404 * vtime_account_system() and vtime_account_idle(). Archs that
405 * have other meaning of the idle time (s390 only includes the
406 * time spent by the CPU when it's in low power mode) must override
407 * vtime_account().
408 */
409#ifndef __ARCH_HAS_VTIME_ACCOUNT
410void vtime_common_account_irq_enter(struct task_struct *tsk)
411{
412 if (!in_interrupt()) {
413 /*
414 * If we interrupted user, context_tracking_in_user()
415 * is 1 because context tracking doesn't hook
416 * on irq entry/exit. This way we know if
417 * we need to flush user time on kernel entry.
418 */
419 if (context_tracking_in_user()) {
420 vtime_account_user(tsk);
421 return;
422 }
423
424 if (is_idle_task(tsk)) {
425 vtime_account_idle(tsk);
426 return;
427 }
428 }
429 vtime_account_system(tsk);
430}
431EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
432#endif /* __ARCH_HAS_VTIME_ACCOUNT */
433#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
434
435
436#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
437void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
438{
439 *ut = p->utime;
440 *st = p->stime;
441}
442
443void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
444{
445 struct task_cputime cputime;
446
447 thread_group_cputime(p, &cputime);
448
449 *ut = cputime.utime;
450 *st = cputime.stime;
451}
452#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
453/*
454 * Account a single tick of cpu time.
455 * @p: the process that the cpu time gets accounted to
456 * @user_tick: indicates if the tick is a user or a system tick
457 */
458void account_process_tick(struct task_struct *p, int user_tick)
459{
460 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
461 struct rq *rq = this_rq();
462
463 if (vtime_accounting_enabled())
464 return;
465
466 if (sched_clock_irqtime) {
467 irqtime_account_process_tick(p, user_tick, rq, 1);
468 return;
469 }
470
471 if (steal_account_process_tick())
472 return;
473
474 if (user_tick)
475 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
476 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
477 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
478 one_jiffy_scaled);
479 else
480 account_idle_time(cputime_one_jiffy);
481}
482
483/*
484 * Account multiple ticks of steal time.
486 * @ticks: number of stolen ticks
487 */
488void account_steal_ticks(unsigned long ticks)
489{
490 account_steal_time(jiffies_to_cputime(ticks));
491}
492
493/*
494 * Account multiple ticks of idle time.
495 * @ticks: number of idle ticks
496 */
497void account_idle_ticks(unsigned long ticks)
498{
499
500 if (sched_clock_irqtime) {
501 irqtime_account_idle_ticks(ticks);
502 return;
503 }
504
505 account_idle_time(jiffies_to_cputime(ticks));
506}
507
508/*
509 * Perform (stime * rtime) / total, but avoid multiplication overflow by
510 * losing precision when the numbers are big.
511 */
512static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
513{
514 u64 scaled;
515
516 for (;;) {
517 /* Make sure "rtime" is the bigger of stime/rtime */
518 if (stime > rtime)
519 swap(rtime, stime);
520
521 /* Make sure 'total' fits in 32 bits */
522 if (total >> 32)
523 goto drop_precision;
524
525 /* Does rtime (and thus stime) fit in 32 bits? */
526 if (!(rtime >> 32))
527 break;
528
529 /* Can we just balance rtime/stime rather than dropping bits? */
530 if (stime >> 31)
531 goto drop_precision;
532
533 /* We can grow stime and shrink rtime and try to make them both fit */
534 stime <<= 1;
535 rtime >>= 1;
536 continue;
537
538drop_precision:
539 /* We drop from rtime, it has more bits than stime */
540 rtime >>= 1;
541 total >>= 1;
542 }
543
544 /*
545 * Make sure gcc understands that this is a 32x32->64 multiply,
546 * followed by a 64/32->64 divide.
547 */
548 scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
549 return (__force cputime_t) scaled;
550}
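
/*
 * Worked example (illustrative nanosecond values): stime = 3e9,
 * rtime = 8e9, total = 6e9.  On the first pass total does not fit in
 * 32 bits, so rtime and total are both halved (rtime = 4e9, total = 3e9).
 * Now total and rtime fit in 32 bits, the loop exits, and the
 * 32x32->64 multiply computes 3e9 * 4e9 / 3e9 = 4e9, which equals the
 * exact result 3e9 * 8e9 / 6e9.  When bits do have to be dropped, only
 * the low-order bits of rtime and total are lost.
 */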
551
552/*
553 * Adjust tick based cputime random precision against scheduler
554 * runtime accounting.
555 */
556static void cputime_adjust(struct task_cputime *curr,
557 struct cputime *prev,
558 cputime_t *ut, cputime_t *st)
559{
560 cputime_t rtime, stime, utime;
561
562 /*
563 * Tick based cputime accounting depends on random scheduling
564 * timeslices of a task to be interrupted or not by the timer.
565 * Depending on these circumstances, the number of these interrupts
566 * may be over or under-optimistic, matching the real user and system
567 * cputime with a variable precision.
568 *
569 * Fix this by scaling these tick based values against the total
570 * runtime accounted by the CFS scheduler.
571 */
572 rtime = nsecs_to_cputime(curr->sum_exec_runtime);
573
574 /*
575 * Update userspace visible utime/stime values only if actual execution
576 * time is bigger than already exported. Note that this can happen: we may
577 * have provided bigger values due to scaling inaccuracy on big numbers.
578 */
579 if (prev->stime + prev->utime >= rtime)
580 goto out;
581
582 stime = curr->stime;
583 utime = curr->utime;
584
585 if (utime == 0) {
586 stime = rtime;
587 } else if (stime == 0) {
588 utime = rtime;
589 } else {
590 cputime_t total = stime + utime;
591
592 stime = scale_stime((__force u64)stime,
593 (__force u64)rtime, (__force u64)total);
594 utime = rtime - stime;
595 }
596
597 /*
598 * If the tick based count grows faster than the scheduler one,
599 * the result of the scaling may go backward.
600 * Let's enforce monotonicity.
601 */
602 prev->stime = max(prev->stime, stime);
603 prev->utime = max(prev->utime, utime);
604
605out:
606 *ut = prev->utime;
607 *st = prev->stime;
608}
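
/*
 * Example (illustrative tick counts): rtime = 10, stime = 6, utime = 2,
 * so total = 8.  scale_stime() yields stime = 6 * 10 / 8 = 7 (integer
 * division) and utime = rtime - stime = 3.  The max() against the
 * previously reported values then keeps both fields from ever moving
 * backwards between two consecutive reads.
 */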
609
610void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
611{
612 struct task_cputime cputime = {
613 .sum_exec_runtime = p->se.sum_exec_runtime,
614 };
615
616 task_cputime(p, &cputime.utime, &cputime.stime);
617 cputime_adjust(&cputime, &p->prev_cputime, ut, st);
618}
619
620/*
621 * Must be called with siglock held.
622 */
623void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
624{
625 struct task_cputime cputime;
626
627 thread_group_cputime(p, &cputime);
628 cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
629}
630#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
631
632#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
633static unsigned long long vtime_delta(struct task_struct *tsk)
634{
635 unsigned long long clock;
636
637 clock = local_clock();
638 if (clock < tsk->vtime_snap)
639 return 0;
640
641 return clock - tsk->vtime_snap;
642}
643
644static cputime_t get_vtime_delta(struct task_struct *tsk)
645{
646 unsigned long long delta = vtime_delta(tsk);
647
648 WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
649 tsk->vtime_snap += delta;
650
651 /* CHECKME: always safe to convert nsecs to cputime? */
652 return nsecs_to_cputime(delta);
653}
654
655static void __vtime_account_system(struct task_struct *tsk)
656{
657 cputime_t delta_cpu = get_vtime_delta(tsk);
658
659 account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
660}
661
662void vtime_account_system(struct task_struct *tsk)
663{
664 write_seqlock(&tsk->vtime_seqlock);
665 __vtime_account_system(tsk);
666 write_sequnlock(&tsk->vtime_seqlock);
667}
668
669void vtime_gen_account_irq_exit(struct task_struct *tsk)
670{
671 write_seqlock(&tsk->vtime_seqlock);
672 __vtime_account_system(tsk);
673 if (context_tracking_in_user())
674 tsk->vtime_snap_whence = VTIME_USER;
675 write_sequnlock(&tsk->vtime_seqlock);
676}
677
678void vtime_account_user(struct task_struct *tsk)
679{
680 cputime_t delta_cpu;
681
682 write_seqlock(&tsk->vtime_seqlock);
683 delta_cpu = get_vtime_delta(tsk);
684 tsk->vtime_snap_whence = VTIME_SYS;
685 account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
686 write_sequnlock(&tsk->vtime_seqlock);
687}
688
689void vtime_user_enter(struct task_struct *tsk)
690{
691 write_seqlock(&tsk->vtime_seqlock);
692 __vtime_account_system(tsk);
693 tsk->vtime_snap_whence = VTIME_USER;
694 write_sequnlock(&tsk->vtime_seqlock);
695}
696
697void vtime_guest_enter(struct task_struct *tsk)
698{
699 /*
700 * The flags must be updated under the lock with
701 * the vtime_snap flush and update.
702 * That enforces a right ordering and update sequence
703 * synchronization against the reader (task_gtime())
704 * that can thus safely catch up with a tickless delta.
705 */
706 write_seqlock(&tsk->vtime_seqlock);
707 __vtime_account_system(tsk);
708 current->flags |= PF_VCPU;
709 write_sequnlock(&tsk->vtime_seqlock);
710}
711EXPORT_SYMBOL_GPL(vtime_guest_enter);
712
713void vtime_guest_exit(struct task_struct *tsk)
714{
715 write_seqlock(&tsk->vtime_seqlock);
716 __vtime_account_system(tsk);
717 current->flags &= ~PF_VCPU;
718 write_sequnlock(&tsk->vtime_seqlock);
719}
720EXPORT_SYMBOL_GPL(vtime_guest_exit);
721
722void vtime_account_idle(struct task_struct *tsk)
723{
724 cputime_t delta_cpu = get_vtime_delta(tsk);
725
726 account_idle_time(delta_cpu);
727}
728
729void arch_vtime_task_switch(struct task_struct *prev)
730{
731 write_seqlock(&prev->vtime_seqlock);
732 prev->vtime_snap_whence = VTIME_SLEEPING;
733 write_sequnlock(&prev->vtime_seqlock);
734
735 write_seqlock(&current->vtime_seqlock);
736 current->vtime_snap_whence = VTIME_SYS;
737 current->vtime_snap = sched_clock_cpu(smp_processor_id());
738 write_sequnlock(&current->vtime_seqlock);
739}
740
741void vtime_init_idle(struct task_struct *t, int cpu)
742{
743 unsigned long flags;
744
745 write_seqlock_irqsave(&t->vtime_seqlock, flags);
746 t->vtime_snap_whence = VTIME_SYS;
747 t->vtime_snap = sched_clock_cpu(cpu);
748 write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
749}
750
751cputime_t task_gtime(struct task_struct *t)
752{
753 unsigned int seq;
754 cputime_t gtime;
755
756 do {
757 seq = read_seqbegin(&t->vtime_seqlock);
758
759 gtime = t->gtime;
760 if (t->flags & PF_VCPU)
761 gtime += vtime_delta(t);
762
763 } while (read_seqretry(&t->vtime_seqlock, seq));
764
765 return gtime;
766}
767
768/*
769 * Fetch cputime raw values from fields of task_struct and
770 * add up the pending nohz execution time since the last
771 * cputime snapshot.
772 */
773static void
774fetch_task_cputime(struct task_struct *t,
775 cputime_t *u_dst, cputime_t *s_dst,
776 cputime_t *u_src, cputime_t *s_src,
777 cputime_t *udelta, cputime_t *sdelta)
778{
779 unsigned int seq;
780 unsigned long long delta;
781
782 do {
783 *udelta = 0;
784 *sdelta = 0;
785
786 seq = read_seqbegin(&t->vtime_seqlock);
787
788 if (u_dst)
789 *u_dst = *u_src;
790 if (s_dst)
791 *s_dst = *s_src;
792
793 /* Task is sleeping, nothing to add */
794 if (t->vtime_snap_whence == VTIME_SLEEPING ||
795 is_idle_task(t))
796 continue;
797
798 delta = vtime_delta(t);
799
800 /*
801 * Task runs either in user or kernel space, add pending nohz time to
802 * the right place.
803 */
804 if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
805 *udelta = delta;
806 } else {
807 if (t->vtime_snap_whence == VTIME_SYS)
808 *sdelta = delta;
809 }
810 } while (read_seqretry(&t->vtime_seqlock, seq));
811}
812
813
814void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
815{
816 cputime_t udelta, sdelta;
817
818 fetch_task_cputime(t, utime, stime, &t->utime,
819 &t->stime, &udelta, &sdelta);
820 if (utime)
821 *utime += udelta;
822 if (stime)
823 *stime += sdelta;
824}
825
826void task_cputime_scaled(struct task_struct *t,
827 cputime_t *utimescaled, cputime_t *stimescaled)
828{
829 cputime_t udelta, sdelta;
830
831 fetch_task_cputime(t, utimescaled, stimescaled,
832 &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
833 if (utimescaled)
834 *utimescaled += cputime_to_scaled(udelta);
835 if (stimescaled)
836 *stimescaled += cputime_to_scaled(sdelta);
837}
838#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Simple CPU accounting cgroup controller
4 */
5
6#ifdef CONFIG_IRQ_TIME_ACCOUNTING
7
8/*
9 * There are no locks covering percpu hardirq/softirq time.
10 * They are only modified in vtime_account, on corresponding CPU
11 * with interrupts disabled. So, writes are safe.
12 * They are read and saved off onto struct rq in update_rq_clock().
13 * This may result in another CPU reading this CPU's irq time and can
14 * race with irq/vtime_account on this CPU. We would either get the old
15 * or the new value with a side effect of accounting a slice of irq time to
16 * the wrong task when an irq is in progress while we read rq->clock. That is a worthy
17 * compromise in place of having locks on each irq in account_system_time.
18 */
19DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
20
21static int sched_clock_irqtime;
22
23void enable_sched_clock_irqtime(void)
24{
25 sched_clock_irqtime = 1;
26}
27
28void disable_sched_clock_irqtime(void)
29{
30 sched_clock_irqtime = 0;
31}
32
33static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
34 enum cpu_usage_stat idx)
35{
36 u64 *cpustat = kcpustat_this_cpu->cpustat;
37
38 u64_stats_update_begin(&irqtime->sync);
39 cpustat[idx] += delta;
40 irqtime->total += delta;
41 irqtime->tick_delta += delta;
42 u64_stats_update_end(&irqtime->sync);
43}
44
45/*
46 * Called after incrementing preempt_count on {soft,}irq_enter
47 * and before decrementing preempt_count on {soft,}irq_exit.
48 */
49void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
50{
51 struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
52 unsigned int pc;
53 s64 delta;
54 int cpu;
55
56 if (!sched_clock_irqtime)
57 return;
58
59 cpu = smp_processor_id();
60 delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
61 irqtime->irq_start_time += delta;
62 pc = irq_count() - offset;
63
64 /*
65 * We do not account for softirq time from ksoftirqd here.
66 * We want to continue accounting softirq time to the ksoftirqd thread
67 * in that case, so as not to confuse the scheduler with a special task
68 * that does not consume any time but still wants to run.
69 */
70 if (pc & HARDIRQ_MASK)
71 irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
72 else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
73 irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
74}
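
/*
 * How the preempt_count snapshot above is interpreted, assuming the
 * usual layout (preemption count in bits 0-7, softirq count in bits
 * 8-15, hardirq count above that):
 *
 *  pc & HARDIRQ_MASK   - at least one hardirq level is active, so the
 *                        delta is charged to CPUTIME_IRQ;
 *  pc & SOFTIRQ_OFFSET - bit 8 is only set while a softirq is actually
 *                        being served (local_bh_disable() counts in the
 *                        bits above it), so the delta is charged to
 *                        CPUTIME_SOFTIRQ unless current is ksoftirqd.
 *
 * Subtracting the caller-supplied offset discounts any count the caller
 * has already pushed for the context being entered, so the test reflects
 * the context whose time slice is being closed rather than the
 * accounting call itself.
 */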
75
76static u64 irqtime_tick_accounted(u64 maxtime)
77{
78 struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
79 u64 delta;
80
81 delta = min(irqtime->tick_delta, maxtime);
82 irqtime->tick_delta -= delta;
83
84 return delta;
85}
86
87#else /* CONFIG_IRQ_TIME_ACCOUNTING */
88
89#define sched_clock_irqtime (0)
90
91static u64 irqtime_tick_accounted(u64 dummy)
92{
93 return 0;
94}
95
96#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
97
98static inline void task_group_account_field(struct task_struct *p, int index,
99 u64 tmp)
100{
101 /*
102 * Since all updates are sure to touch the root cgroup, we
103 * get ourselves ahead and touch it first. If the root cgroup
104 * is the only cgroup, then nothing else should be necessary.
105 *
106 */
107 __this_cpu_add(kernel_cpustat.cpustat[index], tmp);
108
109 cgroup_account_cputime_field(p, index, tmp);
110}
111
112/*
113 * Account user CPU time to a process.
114 * @p: the process that the CPU time gets accounted to
115 * @cputime: the CPU time spent in user space since the last update
116 */
117void account_user_time(struct task_struct *p, u64 cputime)
118{
119 int index;
120
121 /* Add user time to process. */
122 p->utime += cputime;
123 account_group_user_time(p, cputime);
124
125 index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
126
127 /* Add user time to cpustat. */
128 task_group_account_field(p, index, cputime);
129
130 /* Account for user time used */
131 acct_account_cputime(p);
132}
133
134/*
135 * Account guest CPU time to a process.
136 * @p: the process that the CPU time gets accounted to
137 * @cputime: the CPU time spent in virtual machine since the last update
138 */
139void account_guest_time(struct task_struct *p, u64 cputime)
140{
141 u64 *cpustat = kcpustat_this_cpu->cpustat;
142
143 /* Add guest time to process. */
144 p->utime += cputime;
145 account_group_user_time(p, cputime);
146 p->gtime += cputime;
147
148 /* Add guest time to cpustat. */
149 if (task_nice(p) > 0) {
150 task_group_account_field(p, CPUTIME_NICE, cputime);
151 cpustat[CPUTIME_GUEST_NICE] += cputime;
152 } else {
153 task_group_account_field(p, CPUTIME_USER, cputime);
154 cpustat[CPUTIME_GUEST] += cputime;
155 }
156}
157
158/*
159 * Account system CPU time to a process and desired cpustat field
160 * @p: the process that the CPU time gets accounted to
161 * @cputime: the CPU time spent in kernel space since the last update
162 * @index: cpustat field that has to be updated
163 */
164void account_system_index_time(struct task_struct *p,
165 u64 cputime, enum cpu_usage_stat index)
166{
167 /* Add system time to process. */
168 p->stime += cputime;
169 account_group_system_time(p, cputime);
170
171 /* Add system time to cpustat. */
172 task_group_account_field(p, index, cputime);
173
174 /* Account for system time used */
175 acct_account_cputime(p);
176}
177
178/*
179 * Account system CPU time to a process.
180 * @p: the process that the CPU time gets accounted to
181 * @hardirq_offset: the offset to subtract from hardirq_count()
182 * @cputime: the CPU time spent in kernel space since the last update
183 */
184void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
185{
186 int index;
187
188 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
189 account_guest_time(p, cputime);
190 return;
191 }
192
193 if (hardirq_count() - hardirq_offset)
194 index = CPUTIME_IRQ;
195 else if (in_serving_softirq())
196 index = CPUTIME_SOFTIRQ;
197 else
198 index = CPUTIME_SYSTEM;
199
200 account_system_index_time(p, cputime, index);
201}
202
203/*
204 * Account for involuntary wait time.
205 * @cputime: the CPU time spent in involuntary wait
206 */
207void account_steal_time(u64 cputime)
208{
209 u64 *cpustat = kcpustat_this_cpu->cpustat;
210
211 cpustat[CPUTIME_STEAL] += cputime;
212}
213
214/*
215 * Account for idle time.
216 * @cputime: the CPU time spent in idle wait
217 */
218void account_idle_time(u64 cputime)
219{
220 u64 *cpustat = kcpustat_this_cpu->cpustat;
221 struct rq *rq = this_rq();
222
223 if (atomic_read(&rq->nr_iowait) > 0)
224 cpustat[CPUTIME_IOWAIT] += cputime;
225 else
226 cpustat[CPUTIME_IDLE] += cputime;
227}
228
229
230#ifdef CONFIG_SCHED_CORE
231/*
232 * Account for forceidle time due to core scheduling.
233 *
234 * REQUIRES: schedstat is enabled.
235 */
236void __account_forceidle_time(struct task_struct *p, u64 delta)
237{
238 __schedstat_add(p->stats.core_forceidle_sum, delta);
239
240 task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
241}
242#endif
243
244/*
245 * When a guest is interrupted for a longer amount of time, missed clock
246 * ticks are not redelivered later. Due to that, this function may on
247 * occasion account more time than the calling functions think elapsed.
248 */
249static __always_inline u64 steal_account_process_time(u64 maxtime)
250{
251#ifdef CONFIG_PARAVIRT
252 if (static_key_false(&paravirt_steal_enabled)) {
253 u64 steal;
254
255 steal = paravirt_steal_clock(smp_processor_id());
256 steal -= this_rq()->prev_steal_time;
257 steal = min(steal, maxtime);
258 account_steal_time(steal);
259 this_rq()->prev_steal_time += steal;
260
261 return steal;
262 }
263#endif
264 return 0;
265}
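
/*
 * Illustration (assuming HZ=250, i.e. one tick is 4,000,000 ns): if the
 * host reports 12,000,000 ns of new steal time but the caller only has
 * maxtime = 4,000,000 ns to distribute, only 4,000,000 ns are accounted
 * and prev_steal_time advances by that amount; the remaining
 * 8,000,000 ns are still visible as a delta on the next call and get
 * accounted then.
 */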
266
267/*
268 * Account how much elapsed time was spent in steal, irq, or softirq time.
269 */
270static inline u64 account_other_time(u64 max)
271{
272 u64 accounted;
273
274 lockdep_assert_irqs_disabled();
275
276 accounted = steal_account_process_time(max);
277
278 if (accounted < max)
279 accounted += irqtime_tick_accounted(max - accounted);
280
281 return accounted;
282}
283
284#ifdef CONFIG_64BIT
285static inline u64 read_sum_exec_runtime(struct task_struct *t)
286{
287 return t->se.sum_exec_runtime;
288}
289#else
290static u64 read_sum_exec_runtime(struct task_struct *t)
291{
292 u64 ns;
293 struct rq_flags rf;
294 struct rq *rq;
295
296 rq = task_rq_lock(t, &rf);
297 ns = t->se.sum_exec_runtime;
298 task_rq_unlock(rq, t, &rf);
299
300 return ns;
301}
302#endif
303
304/*
305 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
306 * tasks (sum on group iteration) belonging to @tsk's group.
307 */
308void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
309{
310 struct signal_struct *sig = tsk->signal;
311 u64 utime, stime;
312 struct task_struct *t;
313 unsigned int seq, nextseq;
314 unsigned long flags;
315
316 /*
317 * Update current task runtime to account pending time since last
318 * scheduler action or thread_group_cputime() call. This thread group
319 * might have other running tasks on different CPUs, but updating
320 * their runtime can affect syscall performance, so we skip accounting
321 * those pending times and rely only on values updated on tick or
322 * other scheduler action.
323 */
324 if (same_thread_group(current, tsk))
325 (void) task_sched_runtime(current);
326
327 rcu_read_lock();
328 /* Attempt a lockless read on the first round. */
329 nextseq = 0;
330 do {
331 seq = nextseq;
332 flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
333 times->utime = sig->utime;
334 times->stime = sig->stime;
335 times->sum_exec_runtime = sig->sum_sched_runtime;
336
337 for_each_thread(tsk, t) {
338 task_cputime(t, &utime, &stime);
339 times->utime += utime;
340 times->stime += stime;
341 times->sum_exec_runtime += read_sum_exec_runtime(t);
342 }
343 /* If lockless access failed, take the lock. */
344 nextseq = 1;
345 } while (need_seqretry(&sig->stats_lock, seq));
346 done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
347 rcu_read_unlock();
348}
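
/*
 * The retry loop above is the usual "optimistic, then locked" pattern:
 * the first pass starts with seq = 0, so read_seqbegin_or_lock_irqsave()
 * does a lockless seqcount read; if a concurrent writer invalidated it,
 * need_seqretry() requests one more pass with nextseq = 1, which makes
 * the second pass take sig->stats_lock for a stable snapshot, and
 * done_seqretry_irqrestore() releases whichever form was taken.
 */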
349
350#ifdef CONFIG_IRQ_TIME_ACCOUNTING
351/*
352 * Account a tick to a process and cpustat
353 * @p: the process that the CPU time gets accounted to
354 * @user_tick: is the tick from userspace
355 * @ticks: number of ticks to account
356 *
357 * Tick demultiplexing follows the order
358 * - pending hardirq update
359 * - pending softirq update
360 * - user_time
361 * - idle_time
362 * - system time
363 * - check for guest_time
364 * - else account as system_time
365 *
366 * Check for hardirq is done both for system and user time as there is
367 * no timer going off while we are on hardirq and hence we may never get an
368 * opportunity to update it solely in system time.
369 * p->stime and friends are only updated on system time and not on irq
370 * softirq as those do not count in task exec_runtime any more.
371 */
372static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
373 int ticks)
374{
375 u64 other, cputime = TICK_NSEC * ticks;
376
377 /*
378 * When returning from idle, many ticks can get accounted at
379 * once, including some ticks of steal, irq, and softirq time.
380 * Subtract those ticks from the amount of time accounted to
381 * idle, or potentially user or system time. Due to rounding,
382 * other time can exceed ticks occasionally.
383 */
384 other = account_other_time(ULONG_MAX);
385 if (other >= cputime)
386 return;
387
388 cputime -= other;
389
390 if (this_cpu_ksoftirqd() == p) {
391 /*
392 * ksoftirqd time does not get accounted in cpu_softirq_time.
393 * So, we have to handle it separately here.
394 * Also, p->stime needs to be updated for ksoftirqd.
395 */
396 account_system_index_time(p, cputime, CPUTIME_SOFTIRQ);
397 } else if (user_tick) {
398 account_user_time(p, cputime);
399 } else if (p == this_rq()->idle) {
400 account_idle_time(cputime);
401 } else if (p->flags & PF_VCPU) { /* System time or guest time */
402 account_guest_time(p, cputime);
403 } else {
404 account_system_index_time(p, cputime, CPUTIME_SYSTEM);
405 }
406}
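
/*
 * Example (assuming HZ=1000, i.e. TICK_NSEC == 1,000,000): returning
 * from idle with ticks = 5 gives cputime = 5,000,000 ns.  If
 * account_other_time() reports 1,200,000 ns of pending steal/irq/softirq
 * time, only the remaining 3,800,000 ns are charged to ksoftirqd, user,
 * idle, guest or system time by the branches above; had "other" covered
 * the whole 5,000,000 ns, nothing further would have been accounted.
 */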
407
408static void irqtime_account_idle_ticks(int ticks)
409{
410 irqtime_account_process_tick(current, 0, ticks);
411}
412#else /* CONFIG_IRQ_TIME_ACCOUNTING */
413static inline void irqtime_account_idle_ticks(int ticks) { }
414static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
415 int nr_ticks) { }
416#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
417
418/*
419 * Use precise platform statistics if available:
420 */
421#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
422
423# ifndef __ARCH_HAS_VTIME_TASK_SWITCH
424void vtime_task_switch(struct task_struct *prev)
425{
426 if (is_idle_task(prev))
427 vtime_account_idle(prev);
428 else
429 vtime_account_kernel(prev);
430
431 vtime_flush(prev);
432 arch_vtime_task_switch(prev);
433}
434# endif
435
436void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
437{
438 unsigned int pc = irq_count() - offset;
439
440 if (pc & HARDIRQ_OFFSET) {
441 vtime_account_hardirq(tsk);
442 } else if (pc & SOFTIRQ_OFFSET) {
443 vtime_account_softirq(tsk);
444 } else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
445 is_idle_task(tsk)) {
446 vtime_account_idle(tsk);
447 } else {
448 vtime_account_kernel(tsk);
449 }
450}
451
452void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
453 u64 *ut, u64 *st)
454{
455 *ut = curr->utime;
456 *st = curr->stime;
457}
458
459void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
460{
461 *ut = p->utime;
462 *st = p->stime;
463}
464EXPORT_SYMBOL_GPL(task_cputime_adjusted);
465
466void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
467{
468 struct task_cputime cputime;
469
470 thread_group_cputime(p, &cputime);
471
472 *ut = cputime.utime;
473 *st = cputime.stime;
474}
475
476#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
477
478/*
479 * Account a single tick of CPU time.
480 * @p: the process that the CPU time gets accounted to
481 * @user_tick: indicates if the tick is a user or a system tick
482 */
483void account_process_tick(struct task_struct *p, int user_tick)
484{
485 u64 cputime, steal;
486
487 if (vtime_accounting_enabled_this_cpu())
488 return;
489
490 if (sched_clock_irqtime) {
491 irqtime_account_process_tick(p, user_tick, 1);
492 return;
493 }
494
495 cputime = TICK_NSEC;
496 steal = steal_account_process_time(ULONG_MAX);
497
498 if (steal >= cputime)
499 return;
500
501 cputime -= steal;
502
503 if (user_tick)
504 account_user_time(p, cputime);
505 else if ((p != this_rq()->idle) || (irq_count() != HARDIRQ_OFFSET))
506 account_system_time(p, HARDIRQ_OFFSET, cputime);
507 else
508 account_idle_time(cputime);
509}
510
511/*
512 * Account multiple ticks of idle time.
513 * @ticks: number of idle ticks
514 */
515void account_idle_ticks(unsigned long ticks)
516{
517 u64 cputime, steal;
518
519 if (sched_clock_irqtime) {
520 irqtime_account_idle_ticks(ticks);
521 return;
522 }
523
524 cputime = ticks * TICK_NSEC;
525 steal = steal_account_process_time(ULONG_MAX);
526
527 if (steal >= cputime)
528 return;
529
530 cputime -= steal;
531 account_idle_time(cputime);
532}
533
534/*
535 * Adjust tick based cputime random precision against scheduler runtime
536 * accounting.
537 *
538 * Tick based cputime accounting depends on random scheduling timeslices of a
539 * task to be interrupted or not by the timer. Depending on these
540 * circumstances, the number of these interrupts may be over or
541 * under-optimistic, matching the real user and system cputime with a variable
542 * precision.
543 *
544 * Fix this by scaling these tick based values against the total runtime
545 * accounted by the CFS scheduler.
546 *
547 * This code provides the following guarantees:
548 *
549 * stime + utime == rtime
550 * stime_i+1 >= stime_i, utime_i+1 >= utime_i
551 *
552 * Assuming that rtime_i+1 >= rtime_i.
553 */
554void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
555 u64 *ut, u64 *st)
556{
557 u64 rtime, stime, utime;
558 unsigned long flags;
559
560 /* Serialize concurrent callers such that we can honour our guarantees */
561 raw_spin_lock_irqsave(&prev->lock, flags);
562 rtime = curr->sum_exec_runtime;
563
564 /*
565 * This is possible under two circumstances:
566 * - rtime isn't monotonic after all (a bug);
567 * - we got reordered by the lock.
568 *
569 * In both cases this acts as a filter such that the rest of the code
570 * can assume it is monotonic regardless of anything else.
571 */
572 if (prev->stime + prev->utime >= rtime)
573 goto out;
574
575 stime = curr->stime;
576 utime = curr->utime;
577
578 /*
579 * If either stime or utime are 0, assume all runtime is userspace.
580 * Once a task gets some ticks, the monotonicity code at 'update:'
581 * will ensure things converge to the observed ratio.
582 */
583 if (stime == 0) {
584 utime = rtime;
585 goto update;
586 }
587
588 if (utime == 0) {
589 stime = rtime;
590 goto update;
591 }
592
593 stime = mul_u64_u64_div_u64(stime, rtime, stime + utime);
594
595update:
596 /*
597 * Make sure stime doesn't go backwards; this preserves monotonicity
598 * for utime because rtime is monotonic.
599 *
600 * utime_i+1 = rtime_i+1 - stime_i
601 * = rtime_i+1 - (rtime_i - utime_i)
602 * = (rtime_i+1 - rtime_i) + utime_i
603 * >= utime_i
604 */
605 if (stime < prev->stime)
606 stime = prev->stime;
607 utime = rtime - stime;
608
609 /*
610 * Make sure utime doesn't go backwards; this still preserves
611 * monotonicity for stime, analogous argument to above.
612 */
613 if (utime < prev->utime) {
614 utime = prev->utime;
615 stime = rtime - utime;
616 }
617
618 prev->stime = stime;
619 prev->utime = utime;
620out:
621 *ut = prev->utime;
622 *st = prev->stime;
623 raw_spin_unlock_irqrestore(&prev->lock, flags);
624}
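
/*
 * Worked example: rtime = 10,000,000 ns with tick samples
 * stime = 6,000,000 and utime = 2,000,000.  The scaled split is
 * stime = 6e6 * 10e6 / 8e6 = 7,500,000 and utime = 2,500,000.  If an
 * earlier, coarser read had already reported prev->stime = 8,000,000
 * (with prev->utime no larger than 2,000,000), stime is clamped back up
 * to 8,000,000 and utime becomes 2,000,000, preserving both
 * stime + utime == rtime and the monotonicity of the reported values.
 */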
625
626void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
627{
628 struct task_cputime cputime = {
629 .sum_exec_runtime = p->se.sum_exec_runtime,
630 };
631
632 if (task_cputime(p, &cputime.utime, &cputime.stime))
633 cputime.sum_exec_runtime = task_sched_runtime(p);
634 cputime_adjust(&cputime, &p->prev_cputime, ut, st);
635}
636EXPORT_SYMBOL_GPL(task_cputime_adjusted);
637
638void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
639{
640 struct task_cputime cputime;
641
642 thread_group_cputime(p, &cputime);
643 cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
644}
645#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
646
647#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
648static u64 vtime_delta(struct vtime *vtime)
649{
650 unsigned long long clock;
651
652 clock = sched_clock();
653 if (clock < vtime->starttime)
654 return 0;
655
656 return clock - vtime->starttime;
657}
658
659static u64 get_vtime_delta(struct vtime *vtime)
660{
661 u64 delta = vtime_delta(vtime);
662 u64 other;
663
664 /*
665 * Unlike tick based timing, vtime based timing never has lost
666 * ticks, and has no need for steal time accounting to make up for
667 * lost ticks. Vtime accounts a rounded version of actual
668 * elapsed time. Limit account_other_time to prevent rounding
669 * errors from causing elapsed vtime to go negative.
670 */
671 other = account_other_time(delta);
672 WARN_ON_ONCE(vtime->state == VTIME_INACTIVE);
673 vtime->starttime += delta;
674
675 return delta - other;
676}
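
/*
 * Example: if 900,000 ns of wall time elapsed since vtime->starttime but
 * 950,000 ns of irq/steal time happens to be pending (possible since the
 * two are rounded independently), account_other_time(delta) is capped at
 * delta, so this returns 0 rather than a negative value and the excess
 * irq/steal time is simply consumed by a later update.
 */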
677
678static void vtime_account_system(struct task_struct *tsk,
679 struct vtime *vtime)
680{
681 vtime->stime += get_vtime_delta(vtime);
682 if (vtime->stime >= TICK_NSEC) {
683 account_system_time(tsk, irq_count(), vtime->stime);
684 vtime->stime = 0;
685 }
686}
687
688static void vtime_account_guest(struct task_struct *tsk,
689 struct vtime *vtime)
690{
691 vtime->gtime += get_vtime_delta(vtime);
692 if (vtime->gtime >= TICK_NSEC) {
693 account_guest_time(tsk, vtime->gtime);
694 vtime->gtime = 0;
695 }
696}
697
698static void __vtime_account_kernel(struct task_struct *tsk,
699 struct vtime *vtime)
700{
701 /* We might have scheduled out from guest path */
702 if (vtime->state == VTIME_GUEST)
703 vtime_account_guest(tsk, vtime);
704 else
705 vtime_account_system(tsk, vtime);
706}
707
708void vtime_account_kernel(struct task_struct *tsk)
709{
710 struct vtime *vtime = &tsk->vtime;
711
712 if (!vtime_delta(vtime))
713 return;
714
715 write_seqcount_begin(&vtime->seqcount);
716 __vtime_account_kernel(tsk, vtime);
717 write_seqcount_end(&vtime->seqcount);
718}
719
720void vtime_user_enter(struct task_struct *tsk)
721{
722 struct vtime *vtime = &tsk->vtime;
723
724 write_seqcount_begin(&vtime->seqcount);
725 vtime_account_system(tsk, vtime);
726 vtime->state = VTIME_USER;
727 write_seqcount_end(&vtime->seqcount);
728}
729
730void vtime_user_exit(struct task_struct *tsk)
731{
732 struct vtime *vtime = &tsk->vtime;
733
734 write_seqcount_begin(&vtime->seqcount);
735 vtime->utime += get_vtime_delta(vtime);
736 if (vtime->utime >= TICK_NSEC) {
737 account_user_time(tsk, vtime->utime);
738 vtime->utime = 0;
739 }
740 vtime->state = VTIME_SYS;
741 write_seqcount_end(&vtime->seqcount);
742}
743
744void vtime_guest_enter(struct task_struct *tsk)
745{
746 struct vtime *vtime = &tsk->vtime;
747 /*
748 * The flags must be updated under the lock with
749 * the vtime_starttime flush and update.
750 * That enforces a right ordering and update sequence
751 * synchronization against the reader (task_gtime())
752 * that can thus safely catch up with a tickless delta.
753 */
754 write_seqcount_begin(&vtime->seqcount);
755 vtime_account_system(tsk, vtime);
756 tsk->flags |= PF_VCPU;
757 vtime->state = VTIME_GUEST;
758 write_seqcount_end(&vtime->seqcount);
759}
760EXPORT_SYMBOL_GPL(vtime_guest_enter);
761
762void vtime_guest_exit(struct task_struct *tsk)
763{
764 struct vtime *vtime = &tsk->vtime;
765
766 write_seqcount_begin(&vtime->seqcount);
767 vtime_account_guest(tsk, vtime);
768 tsk->flags &= ~PF_VCPU;
769 vtime->state = VTIME_SYS;
770 write_seqcount_end(&vtime->seqcount);
771}
772EXPORT_SYMBOL_GPL(vtime_guest_exit);
773
774void vtime_account_idle(struct task_struct *tsk)
775{
776 account_idle_time(get_vtime_delta(&tsk->vtime));
777}
778
779void vtime_task_switch_generic(struct task_struct *prev)
780{
781 struct vtime *vtime = &prev->vtime;
782
783 write_seqcount_begin(&vtime->seqcount);
784 if (vtime->state == VTIME_IDLE)
785 vtime_account_idle(prev);
786 else
787 __vtime_account_kernel(prev, vtime);
788 vtime->state = VTIME_INACTIVE;
789 vtime->cpu = -1;
790 write_seqcount_end(&vtime->seqcount);
791
792 vtime = &current->vtime;
793
794 write_seqcount_begin(&vtime->seqcount);
795 if (is_idle_task(current))
796 vtime->state = VTIME_IDLE;
797 else if (current->flags & PF_VCPU)
798 vtime->state = VTIME_GUEST;
799 else
800 vtime->state = VTIME_SYS;
801 vtime->starttime = sched_clock();
802 vtime->cpu = smp_processor_id();
803 write_seqcount_end(&vtime->seqcount);
804}
805
806void vtime_init_idle(struct task_struct *t, int cpu)
807{
808 struct vtime *vtime = &t->vtime;
809 unsigned long flags;
810
811 local_irq_save(flags);
812 write_seqcount_begin(&vtime->seqcount);
813 vtime->state = VTIME_IDLE;
814 vtime->starttime = sched_clock();
815 vtime->cpu = cpu;
816 write_seqcount_end(&vtime->seqcount);
817 local_irq_restore(flags);
818}
819
820u64 task_gtime(struct task_struct *t)
821{
822 struct vtime *vtime = &t->vtime;
823 unsigned int seq;
824 u64 gtime;
825
826 if (!vtime_accounting_enabled())
827 return t->gtime;
828
829 do {
830 seq = read_seqcount_begin(&vtime->seqcount);
831
832 gtime = t->gtime;
833 if (vtime->state == VTIME_GUEST)
834 gtime += vtime->gtime + vtime_delta(vtime);
835
836 } while (read_seqcount_retry(&vtime->seqcount, seq));
837
838 return gtime;
839}
840
841/*
842 * Fetch cputime raw values from fields of task_struct and
843 * add up the pending nohz execution time since the last
844 * cputime snapshot.
845 */
846bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
847{
848 struct vtime *vtime = &t->vtime;
849 unsigned int seq;
850 u64 delta;
851 int ret;
852
853 if (!vtime_accounting_enabled()) {
854 *utime = t->utime;
855 *stime = t->stime;
856 return false;
857 }
858
859 do {
860 ret = false;
861 seq = read_seqcount_begin(&vtime->seqcount);
862
863 *utime = t->utime;
864 *stime = t->stime;
865
866 /* Task is sleeping or idle, nothing to add */
867 if (vtime->state < VTIME_SYS)
868 continue;
869
870 ret = true;
871 delta = vtime_delta(vtime);
872
873 /*
874 * Task runs either in user (including guest) or kernel space,
875 * add pending nohz time to the right place.
876 */
877 if (vtime->state == VTIME_SYS)
878 *stime += vtime->stime + delta;
879 else
880 *utime += vtime->utime + delta;
881 } while (read_seqcount_retry(&vtime->seqcount, seq));
882
883 return ret;
884}
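
/*
 * The boolean return value tells the caller whether a pending nohz delta
 * was folded in, i.e. whether vtime accounting is active and the task is
 * in a live VTIME_SYS/VTIME_USER/VTIME_GUEST state.  task_cputime_adjusted()
 * earlier in this file uses this to refresh sum_exec_runtime via
 * task_sched_runtime(), so the runtime it scales against is at least as
 * fresh as the utime/stime just sampled.
 */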
885
886static int vtime_state_fetch(struct vtime *vtime, int cpu)
887{
888 int state = READ_ONCE(vtime->state);
889
890 /*
891 * We raced against a context switch, fetch the
892 * kcpustat task again.
893 */
894 if (vtime->cpu != cpu && vtime->cpu != -1)
895 return -EAGAIN;
896
897 /*
898 * Two possible things here:
899 * 1) We are seeing the scheduling out task (prev) or any past one.
900 * 2) We are seeing the scheduling in task (next) but it hasn't
901 * passed through vtime_task_switch() yet so the pending
902 * cputime of the prev task may not be flushed yet.
903 *
904 * Case 1) is ok but 2) is not. So wait for a safe VTIME state.
905 */
906 if (state == VTIME_INACTIVE)
907 return -EAGAIN;
908
909 return state;
910}
911
912static u64 kcpustat_user_vtime(struct vtime *vtime)
913{
914 if (vtime->state == VTIME_USER)
915 return vtime->utime + vtime_delta(vtime);
916 else if (vtime->state == VTIME_GUEST)
917 return vtime->gtime + vtime_delta(vtime);
918 return 0;
919}
920
921static int kcpustat_field_vtime(u64 *cpustat,
922 struct task_struct *tsk,
923 enum cpu_usage_stat usage,
924 int cpu, u64 *val)
925{
926 struct vtime *vtime = &tsk->vtime;
927 unsigned int seq;
928
929 do {
930 int state;
931
932 seq = read_seqcount_begin(&vtime->seqcount);
933
934 state = vtime_state_fetch(vtime, cpu);
935 if (state < 0)
936 return state;
937
938 *val = cpustat[usage];
939
940 /*
941 * Nice vs. unnice cputime accounting may be inaccurate if
942 * the nice value has changed since the last vtime update.
943 * But a proper fix would involve interrupting the target on nice
944 * updates, which is a no-go on nohz_full (although the scheduler
945 * may still interrupt the target if rescheduling is needed...)
946 */
947 switch (usage) {
948 case CPUTIME_SYSTEM:
949 if (state == VTIME_SYS)
950 *val += vtime->stime + vtime_delta(vtime);
951 break;
952 case CPUTIME_USER:
953 if (task_nice(tsk) <= 0)
954 *val += kcpustat_user_vtime(vtime);
955 break;
956 case CPUTIME_NICE:
957 if (task_nice(tsk) > 0)
958 *val += kcpustat_user_vtime(vtime);
959 break;
960 case CPUTIME_GUEST:
961 if (state == VTIME_GUEST && task_nice(tsk) <= 0)
962 *val += vtime->gtime + vtime_delta(vtime);
963 break;
964 case CPUTIME_GUEST_NICE:
965 if (state == VTIME_GUEST && task_nice(tsk) > 0)
966 *val += vtime->gtime + vtime_delta(vtime);
967 break;
968 default:
969 break;
970 }
971 } while (read_seqcount_retry(&vtime->seqcount, seq));
972
973 return 0;
974}
975
976u64 kcpustat_field(struct kernel_cpustat *kcpustat,
977 enum cpu_usage_stat usage, int cpu)
978{
979 u64 *cpustat = kcpustat->cpustat;
980 u64 val = cpustat[usage];
981 struct rq *rq;
982 int err;
983
984 if (!vtime_accounting_enabled_cpu(cpu))
985 return val;
986
987 rq = cpu_rq(cpu);
988
989 for (;;) {
990 struct task_struct *curr;
991
992 rcu_read_lock();
993 curr = rcu_dereference(rq->curr);
994 if (WARN_ON_ONCE(!curr)) {
995 rcu_read_unlock();
996 return cpustat[usage];
997 }
998
999 err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
1000 rcu_read_unlock();
1001
1002 if (!err)
1003 return val;
1004
1005 cpu_relax();
1006 }
1007}
1008EXPORT_SYMBOL_GPL(kcpustat_field);
1009
1010static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst,
1011 const struct kernel_cpustat *src,
1012 struct task_struct *tsk, int cpu)
1013{
1014 struct vtime *vtime = &tsk->vtime;
1015 unsigned int seq;
1016
1017 do {
1018 u64 *cpustat;
1019 u64 delta;
1020 int state;
1021
1022 seq = read_seqcount_begin(&vtime->seqcount);
1023
1024 state = vtime_state_fetch(vtime, cpu);
1025 if (state < 0)
1026 return state;
1027
1028 *dst = *src;
1029 cpustat = dst->cpustat;
1030
1031 /* Task is sleeping, dead or idle, nothing to add */
1032 if (state < VTIME_SYS)
1033 continue;
1034
1035 delta = vtime_delta(vtime);
1036
1037 /*
1038 * Task runs either in user (including guest) or kernel space,
1039 * add pending nohz time to the right place.
1040 */
1041 if (state == VTIME_SYS) {
1042 cpustat[CPUTIME_SYSTEM] += vtime->stime + delta;
1043 } else if (state == VTIME_USER) {
1044 if (task_nice(tsk) > 0)
1045 cpustat[CPUTIME_NICE] += vtime->utime + delta;
1046 else
1047 cpustat[CPUTIME_USER] += vtime->utime + delta;
1048 } else {
1049 WARN_ON_ONCE(state != VTIME_GUEST);
1050 if (task_nice(tsk) > 0) {
1051 cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta;
1052 cpustat[CPUTIME_NICE] += vtime->gtime + delta;
1053 } else {
1054 cpustat[CPUTIME_GUEST] += vtime->gtime + delta;
1055 cpustat[CPUTIME_USER] += vtime->gtime + delta;
1056 }
1057 }
1058 } while (read_seqcount_retry(&vtime->seqcount, seq));
1059
1060 return 0;
1061}
1062
1063void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
1064{
1065 const struct kernel_cpustat *src = &kcpustat_cpu(cpu);
1066 struct rq *rq;
1067 int err;
1068
1069 if (!vtime_accounting_enabled_cpu(cpu)) {
1070 *dst = *src;
1071 return;
1072 }
1073
1074 rq = cpu_rq(cpu);
1075
1076 for (;;) {
1077 struct task_struct *curr;
1078
1079 rcu_read_lock();
1080 curr = rcu_dereference(rq->curr);
1081 if (WARN_ON_ONCE(!curr)) {
1082 rcu_read_unlock();
1083 *dst = *src;
1084 return;
1085 }
1086
1087 err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
1088 rcu_read_unlock();
1089
1090 if (!err)
1091 return;
1092
1093 cpu_relax();
1094 }
1095}
1096EXPORT_SYMBOL_GPL(kcpustat_cpu_fetch);
1097
1098#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */