1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Implement CPU time clocks for the POSIX clock interface.
4 */
5
6#include <linux/sched/signal.h>
7#include <linux/sched/cputime.h>
8#include <linux/posix-timers.h>
9#include <linux/errno.h>
10#include <linux/math64.h>
11#include <linux/uaccess.h>
12#include <linux/kernel_stat.h>
13#include <trace/events/timer.h>
14#include <linux/tick.h>
15#include <linux/workqueue.h>
16#include <linux/compat.h>
17#include <linux/sched/deadline.h>
18
19#include "posix-timers.h"
20
21static void posix_cpu_timer_rearm(struct k_itimer *timer);
22
23void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
24{
25 posix_cputimers_init(pct);
26 if (cpu_limit != RLIM_INFINITY) {
27 pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
28 pct->timers_active = true;
29 }
30}
31
32/*
33 * Called after updating RLIMIT_CPU to run cpu timer and update
34 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
35 * necessary. Needs siglock protection since other code may update the
36 * expiration cache as well.
37 */
38void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
39{
40 u64 nsecs = rlim_new * NSEC_PER_SEC;
41
42 spin_lock_irq(&task->sighand->siglock);
43 set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
44 spin_unlock_irq(&task->sighand->siglock);
45}
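
/*
 * Illustrative userspace sketch (not part of this kernel file): setrlimit()
 * on RLIMIT_CPU is one caller that, via the rlimit code, reaches
 * update_rlimit_cpu() above. The limit values and handler are arbitrary
 * examples.
 */
#include <signal.h>
#include <sys/resource.h>

static void on_xcpu(int sig) { (void)sig; }	/* soft-limit warning */

int main(void)
{
	/* Example only: 2s soft / 5s hard limit on consumed CPU time. */
	struct rlimit rl = { .rlim_cur = 2, .rlim_max = 5 };

	signal(SIGXCPU, on_xcpu);	/* sent once per second past the soft limit */
	setrlimit(RLIMIT_CPU, &rl);	/* stored by the kernel as 2 * NSEC_PER_SEC */

	for (;;)
		;			/* burn CPU; SIGKILL arrives at the hard limit */
}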
46
47/*
48 * Functions for validating access to tasks.
49 */
50static struct task_struct *lookup_task(const pid_t pid, bool thread,
51 bool gettime)
52{
53 struct task_struct *p;
54
55 /*
56 * If the encoded PID is 0, then the timer is targeted at current
57 * or the process to which current belongs.
58 */
59 if (!pid)
60 return thread ? current : current->group_leader;
61
62 p = find_task_by_vpid(pid);
63 if (!p)
64 return p;
65
66 if (thread)
67 return same_thread_group(p, current) ? p : NULL;
68
69 if (gettime) {
70 /*
71 * For clock_gettime(PROCESS) the task does not need to be
72 * the actual group leader. tsk->sighand gives
73 * access to the group's clock.
74 *
75 * Timers need the group leader because they take a
76 * reference on it and store the task pointer until the
77 * timer is destroyed.
78 */
79 return (p == current || thread_group_leader(p)) ? p : NULL;
80 }
81
82 /*
83 * For processes, require that p is the group leader.
84 */
85 return has_group_leader_pid(p) ? p : NULL;
86}
87
88static struct task_struct *__get_task_for_clock(const clockid_t clock,
89 bool getref, bool gettime)
90{
91 const bool thread = !!CPUCLOCK_PERTHREAD(clock);
92 const pid_t pid = CPUCLOCK_PID(clock);
93 struct task_struct *p;
94
95 if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
96 return NULL;
97
98 rcu_read_lock();
99 p = lookup_task(pid, thread, gettime);
100 if (p && getref)
101 get_task_struct(p);
102 rcu_read_unlock();
103 return p;
104}
105
106static inline struct task_struct *get_task_for_clock(const clockid_t clock)
107{
108 return __get_task_for_clock(clock, true, false);
109}
110
111static inline struct task_struct *get_task_for_clock_get(const clockid_t clock)
112{
113 return __get_task_for_clock(clock, true, true);
114}
115
116static inline int validate_clock_permissions(const clockid_t clock)
117{
118 return __get_task_for_clock(clock, false, false) ? 0 : -EINVAL;
119}
120
121/*
122 * Update expiry time from increment, and increase overrun count,
123 * given the current clock sample.
124 */
125static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
126{
127 u64 delta, incr, expires = timer->it.cpu.node.expires;
128 int i;
129
130 if (!timer->it_interval)
131 return expires;
132
133 if (now < expires)
134 return expires;
135
136 incr = timer->it_interval;
137 delta = now + incr - expires;
138
139 /* Don't use (incr*2 < delta), incr*2 might overflow. */
140 for (i = 0; incr < delta - incr; i++)
141 incr = incr << 1;
142
143 for (; i >= 0; incr >>= 1, i--) {
144 if (delta < incr)
145 continue;
146
147 timer->it.cpu.node.expires += incr;
148 timer->it_overrun += 1LL << i;
149 delta -= incr;
150 }
151 return timer->it.cpu.node.expires;
152}
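
/*
 * Worked example for the loops above (made-up numbers): expires = 100,
 * it_interval = 10, now = 135, so delta = 135 + 10 - 100 = 45. The first
 * loop doubles incr while incr < delta - incr: 10 -> 20 -> 40, ending with
 * i = 2. The second loop walks back down: incr = 40 fits into delta, so
 * expires becomes 140, it_overrun grows by 1 << 2 = 4 and delta drops to 5;
 * incr = 20 and incr = 10 no longer fit. Net effect: the four missed periods
 * (100, 110, 120, 130) are accounted as overruns and the new expiry, 140, is
 * the first multiple of the interval after 'now'.
 */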
153
154/* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
155static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
156{
157 return !(~pct->bases[CPUCLOCK_PROF].nextevt |
158 ~pct->bases[CPUCLOCK_VIRT].nextevt |
159 ~pct->bases[CPUCLOCK_SCHED].nextevt);
160}
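
/*
 * The bitwise trick above: ~U64_MAX == 0, so the OR of the three complements
 * is zero, and the function returns true, only when every nextevt still holds
 * U64_MAX. For example, an armed RLIMIT_CPU of 5 seconds leaves
 * bases[CPUCLOCK_PROF].nextevt at 5 * NSEC_PER_SEC, whose complement is
 * non-zero, so the cache is reported as active.
 */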
161
162static int
163posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
164{
165 int error = validate_clock_permissions(which_clock);
166
167 if (!error) {
168 tp->tv_sec = 0;
169 tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
170 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
171 /*
172 * If sched_clock is using a cycle counter, we
173 * don't have any idea of its true resolution
174 * exported, but it is much more than 1s/HZ.
175 */
176 tp->tv_nsec = 1;
177 }
178 }
179 return error;
180}
181
182static int
183posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
184{
185 int error = validate_clock_permissions(clock);
186
187 /*
188 * You can never reset a CPU clock, but we check for other errors
189 * in the call before failing with EPERM.
190 */
191 return error ? : -EPERM;
192}
193
194/*
195 * Sample a per-thread clock for the given task. clkid is validated.
196 */
197static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
198{
199 u64 utime, stime;
200
201 if (clkid == CPUCLOCK_SCHED)
202 return task_sched_runtime(p);
203
204 task_cputime(p, &utime, &stime);
205
206 switch (clkid) {
207 case CPUCLOCK_PROF:
208 return utime + stime;
209 case CPUCLOCK_VIRT:
210 return utime;
211 default:
212 WARN_ON_ONCE(1);
213 }
214 return 0;
215}
216
217static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
218{
219 samples[CPUCLOCK_PROF] = stime + utime;
220 samples[CPUCLOCK_VIRT] = utime;
221 samples[CPUCLOCK_SCHED] = rtime;
222}
223
224static void task_sample_cputime(struct task_struct *p, u64 *samples)
225{
226 u64 stime, utime;
227
228 task_cputime(p, &utime, &stime);
229 store_samples(samples, stime, utime, p->se.sum_exec_runtime);
230}
231
232static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
233 u64 *samples)
234{
235 u64 stime, utime, rtime;
236
237 utime = atomic64_read(&at->utime);
238 stime = atomic64_read(&at->stime);
239 rtime = atomic64_read(&at->sum_exec_runtime);
240 store_samples(samples, stime, utime, rtime);
241}
242
243/*
244 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
245 * to avoid race conditions with concurrent updates to cputime.
246 */
247static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
248{
249 u64 curr_cputime;
250retry:
251 curr_cputime = atomic64_read(cputime);
252 if (sum_cputime > curr_cputime) {
253 if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
254 goto retry;
255 }
256}
257
258static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
259 struct task_cputime *sum)
260{
261 __update_gt_cputime(&cputime_atomic->utime, sum->utime);
262 __update_gt_cputime(&cputime_atomic->stime, sum->stime);
263 __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
264}
265
266/**
267 * thread_group_sample_cputime - Sample cputime for a given task
268 * @tsk: Task for which cputime needs to be sampled
269 * @samples: Storage for time samples
270 *
271 * Called from sys_getitimer() to calculate the expiry time of an active
272 * timer. That means group cputime accounting is already active. Called
273 * with task sighand lock held.
274 *
275 * Updates @times with an uptodate sample of the thread group cputimes.
276 */
277void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
278{
279 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
280 struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
281
282 WARN_ON_ONCE(!pct->timers_active);
283
284 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
285}
286
287/**
288 * thread_group_start_cputime - Start cputime and return a sample
289 * @tsk: Task for which cputime needs to be started
290 * @samples: Storage for time samples
291 *
292 * The thread group cputime accounting is avoided when there are no posix
293 * CPU timers armed. Before starting a timer it's required to check whether
294 * the time accounting is active. If not, a full update of the atomic
295 * accounting store needs to be done and the accounting enabled.
296 *
297 * Updates @times with an uptodate sample of the thread group cputimes.
298 */
299static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
300{
301 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
302 struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
303
304 /* Check if cputimer isn't running. This is accessed without locking. */
305 if (!READ_ONCE(pct->timers_active)) {
306 struct task_cputime sum;
307
308 /*
309 * The POSIX timer interface allows for absolute time expiry
310 * values through the TIMER_ABSTIME flag, therefore we have
311 * to synchronize the timer to the clock every time we start it.
312 */
313 thread_group_cputime(tsk, &sum);
314 update_gt_cputime(&cputimer->cputime_atomic, &sum);
315
316 /*
317 * We're setting timers_active without a lock. Ensure this
318 * only gets written to in one operation. We set it after
319 * update_gt_cputime() as a small optimization, but
320 * barriers are not required because update_gt_cputime()
321 * can handle concurrent updates.
322 */
323 WRITE_ONCE(pct->timers_active, true);
324 }
325 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
326}
327
328static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
329{
330 struct task_cputime ct;
331
332 thread_group_cputime(tsk, &ct);
333 store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
334}
335
336/*
337 * Sample a process (thread group) clock for the given task clkid. If the
338 * group's cputime accounting is already enabled, read the atomic
339 * store. Otherwise a full update is required. Task's sighand lock must be
340 * held to protect the task traversal on a full update. clkid is already
341 * validated.
342 */
343static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
344 bool start)
345{
346 struct thread_group_cputimer *cputimer = &p->signal->cputimer;
347 struct posix_cputimers *pct = &p->signal->posix_cputimers;
348 u64 samples[CPUCLOCK_MAX];
349
350 if (!READ_ONCE(pct->timers_active)) {
351 if (start)
352 thread_group_start_cputime(p, samples);
353 else
354 __thread_group_cputime(p, samples);
355 } else {
356 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
357 }
358
359 return samples[clkid];
360}
361
362static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
363{
364 const clockid_t clkid = CPUCLOCK_WHICH(clock);
365 struct task_struct *tsk;
366 u64 t;
367
368 tsk = get_task_for_clock_get(clock);
369 if (!tsk)
370 return -EINVAL;
371
372 if (CPUCLOCK_PERTHREAD(clock))
373 t = cpu_clock_sample(clkid, tsk);
374 else
375 t = cpu_clock_sample_group(clkid, tsk, false);
376 put_task_struct(tsk);
377
378 *tp = ns_to_timespec64(t);
379 return 0;
380}
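
/*
 * Illustrative userspace sketch (not part of this kernel file): the
 * predefined CLOCK_PROCESS_CPUTIME_ID / CLOCK_THREAD_CPUTIME_ID clocks and
 * the dynamic ids returned by clock_getcpuclockid() are ultimately served by
 * posix_cpu_clock_get() above, directly or via the wrappers at the end of
 * this file.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec proc_ts, thr_ts;
	clockid_t cid;

	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &proc_ts);	/* thread group total */
	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &thr_ts);	/* calling thread only */

	/* Dynamic clockid for a process chosen by PID (0 means the caller). */
	if (!clock_getcpuclockid(0, &cid))
		clock_gettime(cid, &proc_ts);

	printf("process %lld.%09ld s, thread %lld.%09ld s\n",
	       (long long)proc_ts.tv_sec, proc_ts.tv_nsec,
	       (long long)thr_ts.tv_sec, thr_ts.tv_nsec);
	return 0;
}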
381
382/*
383 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
384 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
385 * new timer already all-zeros initialized.
386 */
387static int posix_cpu_timer_create(struct k_itimer *new_timer)
388{
389 struct task_struct *p = get_task_for_clock(new_timer->it_clock);
390
391 if (!p)
392 return -EINVAL;
393
394 new_timer->kclock = &clock_posix_cpu;
395 timerqueue_init(&new_timer->it.cpu.node);
396 new_timer->it.cpu.task = p;
397 return 0;
398}
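
/*
 * Illustrative userspace sketch (not part of this kernel file): creating and
 * arming a process CPU-time timer from userspace goes through
 * posix_cpu_timer_create() above and posix_cpu_timer_set() below. All values
 * are arbitrary examples.
 */
#include <signal.h>
#include <time.h>

static void on_alrm(int sig) { (void)sig; }

int main(void)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGALRM,
	};
	struct itimerspec its = {
		.it_value    = { .tv_sec = 1 },			/* first expiry: 1s of CPU time */
		.it_interval = { .tv_nsec = 250 * 1000 * 1000 },	/* then every 250ms */
	};
	timer_t tid;

	signal(SIGALRM, on_alrm);
	if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid))
		return 1;
	if (timer_settime(tid, 0, &its, NULL))
		return 1;

	for (;;)
		;	/* consume CPU so the timer can expire and reload */
}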
399
400/*
401 * Clean up a CPU-clock timer that is about to be destroyed.
402 * This is called from timer deletion with the timer already locked.
403 * If we return TIMER_RETRY, it's necessary to release the timer's lock
404 * and try again. (This happens when the timer is in the middle of firing.)
405 */
406static int posix_cpu_timer_del(struct k_itimer *timer)
407{
408 struct cpu_timer *ctmr = &timer->it.cpu;
409 struct task_struct *p = ctmr->task;
410 struct sighand_struct *sighand;
411 unsigned long flags;
412 int ret = 0;
413
414 if (WARN_ON_ONCE(!p))
415 return -EINVAL;
416
417 /*
418 * Protect against sighand release/switch in exit/exec and process/
419 * thread timer list entry concurrent read/writes.
420 */
421 sighand = lock_task_sighand(p, &flags);
422 if (unlikely(sighand == NULL)) {
423 /*
424 * This raced with the reaping of the task. The exit cleanup
425 * should have removed this timer from the timer queue.
426 */
427 WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
428 } else {
429 if (timer->it.cpu.firing)
430 ret = TIMER_RETRY;
431 else
432 cpu_timer_dequeue(ctmr);
433
434 unlock_task_sighand(p, &flags);
435 }
436
437 if (!ret)
438 put_task_struct(p);
439
440 return ret;
441}
442
443static void cleanup_timerqueue(struct timerqueue_head *head)
444{
445 struct timerqueue_node *node;
446 struct cpu_timer *ctmr;
447
448 while ((node = timerqueue_getnext(head))) {
449 timerqueue_del(head, node);
450 ctmr = container_of(node, struct cpu_timer, node);
451 ctmr->head = NULL;
452 }
453}
454
455/*
456 * Clean out CPU timers which are still armed when a thread exits. The
457 * timers are only removed from the list. No other updates are done. The
458 * corresponding posix timers are still accessible, but cannot be rearmed.
459 *
460 * This must be called with the siglock held.
461 */
462static void cleanup_timers(struct posix_cputimers *pct)
463{
464 cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
465 cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
466 cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
467}
468
469/*
470 * These are both called with the siglock held, when the current thread
471 * is being reaped. When the final (leader) thread in the group is reaped,
472 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
473 */
474void posix_cpu_timers_exit(struct task_struct *tsk)
475{
476 cleanup_timers(&tsk->posix_cputimers);
477}
478void posix_cpu_timers_exit_group(struct task_struct *tsk)
479{
480 cleanup_timers(&tsk->signal->posix_cputimers);
481}
482
483/*
484 * Insert the timer on the appropriate list before any timers that
485 * expire later. This must be called with the sighand lock held.
486 */
487static void arm_timer(struct k_itimer *timer)
488{
489 int clkidx = CPUCLOCK_WHICH(timer->it_clock);
490 struct cpu_timer *ctmr = &timer->it.cpu;
491 u64 newexp = cpu_timer_getexpires(ctmr);
492 struct task_struct *p = ctmr->task;
493 struct posix_cputimer_base *base;
494
495 if (CPUCLOCK_PERTHREAD(timer->it_clock))
496 base = p->posix_cputimers.bases + clkidx;
497 else
498 base = p->signal->posix_cputimers.bases + clkidx;
499
500 if (!cpu_timer_enqueue(&base->tqhead, ctmr))
501 return;
502
503 /*
504 * We are the new earliest-expiring POSIX 1.b timer, hence
505 * need to update expiration cache. Take into account that
506 * for process timers we share expiration cache with itimers
507 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
508 */
509 if (newexp < base->nextevt)
510 base->nextevt = newexp;
511
512 if (CPUCLOCK_PERTHREAD(timer->it_clock))
513 tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
514 else
515 tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
516}
517
518/*
519 * The timer is locked, fire it and arrange for its reload.
520 */
521static void cpu_timer_fire(struct k_itimer *timer)
522{
523 struct cpu_timer *ctmr = &timer->it.cpu;
524
525 if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
526 /*
527 * The user doesn't want any signal.
528 */
529 cpu_timer_setexpires(ctmr, 0);
530 } else if (unlikely(timer->sigq == NULL)) {
531 /*
532 * This is a special case for clock_nanosleep,
533 * not a normal timer from sys_timer_create.
534 */
535 wake_up_process(timer->it_process);
536 cpu_timer_setexpires(ctmr, 0);
537 } else if (!timer->it_interval) {
538 /*
539 * One-shot timer. Clear it as soon as it's fired.
540 */
541 posix_timer_event(timer, 0);
542 cpu_timer_setexpires(ctmr, 0);
543 } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
544 /*
545 * The signal did not get queued because the signal
546 * was ignored, so we won't get any callback to
547 * reload the timer. But we need to keep it
548 * ticking in case the signal is deliverable next time.
549 */
550 posix_cpu_timer_rearm(timer);
551 ++timer->it_requeue_pending;
552 }
553}
554
555/*
556 * Guts of sys_timer_settime for CPU timers.
557 * This is called with the timer locked and interrupts disabled.
558 * If we return TIMER_RETRY, it's necessary to release the timer's lock
559 * and try again. (This happens when the timer is in the middle of firing.)
560 */
561static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
562 struct itimerspec64 *new, struct itimerspec64 *old)
563{
564 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
565 u64 old_expires, new_expires, old_incr, val;
566 struct cpu_timer *ctmr = &timer->it.cpu;
567 struct task_struct *p = ctmr->task;
568 struct sighand_struct *sighand;
569 unsigned long flags;
570 int ret = 0;
571
572 if (WARN_ON_ONCE(!p))
573 return -EINVAL;
574
575 /*
576 * Use the to_ktime conversion because that clamps the maximum
577 * value to KTIME_MAX and avoids multiplication overflows.
578 */
579 new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
580
581 /*
582 * Protect against sighand release/switch in exit/exec and p->cpu_timers
583 * and p->signal->cpu_timers read/write in arm_timer()
584 */
585 sighand = lock_task_sighand(p, &flags);
586 /*
587 * If p has just been reaped, we can no
588 * longer get any information about it at all.
589 */
590 if (unlikely(sighand == NULL))
591 return -ESRCH;
592
593 /*
594 * Disarm any old timer after extracting its expiry time.
595 */
596 old_incr = timer->it_interval;
597 old_expires = cpu_timer_getexpires(ctmr);
598
599 if (unlikely(timer->it.cpu.firing)) {
600 timer->it.cpu.firing = -1;
601 ret = TIMER_RETRY;
602 } else {
603 cpu_timer_dequeue(ctmr);
604 }
605
606 /*
607 * We need to sample the current value to convert the new
608 * value from relative to absolute, and to convert the
609 * old value from absolute to relative. To set a process
610 * timer, we need a sample to balance the thread expiry
611 * times (in arm_timer). With an absolute time, we must
612 * check if it's already passed. In short, we need a sample.
613 */
614 if (CPUCLOCK_PERTHREAD(timer->it_clock))
615 val = cpu_clock_sample(clkid, p);
616 else
617 val = cpu_clock_sample_group(clkid, p, true);
618
619 if (old) {
620 if (old_expires == 0) {
621 old->it_value.tv_sec = 0;
622 old->it_value.tv_nsec = 0;
623 } else {
624 /*
625 * Update the timer in case it has overrun already.
626 * If it has, we'll report it as having overrun and
627 * with the next reloaded timer already ticking,
628 * though we are swallowing that pending
629 * notification here to install the new setting.
630 */
631 u64 exp = bump_cpu_timer(timer, val);
632
633 if (val < exp) {
634 old_expires = exp - val;
635 old->it_value = ns_to_timespec64(old_expires);
636 } else {
637 old->it_value.tv_nsec = 1;
638 old->it_value.tv_sec = 0;
639 }
640 }
641 }
642
643 if (unlikely(ret)) {
644 /*
645 * We are colliding with the timer actually firing.
646 * Punt after filling in the timer's old value, and
647 * disable this firing since we are already reporting
648 * it as an overrun (thanks to bump_cpu_timer above).
649 */
650 unlock_task_sighand(p, &flags);
651 goto out;
652 }
653
654 if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
655 new_expires += val;
656 }
657
658 /*
659 * Install the new expiry time (or zero).
660 * For a timer with no notification action, we don't actually
661 * arm the timer (we'll just fake it for timer_gettime).
662 */
663 cpu_timer_setexpires(ctmr, new_expires);
664 if (new_expires != 0 && val < new_expires) {
665 arm_timer(timer);
666 }
667
668 unlock_task_sighand(p, &flags);
669 /*
670 * Install the new reload setting, and
671 * set up the signal and overrun bookkeeping.
672 */
673 timer->it_interval = timespec64_to_ktime(new->it_interval);
674
675 /*
676 * This acts as a modification timestamp for the timer,
677 * so any automatic reload attempt will punt on seeing
678 * that we have reset the timer manually.
679 */
680 timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
681 ~REQUEUE_PENDING;
682 timer->it_overrun_last = 0;
683 timer->it_overrun = -1;
684
685 if (new_expires != 0 && !(val < new_expires)) {
686 /*
687 * The designated time already passed, so we notify
688 * immediately, even if the thread never runs to
689 * accumulate more time on this clock.
690 */
691 cpu_timer_fire(timer);
692 }
693
694 ret = 0;
695 out:
696 if (old)
697 old->it_interval = ns_to_timespec64(old_incr);
698
699 return ret;
700}
701
702static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
703{
704 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
705 struct cpu_timer *ctmr = &timer->it.cpu;
706 u64 now, expires = cpu_timer_getexpires(ctmr);
707 struct task_struct *p = ctmr->task;
708
709 if (WARN_ON_ONCE(!p))
710 return;
711
712 /*
713 * Easy part: convert the reload time.
714 */
715 itp->it_interval = ktime_to_timespec64(timer->it_interval);
716
717 if (!expires)
718 return;
719
720 /*
721 * Sample the clock to take the difference with the expiry time.
722 */
723 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
724 now = cpu_clock_sample(clkid, p);
725 } else {
726 struct sighand_struct *sighand;
727 unsigned long flags;
728
729 /*
730 * Protect against sighand release/switch in exit/exec and
731 * also make timer sampling safe if it ends up calling
732 * thread_group_cputime().
733 */
734 sighand = lock_task_sighand(p, &flags);
735 if (unlikely(sighand == NULL)) {
736 /*
737 * The process has been reaped.
738 * We can't even collect a sample any more.
739 * Disarm the timer, nothing else to do.
740 */
741 cpu_timer_setexpires(ctmr, 0);
742 return;
743 } else {
744 now = cpu_clock_sample_group(clkid, p, false);
745 unlock_task_sighand(p, &flags);
746 }
747 }
748
749 if (now < expires) {
750 itp->it_value = ns_to_timespec64(expires - now);
751 } else {
752 /*
753 * The timer should have expired already, but the firing
754 * hasn't taken place yet. Say it's just about to expire.
755 */
756 itp->it_value.tv_nsec = 1;
757 itp->it_value.tv_sec = 0;
758 }
759}
760
761#define MAX_COLLECTED 20
762
763static u64 collect_timerqueue(struct timerqueue_head *head,
764 struct list_head *firing, u64 now)
765{
766 struct timerqueue_node *next;
767 int i = 0;
768
769 while ((next = timerqueue_getnext(head))) {
770 struct cpu_timer *ctmr;
771 u64 expires;
772
773 ctmr = container_of(next, struct cpu_timer, node);
774 expires = cpu_timer_getexpires(ctmr);
775 /* Limit the number of timers to expire at once */
776 if (++i == MAX_COLLECTED || now < expires)
777 return expires;
778
779 ctmr->firing = 1;
780 cpu_timer_dequeue(ctmr);
781 list_add_tail(&ctmr->elist, firing);
782 }
783
784 return U64_MAX;
785}
786
787static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
788 struct list_head *firing)
789{
790 struct posix_cputimer_base *base = pct->bases;
791 int i;
792
793 for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
794 base->nextevt = collect_timerqueue(&base->tqhead, firing,
795 samples[i]);
796 }
797}
798
799static inline void check_dl_overrun(struct task_struct *tsk)
800{
801 if (tsk->dl.dl_overrun) {
802 tsk->dl.dl_overrun = 0;
803 __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
804 }
805}
806
807static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
808{
809 if (time < limit)
810 return false;
811
812 if (print_fatal_signals) {
813 pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
814 rt ? "RT" : "CPU", hard ? "hard" : "soft",
815 current->comm, task_pid_nr(current));
816 }
817 __group_send_sig_info(signo, SEND_SIG_PRIV, current);
818 return true;
819}
820
821/*
822 * Check for any per-thread CPU timers that have fired and move them off
823 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
824 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
825 */
826static void check_thread_timers(struct task_struct *tsk,
827 struct list_head *firing)
828{
829 struct posix_cputimers *pct = &tsk->posix_cputimers;
830 u64 samples[CPUCLOCK_MAX];
831 unsigned long soft;
832
833 if (dl_task(tsk))
834 check_dl_overrun(tsk);
835
836 if (expiry_cache_is_inactive(pct))
837 return;
838
839 task_sample_cputime(tsk, samples);
840 collect_posix_cputimers(pct, samples, firing);
841
842 /*
843 * Check for the special case thread timers.
844 */
845 soft = task_rlimit(tsk, RLIMIT_RTTIME);
846 if (soft != RLIM_INFINITY) {
847 /* Task RT timeout is accounted in jiffies. RTTIME is usec */
848 unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
849 unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
850
851 /* At the hard limit, send SIGKILL. No further action. */
852 if (hard != RLIM_INFINITY &&
853 check_rlimit(rttime, hard, SIGKILL, true, true))
854 return;
855
856 /* At the soft limit, send a SIGXCPU every second */
857 if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
858 soft += USEC_PER_SEC;
859 tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
860 }
861 }
862
863 if (expiry_cache_is_inactive(pct))
864 tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
865}
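
/*
 * Worked example for the RLIMIT_RTTIME conversion above (assuming HZ = 250,
 * a common config value): one jiffy of RT runtime corresponds to
 * USEC_PER_SEC / HZ = 4000us, so a soft limit of 2,000,000us (2 seconds) is
 * reached once tsk->rt.timeout hits 500 ticks. Past that point the soft
 * limit is raised by USEC_PER_SEC each time, so SIGXCPU repeats roughly once
 * per further second of RT runtime until the hard limit sends SIGKILL.
 */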
866
867static inline void stop_process_timers(struct signal_struct *sig)
868{
869 struct posix_cputimers *pct = &sig->posix_cputimers;
870
871 /* Turn off the active flag. This is done without locking. */
872 WRITE_ONCE(pct->timers_active, false);
873 tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
874}
875
876static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
877 u64 *expires, u64 cur_time, int signo)
878{
879 if (!it->expires)
880 return;
881
882 if (cur_time >= it->expires) {
883 if (it->incr)
884 it->expires += it->incr;
885 else
886 it->expires = 0;
887
888 trace_itimer_expire(signo == SIGPROF ?
889 ITIMER_PROF : ITIMER_VIRTUAL,
890 task_tgid(tsk), cur_time);
891 __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
892 }
893
894 if (it->expires && it->expires < *expires)
895 *expires = it->expires;
896}
897
898/*
899 * Check for any per-process CPU timers that have fired and move them
900 * off the tsk->*_timers list onto the firing list. Per-thread timers
901 * have already been taken off.
902 */
903static void check_process_timers(struct task_struct *tsk,
904 struct list_head *firing)
905{
906 struct signal_struct *const sig = tsk->signal;
907 struct posix_cputimers *pct = &sig->posix_cputimers;
908 u64 samples[CPUCLOCK_MAX];
909 unsigned long soft;
910
911 /*
912 * If there are no active process wide timers (POSIX 1.b, itimers,
913 * RLIMIT_CPU) nothing to check. Also skip the process wide timer
914 * processing when there is already another task handling them.
915 */
916 if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
917 return;
918
919 /*
920 * Signify that a thread is checking for process timers.
921 * Write access to this field is protected by the sighand lock.
922 */
923 pct->expiry_active = true;
924
925 /*
926 * Collect the current process totals. Group accounting is active
927 * so the sample can be taken directly.
928 */
929 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
930 collect_posix_cputimers(pct, samples, firing);
931
932 /*
933 * Check for the special case process timers.
934 */
935 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
936 &pct->bases[CPUCLOCK_PROF].nextevt,
937 samples[CPUCLOCK_PROF], SIGPROF);
938 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
939 &pct->bases[CPUCLOCK_VIRT].nextevt,
940 samples[CPUCLOCK_VIRT], SIGVTALRM);
941
942 soft = task_rlimit(tsk, RLIMIT_CPU);
943 if (soft != RLIM_INFINITY) {
944 /* RLIMIT_CPU is in seconds. Samples are nanoseconds */
945 unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
946 u64 ptime = samples[CPUCLOCK_PROF];
947 u64 softns = (u64)soft * NSEC_PER_SEC;
948 u64 hardns = (u64)hard * NSEC_PER_SEC;
949
950 /* At the hard limit, send SIGKILL. No further action. */
951 if (hard != RLIM_INFINITY &&
952 check_rlimit(ptime, hardns, SIGKILL, false, true))
953 return;
954
955 /* At the soft limit, send a SIGXCPU every second */
956 if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
957 sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
958 softns += NSEC_PER_SEC;
959 }
960
961 /* Update the expiry cache */
962 if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
963 pct->bases[CPUCLOCK_PROF].nextevt = softns;
964 }
965
966 if (expiry_cache_is_inactive(pct))
967 stop_process_timers(sig);
968
969 pct->expiry_active = false;
970}
971
972/*
973 * This is called from the signal code (via posixtimer_rearm)
974 * when the last timer signal was delivered and we have to reload the timer.
975 */
976static void posix_cpu_timer_rearm(struct k_itimer *timer)
977{
978 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
979 struct cpu_timer *ctmr = &timer->it.cpu;
980 struct task_struct *p = ctmr->task;
981 struct sighand_struct *sighand;
982 unsigned long flags;
983 u64 now;
984
985 if (WARN_ON_ONCE(!p))
986 return;
987
988 /*
989 * Fetch the current sample and update the timer's expiry time.
990 */
991 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
992 now = cpu_clock_sample(clkid, p);
993 bump_cpu_timer(timer, now);
994 if (unlikely(p->exit_state))
995 return;
996
997 /* Protect timer list r/w in arm_timer() */
998 sighand = lock_task_sighand(p, &flags);
999 if (!sighand)
1000 return;
1001 } else {
1002 /*
1003 * Protect arm_timer() and timer sampling in case of call to
1004 * thread_group_cputime().
1005 */
1006 sighand = lock_task_sighand(p, &flags);
1007 if (unlikely(sighand == NULL)) {
1008 /*
1009 * The process has been reaped.
1010 * We can't even collect a sample any more.
1011 */
1012 cpu_timer_setexpires(ctmr, 0);
1013 return;
1014 } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
1015 /* If the process is dying, no need to rearm */
1016 goto unlock;
1017 }
1018 now = cpu_clock_sample_group(clkid, p, true);
1019 bump_cpu_timer(timer, now);
1020 /* Leave the sighand locked for the call below. */
1021 }
1022
1023 /*
1024 * Now re-arm for the new expiry time.
1025 */
1026 arm_timer(timer);
1027unlock:
1028 unlock_task_sighand(p, &flags);
1029}
1030
1031/**
1032 * task_cputimers_expired - Check whether posix CPU timers are expired
1033 *
1034 * @samples: Array of current samples for the CPUCLOCK clocks
1035 * @pct: Pointer to a posix_cputimers container
1036 *
1037 * Returns true if any member of @samples is greater than the corresponding
1038 * member of @pct->bases[CLK].nextevt. False otherwise
1039 */
1040static inline bool
1041task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
1042{
1043 int i;
1044
1045 for (i = 0; i < CPUCLOCK_MAX; i++) {
1046 if (samples[i] >= pct->bases[i].nextevt)
1047 return true;
1048 }
1049 return false;
1050}
1051
1052/**
1053 * fastpath_timer_check - POSIX CPU timers fast path.
1054 *
1055 * @tsk: The task (thread) being checked.
1056 *
1057 * Check the task and thread group timers. If both are zero (there are no
1058 * timers set) return false. Otherwise snapshot the task and thread group
1059 * timers and compare them with the corresponding expiration times. Return
1060 * true if a timer has expired, else return false.
1061 */
1062static inline bool fastpath_timer_check(struct task_struct *tsk)
1063{
1064 struct posix_cputimers *pct = &tsk->posix_cputimers;
1065 struct signal_struct *sig;
1066
1067 if (!expiry_cache_is_inactive(pct)) {
1068 u64 samples[CPUCLOCK_MAX];
1069
1070 task_sample_cputime(tsk, samples);
1071 if (task_cputimers_expired(samples, pct))
1072 return true;
1073 }
1074
1075 sig = tsk->signal;
1076 pct = &sig->posix_cputimers;
1077 /*
1078 * Check if thread group timers expired when timers are active and
1079 * no other thread in the group is already handling expiry for
1080 * thread group cputimers. These fields are read without the
1081 * sighand lock. However, this is fine because this is meant to be
1082 * a fastpath heuristic to determine whether we should try to
1083 * acquire the sighand lock to handle timer expiry.
1084 *
1085 * In the worst case scenario, if concurrently timers_active is set
1086 * or expiry_active is cleared, but the current thread doesn't see
1087 * the change yet, the timer checks are delayed until the next
1088 * thread in the group gets a scheduler interrupt to handle the
1089 * timer. This isn't an issue in practice because these types of
1090 * delays with signals actually getting sent are expected.
1091 */
1092 if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
1093 u64 samples[CPUCLOCK_MAX];
1094
1095 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
1096 samples);
1097
1098 if (task_cputimers_expired(samples, pct))
1099 return true;
1100 }
1101
1102 if (dl_task(tsk) && tsk->dl.dl_overrun)
1103 return true;
1104
1105 return false;
1106}
1107
1108/*
1109 * This is called from the timer interrupt handler. The irq handler has
1110 * already updated our counts. We need to check if any timers fire now.
1111 * Interrupts are disabled.
1112 */
1113void run_posix_cpu_timers(void)
1114{
1115 struct task_struct *tsk = current;
1116 struct k_itimer *timer, *next;
1117 unsigned long flags;
1118 LIST_HEAD(firing);
1119
1120 lockdep_assert_irqs_disabled();
1121
1122 /*
1123 * The fast path checks that there are no expired thread or thread
1124 * group timers. If that's so, just return.
1125 */
1126 if (!fastpath_timer_check(tsk))
1127 return;
1128
1129 if (!lock_task_sighand(tsk, &flags))
1130 return;
1131 /*
1132 * Here we take all the timers that are firing off the
1133 * tsk->signal->cpu_timers[N] and tsk->cpu_timers[N] lists and
1134 * put them on the firing list.
1135 */
1136 check_thread_timers(tsk, &firing);
1137
1138 check_process_timers(tsk, &firing);
1139
1140 /*
1141 * We must release these locks before taking any timer's lock.
1142 * There is a potential race with timer deletion here, as the
1143 * siglock now protects our private firing list. We have set
1144 * the firing flag in each timer, so that a deletion attempt
1145 * that gets the timer lock before we do will give it up and
1146 * spin until we've taken care of that timer below.
1147 */
1148 unlock_task_sighand(tsk, &flags);
1149
1150 /*
1151 * Now that all the timers on our list have the firing flag,
1152 * no one will touch their list entries but us. We'll take
1153 * each timer's lock before clearing its firing flag, so no
1154 * timer call will interfere.
1155 */
1156 list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
1157 int cpu_firing;
1158
1159 spin_lock(&timer->it_lock);
1160 list_del_init(&timer->it.cpu.elist);
1161 cpu_firing = timer->it.cpu.firing;
1162 timer->it.cpu.firing = 0;
1163 /*
1164 * The firing flag is -1 if we collided with a reset
1165 * of the timer, which already reported this
1166 * almost-firing as an overrun. So don't generate an event.
1167 */
1168 if (likely(cpu_firing >= 0))
1169 cpu_timer_fire(timer);
1170 spin_unlock(&timer->it_lock);
1171 }
1172}
1173
1174/*
1175 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1176 * The tsk->sighand->siglock must be held by the caller.
1177 */
1178void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
1179 u64 *newval, u64 *oldval)
1180{
1181 u64 now, *nextevt;
1182
1183 if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
1184 return;
1185
1186 nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
1187 now = cpu_clock_sample_group(clkid, tsk, true);
1188
1189 if (oldval) {
1190 /*
1191 * We are setting itimer. The *oldval is absolute and we update
1192 * it to be relative, *newval argument is relative and we update
1193 * it to be absolute.
1194 */
1195 if (*oldval) {
1196 if (*oldval <= now) {
1197 /* Just about to fire. */
1198 *oldval = TICK_NSEC;
1199 } else {
1200 *oldval -= now;
1201 }
1202 }
1203
1204 if (!*newval)
1205 return;
1206 *newval += now;
1207 }
1208
1209 /*
1210 * Update expiration cache if this is the earliest timer. CPUCLOCK_PROF
1211 * expiry cache is also used by RLIMIT_CPU!
1212 */
1213 if (*newval < *nextevt)
1214 *nextevt = *newval;
1215
1216 tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
1217}
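
/*
 * Illustrative userspace sketch (not part of this kernel file): setitimer()
 * with ITIMER_PROF or ITIMER_VIRTUAL is one of the callers that reaches
 * set_process_cpu_timer() above via the itimer code. Values are examples.
 */
#include <signal.h>
#include <sys/time.h>

static void on_prof(int sig) { (void)sig; }

int main(void)
{
	/* Example only: fire after 1s of CPU time, then every further 1s. */
	struct itimerval itv = {
		.it_value    = { .tv_sec = 1 },
		.it_interval = { .tv_sec = 1 },
	};

	signal(SIGPROF, on_prof);		/* ITIMER_PROF expiry delivers SIGPROF */
	setitimer(ITIMER_PROF, &itv, NULL);

	for (;;)
		;	/* burn user+system CPU time */
}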
1218
1219static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1220 const struct timespec64 *rqtp)
1221{
1222 struct itimerspec64 it;
1223 struct k_itimer timer;
1224 u64 expires;
1225 int error;
1226
1227 /*
1228 * Set up a temporary timer and then wait for it to go off.
1229 */
1230 memset(&timer, 0, sizeof timer);
1231 spin_lock_init(&timer.it_lock);
1232 timer.it_clock = which_clock;
1233 timer.it_overrun = -1;
1234 error = posix_cpu_timer_create(&timer);
1235 timer.it_process = current;
1236
1237 if (!error) {
1238 static struct itimerspec64 zero_it;
1239 struct restart_block *restart;
1240
1241 memset(&it, 0, sizeof(it));
1242 it.it_value = *rqtp;
1243
1244 spin_lock_irq(&timer.it_lock);
1245 error = posix_cpu_timer_set(&timer, flags, &it, NULL);
1246 if (error) {
1247 spin_unlock_irq(&timer.it_lock);
1248 return error;
1249 }
1250
1251 while (!signal_pending(current)) {
1252 if (!cpu_timer_getexpires(&timer.it.cpu)) {
1253 /*
1254 * Our timer fired and was reset; the
1255 * deletion below cannot fail.
1256 */
1257 posix_cpu_timer_del(&timer);
1258 spin_unlock_irq(&timer.it_lock);
1259 return 0;
1260 }
1261
1262 /*
1263 * Block until cpu_timer_fire (or a signal) wakes us.
1264 */
1265 __set_current_state(TASK_INTERRUPTIBLE);
1266 spin_unlock_irq(&timer.it_lock);
1267 schedule();
1268 spin_lock_irq(&timer.it_lock);
1269 }
1270
1271 /*
1272 * We were interrupted by a signal.
1273 */
1274 expires = cpu_timer_getexpires(&timer.it.cpu);
1275 error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
1276 if (!error) {
1277 /*
1278 * Timer is now unarmed, deletion can not fail.
1279 */
1280 posix_cpu_timer_del(&timer);
1281 }
1282 spin_unlock_irq(&timer.it_lock);
1283
1284 while (error == TIMER_RETRY) {
1285 /*
1286 * We need to handle the case when the timer was or is in the
1287 * middle of firing. In other cases we already freed
1288 * resources.
1289 */
1290 spin_lock_irq(&timer.it_lock);
1291 error = posix_cpu_timer_del(&timer);
1292 spin_unlock_irq(&timer.it_lock);
1293 }
1294
1295 if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
1296 /*
1297 * It actually did fire already.
1298 */
1299 return 0;
1300 }
1301
1302 error = -ERESTART_RESTARTBLOCK;
1303 /*
1304 * Report back to the user the time still remaining.
1305 */
1306 restart = &current->restart_block;
1307 restart->nanosleep.expires = expires;
1308 if (restart->nanosleep.type != TT_NONE)
1309 error = nanosleep_copyout(restart, &it.it_value);
1310 }
1311
1312 return error;
1313}
1314
1315static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1316
1317static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1318 const struct timespec64 *rqtp)
1319{
1320 struct restart_block *restart_block = &current->restart_block;
1321 int error;
1322
1323 /*
1324 * Diagnose required errors first.
1325 */
1326 if (CPUCLOCK_PERTHREAD(which_clock) &&
1327 (CPUCLOCK_PID(which_clock) == 0 ||
1328 CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
1329 return -EINVAL;
1330
1331 error = do_cpu_nanosleep(which_clock, flags, rqtp);
1332
1333 if (error == -ERESTART_RESTARTBLOCK) {
1334
1335 if (flags & TIMER_ABSTIME)
1336 return -ERESTARTNOHAND;
1337
1338 restart_block->fn = posix_cpu_nsleep_restart;
1339 restart_block->nanosleep.clockid = which_clock;
1340 }
1341 return error;
1342}
1343
1344static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1345{
1346 clockid_t which_clock = restart_block->nanosleep.clockid;
1347 struct timespec64 t;
1348
1349 t = ns_to_timespec64(restart_block->nanosleep.expires);
1350
1351 return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
1352}
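
/*
 * Illustrative userspace sketch (not part of this kernel file): sleeping on
 * a process CPU-time clock via clock_nanosleep() ends up in
 * do_cpu_nanosleep() above. Sleeping on the caller's own thread clock is
 * rejected with EINVAL (see posix_cpu_nsleep()), since a blocked thread
 * would never accumulate the requested CPU time. Values are arbitrary
 * examples.
 */
#include <pthread.h>
#include <time.h>

static void *burn(void *arg)
{
	(void)arg;
	for (;;)
		;	/* consume process CPU time so the sleep below can finish */
}

int main(void)
{
	struct timespec req = { .tv_nsec = 100 * 1000 * 1000 };	/* 100ms of CPU time */
	pthread_t worker;

	pthread_create(&worker, NULL, burn, NULL);

	/* Relative sleep: returns once the process has consumed ~100ms more CPU. */
	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, NULL);
	return 0;
}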
1353
1354#define PROCESS_CLOCK make_process_cpuclock(0, CPUCLOCK_SCHED)
1355#define THREAD_CLOCK make_thread_cpuclock(0, CPUCLOCK_SCHED)
1356
1357static int process_cpu_clock_getres(const clockid_t which_clock,
1358 struct timespec64 *tp)
1359{
1360 return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1361}
1362static int process_cpu_clock_get(const clockid_t which_clock,
1363 struct timespec64 *tp)
1364{
1365 return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1366}
1367static int process_cpu_timer_create(struct k_itimer *timer)
1368{
1369 timer->it_clock = PROCESS_CLOCK;
1370 return posix_cpu_timer_create(timer);
1371}
1372static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1373 const struct timespec64 *rqtp)
1374{
1375 return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
1376}
1377static int thread_cpu_clock_getres(const clockid_t which_clock,
1378 struct timespec64 *tp)
1379{
1380 return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1381}
1382static int thread_cpu_clock_get(const clockid_t which_clock,
1383 struct timespec64 *tp)
1384{
1385 return posix_cpu_clock_get(THREAD_CLOCK, tp);
1386}
1387static int thread_cpu_timer_create(struct k_itimer *timer)
1388{
1389 timer->it_clock = THREAD_CLOCK;
1390 return posix_cpu_timer_create(timer);
1391}
1392
1393const struct k_clock clock_posix_cpu = {
1394 .clock_getres = posix_cpu_clock_getres,
1395 .clock_set = posix_cpu_clock_set,
1396 .clock_get = posix_cpu_clock_get,
1397 .timer_create = posix_cpu_timer_create,
1398 .nsleep = posix_cpu_nsleep,
1399 .timer_set = posix_cpu_timer_set,
1400 .timer_del = posix_cpu_timer_del,
1401 .timer_get = posix_cpu_timer_get,
1402 .timer_rearm = posix_cpu_timer_rearm,
1403};
1404
1405const struct k_clock clock_process = {
1406 .clock_getres = process_cpu_clock_getres,
1407 .clock_get = process_cpu_clock_get,
1408 .timer_create = process_cpu_timer_create,
1409 .nsleep = process_cpu_nsleep,
1410};
1411
1412const struct k_clock clock_thread = {
1413 .clock_getres = thread_cpu_clock_getres,
1414 .clock_get = thread_cpu_clock_get,
1415 .timer_create = thread_cpu_timer_create,
1416};
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Implement CPU time clocks for the POSIX clock interface.
4 */
5
6#include <linux/sched/signal.h>
7#include <linux/sched/cputime.h>
8#include <linux/posix-timers.h>
9#include <linux/errno.h>
10#include <linux/math64.h>
11#include <linux/uaccess.h>
12#include <linux/kernel_stat.h>
13#include <trace/events/timer.h>
14#include <linux/tick.h>
15#include <linux/workqueue.h>
16#include <linux/compat.h>
17#include <linux/sched/deadline.h>
18#include <linux/task_work.h>
19
20#include "posix-timers.h"
21
22static void posix_cpu_timer_rearm(struct k_itimer *timer);
23
24void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
25{
26 posix_cputimers_init(pct);
27 if (cpu_limit != RLIM_INFINITY) {
28 pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
29 pct->timers_active = true;
30 }
31}
32
33/*
34 * Called after updating RLIMIT_CPU to run cpu timer and update
35 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
36 * necessary. Needs siglock protection since other code may update the
37 * expiration cache as well.
38 *
39 * Returns 0 on success, -ESRCH on failure. Can fail if the task is exiting and
40 * we cannot lock_task_sighand. Cannot fail if task is current.
41 */
42int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
43{
44 u64 nsecs = rlim_new * NSEC_PER_SEC;
45 unsigned long irq_fl;
46
47 if (!lock_task_sighand(task, &irq_fl))
48 return -ESRCH;
49 set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
50 unlock_task_sighand(task, &irq_fl);
51 return 0;
52}
53
54/*
55 * Functions for validating access to tasks.
56 */
57static struct pid *pid_for_clock(const clockid_t clock, bool gettime)
58{
59 const bool thread = !!CPUCLOCK_PERTHREAD(clock);
60 const pid_t upid = CPUCLOCK_PID(clock);
61 struct pid *pid;
62
63 if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
64 return NULL;
65
66 /*
67 * If the encoded PID is 0, then the timer is targeted at current
68 * or the process to which current belongs.
69 */
70 if (upid == 0)
71 return thread ? task_pid(current) : task_tgid(current);
72
73 pid = find_vpid(upid);
74 if (!pid)
75 return NULL;
76
77 if (thread) {
78 struct task_struct *tsk = pid_task(pid, PIDTYPE_PID);
79 return (tsk && same_thread_group(tsk, current)) ? pid : NULL;
80 }
81
82 /*
83 * For clock_gettime(PROCESS) allow finding the process
84 * with the pid of the current task. The code needs the tgid
85 * of the process so that pid_task(pid, PIDTYPE_TGID) can be
86 * used to find the process.
87 */
88 if (gettime && (pid == task_pid(current)))
89 return task_tgid(current);
90
91 /*
92 * For processes, require that the pid identifies a process.
93 */
94 return pid_has_task(pid, PIDTYPE_TGID) ? pid : NULL;
95}
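
/*
 * Decoding note (layout defined by the CPUCLOCK_* helpers in
 * linux/posix-timers.h, not in this file): a CPU clockid packs the target
 * pid and the clock type roughly as clockid = (~pid << 3) | (per-thread ?
 * 4 : 0) | which. As a made-up example, the per-process CPUCLOCK_PROF clock
 * of pid 100 encodes as (~100 << 3) | 0; decoding it here yields
 * CPUCLOCK_PID() == 100, CPUCLOCK_PERTHREAD() == false and
 * CPUCLOCK_WHICH() == CPUCLOCK_PROF. An encoded pid of 0 means "current",
 * handled by the first check above.
 */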
96
97static inline int validate_clock_permissions(const clockid_t clock)
98{
99 int ret;
100
101 rcu_read_lock();
102 ret = pid_for_clock(clock, false) ? 0 : -EINVAL;
103 rcu_read_unlock();
104
105 return ret;
106}
107
108static inline enum pid_type clock_pid_type(const clockid_t clock)
109{
110 return CPUCLOCK_PERTHREAD(clock) ? PIDTYPE_PID : PIDTYPE_TGID;
111}
112
113static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
114{
115 return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));
116}
117
118/*
119 * Update expiry time from increment, and increase overrun count,
120 * given the current clock sample.
121 */
122static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
123{
124 u64 delta, incr, expires = timer->it.cpu.node.expires;
125 int i;
126
127 if (!timer->it_interval)
128 return expires;
129
130 if (now < expires)
131 return expires;
132
133 incr = timer->it_interval;
134 delta = now + incr - expires;
135
136 /* Don't use (incr*2 < delta), incr*2 might overflow. */
137 for (i = 0; incr < delta - incr; i++)
138 incr = incr << 1;
139
140 for (; i >= 0; incr >>= 1, i--) {
141 if (delta < incr)
142 continue;
143
144 timer->it.cpu.node.expires += incr;
145 timer->it_overrun += 1LL << i;
146 delta -= incr;
147 }
148 return timer->it.cpu.node.expires;
149}
150
151/* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
152static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
153{
154 return !(~pct->bases[CPUCLOCK_PROF].nextevt |
155 ~pct->bases[CPUCLOCK_VIRT].nextevt |
156 ~pct->bases[CPUCLOCK_SCHED].nextevt);
157}
158
159static int
160posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
161{
162 int error = validate_clock_permissions(which_clock);
163
164 if (!error) {
165 tp->tv_sec = 0;
166 tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
167 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
168 /*
169 * If sched_clock is using a cycle counter, we
170 * don't have any idea of its true resolution
171 * exported, but it is much more than 1s/HZ.
172 */
173 tp->tv_nsec = 1;
174 }
175 }
176 return error;
177}
178
179static int
180posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
181{
182 int error = validate_clock_permissions(clock);
183
184 /*
185 * You can never reset a CPU clock, but we check for other errors
186 * in the call before failing with EPERM.
187 */
188 return error ? : -EPERM;
189}
190
191/*
192 * Sample a per-thread clock for the given task. clkid is validated.
193 */
194static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
195{
196 u64 utime, stime;
197
198 if (clkid == CPUCLOCK_SCHED)
199 return task_sched_runtime(p);
200
201 task_cputime(p, &utime, &stime);
202
203 switch (clkid) {
204 case CPUCLOCK_PROF:
205 return utime + stime;
206 case CPUCLOCK_VIRT:
207 return utime;
208 default:
209 WARN_ON_ONCE(1);
210 }
211 return 0;
212}
213
214static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
215{
216 samples[CPUCLOCK_PROF] = stime + utime;
217 samples[CPUCLOCK_VIRT] = utime;
218 samples[CPUCLOCK_SCHED] = rtime;
219}
220
221static void task_sample_cputime(struct task_struct *p, u64 *samples)
222{
223 u64 stime, utime;
224
225 task_cputime(p, &utime, &stime);
226 store_samples(samples, stime, utime, p->se.sum_exec_runtime);
227}
228
229static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
230 u64 *samples)
231{
232 u64 stime, utime, rtime;
233
234 utime = atomic64_read(&at->utime);
235 stime = atomic64_read(&at->stime);
236 rtime = atomic64_read(&at->sum_exec_runtime);
237 store_samples(samples, stime, utime, rtime);
238}
239
240/*
241 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
242 * to avoid race conditions with concurrent updates to cputime.
243 */
244static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
245{
246 u64 curr_cputime = atomic64_read(cputime);
247
248 do {
249 if (sum_cputime <= curr_cputime)
250 return;
251 } while (!atomic64_try_cmpxchg(cputime, &curr_cputime, sum_cputime));
252}
253
254static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
255 struct task_cputime *sum)
256{
257 __update_gt_cputime(&cputime_atomic->utime, sum->utime);
258 __update_gt_cputime(&cputime_atomic->stime, sum->stime);
259 __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
260}
261
262/**
263 * thread_group_sample_cputime - Sample cputime for a given task
264 * @tsk: Task for which cputime needs to be sampled
265 * @samples: Storage for time samples
266 *
267 * Called from sys_getitimer() to calculate the expiry time of an active
268 * timer. That means group cputime accounting is already active. Called
269 * with task sighand lock held.
270 *
271 * Updates @samples with an up-to-date sample of the thread group cputimes.
272 */
273void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
274{
275 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
276 struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
277
278 WARN_ON_ONCE(!pct->timers_active);
279
280 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
281}
282
283/**
284 * thread_group_start_cputime - Start cputime and return a sample
285 * @tsk: Task for which cputime needs to be started
286 * @samples: Storage for time samples
287 *
288 * The thread group cputime accounting is avoided when there are no posix
289 * CPU timers armed. Before starting a timer it's required to check whether
290 * the time accounting is active. If not, a full update of the atomic
291 * accounting store needs to be done and the accounting enabled.
292 *
293 * Updates @samples with an up-to-date sample of the thread group cputimes.
294 */
295static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
296{
297 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
298 struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
299
300 lockdep_assert_task_sighand_held(tsk);
301
302 /* Check if cputimer isn't running. This is accessed without locking. */
303 if (!READ_ONCE(pct->timers_active)) {
304 struct task_cputime sum;
305
306 /*
307 * The POSIX timer interface allows for absolute time expiry
308 * values through the TIMER_ABSTIME flag, therefore we have
309 * to synchronize the timer to the clock every time we start it.
310 */
311 thread_group_cputime(tsk, &sum);
312 update_gt_cputime(&cputimer->cputime_atomic, &sum);
313
314 /*
315 * We're setting timers_active without a lock. Ensure this
316 * only gets written to in one operation. We set it after
317 * update_gt_cputime() as a small optimization, but
318 * barriers are not required because update_gt_cputime()
319 * can handle concurrent updates.
320 */
321 WRITE_ONCE(pct->timers_active, true);
322 }
323 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
324}
325
326static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
327{
328 struct task_cputime ct;
329
330 thread_group_cputime(tsk, &ct);
331 store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
332}
333
334/*
335 * Sample a process (thread group) clock for the given task clkid. If the
336 * group's cputime accounting is already enabled, read the atomic
337 * store. Otherwise a full update is required. clkid is already validated.
338 */
339static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
340 bool start)
341{
342 struct thread_group_cputimer *cputimer = &p->signal->cputimer;
343 struct posix_cputimers *pct = &p->signal->posix_cputimers;
344 u64 samples[CPUCLOCK_MAX];
345
346 if (!READ_ONCE(pct->timers_active)) {
347 if (start)
348 thread_group_start_cputime(p, samples);
349 else
350 __thread_group_cputime(p, samples);
351 } else {
352 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
353 }
354
355 return samples[clkid];
356}
357
358static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
359{
360 const clockid_t clkid = CPUCLOCK_WHICH(clock);
361 struct task_struct *tsk;
362 u64 t;
363
364 rcu_read_lock();
365 tsk = pid_task(pid_for_clock(clock, true), clock_pid_type(clock));
366 if (!tsk) {
367 rcu_read_unlock();
368 return -EINVAL;
369 }
370
371 if (CPUCLOCK_PERTHREAD(clock))
372 t = cpu_clock_sample(clkid, tsk);
373 else
374 t = cpu_clock_sample_group(clkid, tsk, false);
375 rcu_read_unlock();
376
377 *tp = ns_to_timespec64(t);
378 return 0;
379}
380
381/*
382 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
383 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
384 * new timer already all-zeros initialized.
385 */
386static int posix_cpu_timer_create(struct k_itimer *new_timer)
387{
388 static struct lock_class_key posix_cpu_timers_key;
389 struct pid *pid;
390
391 rcu_read_lock();
392 pid = pid_for_clock(new_timer->it_clock, false);
393 if (!pid) {
394 rcu_read_unlock();
395 return -EINVAL;
396 }
397
398 /*
399 * If posix timer expiry is handled in task work context then
400 * timer::it_lock can be taken without disabling interrupts as all
401 * other locking happens in task context. This requires a separate
402 * lock class key otherwise regular posix timer expiry would record
403 * the lock class being taken in interrupt context and generate a
404 * false positive warning.
405 */
406 if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK))
407 lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key);
408
409 new_timer->kclock = &clock_posix_cpu;
410 timerqueue_init(&new_timer->it.cpu.node);
411 new_timer->it.cpu.pid = get_pid(pid);
412 rcu_read_unlock();
413 return 0;
414}
415
416static struct posix_cputimer_base *timer_base(struct k_itimer *timer,
417 struct task_struct *tsk)
418{
419 int clkidx = CPUCLOCK_WHICH(timer->it_clock);
420
421 if (CPUCLOCK_PERTHREAD(timer->it_clock))
422 return tsk->posix_cputimers.bases + clkidx;
423 else
424 return tsk->signal->posix_cputimers.bases + clkidx;
425}
426
427/*
428 * Force recalculating the base earliest expiration on the next tick.
429 * This will also re-evaluate the need to keep around the process wide
430 * cputime counter and tick dependency and eventually shut these down
431 * if necessary.
432 */
433static void trigger_base_recalc_expires(struct k_itimer *timer,
434 struct task_struct *tsk)
435{
436 struct posix_cputimer_base *base = timer_base(timer, tsk);
437
438 base->nextevt = 0;
439}
440
441/*
442 * Dequeue the timer and reset the base if it was its earliest expiration.
443 * It makes sure the next tick recalculates the base next expiration so we
444 * don't keep the costly process wide cputime counter around for a random
445 * amount of time, along with the tick dependency.
446 *
447 * If another timer gets queued between this and the next tick, its
448 * expiration will update the base next event if necessary on the next
449 * tick.
450 */
451static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
452{
453 struct cpu_timer *ctmr = &timer->it.cpu;
454 struct posix_cputimer_base *base;
455
456 if (!cpu_timer_dequeue(ctmr))
457 return;
458
459 base = timer_base(timer, p);
460 if (cpu_timer_getexpires(ctmr) == base->nextevt)
461 trigger_base_recalc_expires(timer, p);
462}
463
464
465/*
466 * Clean up a CPU-clock timer that is about to be destroyed.
467 * This is called from timer deletion with the timer already locked.
468 * If we return TIMER_RETRY, it's necessary to release the timer's lock
469 * and try again. (This happens when the timer is in the middle of firing.)
470 */
471static int posix_cpu_timer_del(struct k_itimer *timer)
472{
473 struct cpu_timer *ctmr = &timer->it.cpu;
474 struct sighand_struct *sighand;
475 struct task_struct *p;
476 unsigned long flags;
477 int ret = 0;
478
479 rcu_read_lock();
480 p = cpu_timer_task_rcu(timer);
481 if (!p)
482 goto out;
483
484 /*
485 * Protect against sighand release/switch in exit/exec and process/
486 * thread timer list entry concurrent read/writes.
487 */
488 sighand = lock_task_sighand(p, &flags);
489 if (unlikely(sighand == NULL)) {
490 /*
491 * This raced with the reaping of the task. The exit cleanup
492 * should have removed this timer from the timer queue.
493 */
494 WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
495 } else {
496 if (timer->it.cpu.firing) {
497 /*
498 * Prevent signal delivery. The timer cannot be dequeued
499 * because it is on the firing list which is not protected
500 * by sighand->lock. The delivery path is waiting for
501 * the timer lock. So go back, unlock and retry.
502 */
503 timer->it.cpu.firing = false;
504 ret = TIMER_RETRY;
505 } else {
506 disarm_timer(timer, p);
507 }
508 unlock_task_sighand(p, &flags);
509 }
510
511out:
512 rcu_read_unlock();
513
514 if (!ret) {
515 put_pid(ctmr->pid);
516 timer->it_status = POSIX_TIMER_DISARMED;
517 }
518 return ret;
519}
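
/*
 * A minimal sketch of how a caller reacts to TIMER_RETRY: drop the timer
 * lock, wait for the expiry code to finish, then retry the deletion. This
 * mirrors what do_cpu_nanosleep() further down does for its on-stack timer:
 *
 *	error = posix_cpu_timer_del(&timer);
 *	while (error == TIMER_RETRY) {
 *		posix_cpu_timer_wait_running_nsleep(&timer);
 *		error = posix_cpu_timer_del(&timer);
 *	}
 */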
520
521static void cleanup_timerqueue(struct timerqueue_head *head)
522{
523 struct timerqueue_node *node;
524 struct cpu_timer *ctmr;
525
526 while ((node = timerqueue_getnext(head))) {
527 timerqueue_del(head, node);
528 ctmr = container_of(node, struct cpu_timer, node);
529 ctmr->head = NULL;
530 }
531}
532
533/*
534 * Clean out CPU timers which are still armed when a thread exits. The
535 * timers are only removed from the list. No other updates are done. The
536 * corresponding posix timers are still accessible, but cannot be rearmed.
537 *
538 * This must be called with the siglock held.
539 */
540static void cleanup_timers(struct posix_cputimers *pct)
541{
542 cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
543 cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
544 cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
545}
546
547/*
548 * These are both called with the siglock held, when the current thread
549 * is being reaped. When the final (leader) thread in the group is reaped,
550 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
551 */
552void posix_cpu_timers_exit(struct task_struct *tsk)
553{
554 cleanup_timers(&tsk->posix_cputimers);
555}
556void posix_cpu_timers_exit_group(struct task_struct *tsk)
557{
558 cleanup_timers(&tsk->signal->posix_cputimers);
559}
560
561/*
562 * Insert the timer on the appropriate list before any timers that
563 * expire later. This must be called with the sighand lock held.
564 */
565static void arm_timer(struct k_itimer *timer, struct task_struct *p)
566{
567 struct posix_cputimer_base *base = timer_base(timer, p);
568 struct cpu_timer *ctmr = &timer->it.cpu;
569 u64 newexp = cpu_timer_getexpires(ctmr);
570
571 timer->it_status = POSIX_TIMER_ARMED;
572 if (!cpu_timer_enqueue(&base->tqhead, ctmr))
573 return;
574
575 /*
576 * We are the new earliest-expiring POSIX 1.b timer, hence
577 * need to update expiration cache. Take into account that
578 * for process timers we share expiration cache with itimers
579 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
580 */
581 if (newexp < base->nextevt)
582 base->nextevt = newexp;
583
584 if (CPUCLOCK_PERTHREAD(timer->it_clock))
585 tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
586 else
587 tick_dep_set_signal(p, TICK_DEP_BIT_POSIX_TIMER);
588}
589
590/*
591 * The timer is locked, fire it and arrange for its reload.
592 */
593static void cpu_timer_fire(struct k_itimer *timer)
594{
595 struct cpu_timer *ctmr = &timer->it.cpu;
596
597 timer->it_status = POSIX_TIMER_DISARMED;
598
599 if (unlikely(ctmr->nanosleep)) {
600 /*
601 * This is a special case for clock_nanosleep,
602 * not a normal timer from sys_timer_create.
603 */
604 wake_up_process(timer->it_process);
605 cpu_timer_setexpires(ctmr, 0);
606 } else {
607 posix_timer_queue_signal(timer);
608 /* Disable oneshot timers */
609 if (!timer->it_interval)
610 cpu_timer_setexpires(ctmr, 0);
611 }
612}
613
614static void __posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp, u64 now);
615
616/*
617 * Guts of sys_timer_settime for CPU timers.
618 * This is called with the timer locked and interrupts disabled.
619 * If we return TIMER_RETRY, it's necessary to release the timer's lock
620 * and try again. (This happens when the timer is in the middle of firing.)
621 */
622static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
623 struct itimerspec64 *new, struct itimerspec64 *old)
624{
625 bool sigev_none = timer->it_sigev_notify == SIGEV_NONE;
626 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
627 struct cpu_timer *ctmr = &timer->it.cpu;
628 u64 old_expires, new_expires, now;
629 struct sighand_struct *sighand;
630 struct task_struct *p;
631 unsigned long flags;
632 int ret = 0;
633
634 rcu_read_lock();
635 p = cpu_timer_task_rcu(timer);
636 if (!p) {
637 /*
638 * If p has just been reaped, we can no
639 * longer get any information about it at all.
640 */
641 rcu_read_unlock();
642 return -ESRCH;
643 }
644
645 /*
646 * Use the to_ktime conversion because that clamps the maximum
647 * value to KTIME_MAX and avoids multiplication overflows.
648 */
649 new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
650
651 /*
652 * Protect against sighand release/switch in exit/exec and p->cpu_timers
653 * and p->signal->cpu_timers read/write in arm_timer()
654 */
655 sighand = lock_task_sighand(p, &flags);
656 /*
657 * If p has just been reaped, we can no
658 * longer get any information about it at all.
659 */
660 if (unlikely(sighand == NULL)) {
661 rcu_read_unlock();
662 return -ESRCH;
663 }
664
665 /* Retrieve the current expiry time before disarming the timer */
666 old_expires = cpu_timer_getexpires(ctmr);
667
668 if (unlikely(timer->it.cpu.firing)) {
669 /*
670 * Prevent signal delivery. The timer cannot be dequeued
671 * because it is on the firing list which is not protected
672 * by sighand->lock. The delivery path is waiting for
673 * the timer lock. So go back, unlock and retry.
674 */
675 timer->it.cpu.firing = false;
676 ret = TIMER_RETRY;
677 } else {
678 cpu_timer_dequeue(ctmr);
679 timer->it_status = POSIX_TIMER_DISARMED;
680 }
681
682 /*
683 * Sample the current clock for saving the previous setting
684 * and for rearming the timer.
685 */
686 if (CPUCLOCK_PERTHREAD(timer->it_clock))
687 now = cpu_clock_sample(clkid, p);
688 else
689 now = cpu_clock_sample_group(clkid, p, !sigev_none);
690
691 /* Retrieve the previous expiry value if requested. */
692 if (old) {
693 old->it_value = (struct timespec64){ };
694 if (old_expires)
695 __posix_cpu_timer_get(timer, old, now);
696 }
697
698 /* Retry if the timer expiry is running concurrently */
699 if (unlikely(ret)) {
700 unlock_task_sighand(p, &flags);
701 goto out;
702 }
703
704 /* Convert relative expiry time to absolute */
705 if (new_expires && !(timer_flags & TIMER_ABSTIME))
706 new_expires += now;
707
708 /* Set the new expiry time (might be 0) */
709 cpu_timer_setexpires(ctmr, new_expires);
710
711 /*
712 * Arm the timer if it is not disabled, the new expiry value has
713 * not yet expired and the timer requires signal delivery.
714 * SIGEV_NONE timers are never armed. In case the timer is not
715 * armed, enforce the reevaluation of the timer base so that the
716 * process wide cputime counter can be disabled eventually.
717 */
718 if (likely(!sigev_none)) {
719 if (new_expires && now < new_expires)
720 arm_timer(timer, p);
721 else
722 trigger_base_recalc_expires(timer, p);
723 }
724
725 unlock_task_sighand(p, &flags);
726
727 posix_timer_set_common(timer, new);
728
729 /*
730 * If the new expiry time was already in the past the timer was not
731 * queued. Fire it immediately even if the thread never runs to
732 * accumulate more time on this clock.
733 */
734 if (!sigev_none && new_expires && now >= new_expires)
735 cpu_timer_fire(timer);
736out:
737 rcu_read_unlock();
738 return ret;
739}
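
/*
 * Illustrative userspace sketch (not part of this file): arming a CPU timer
 * with timer_settime() goes through posix_cpu_timer_set() above. The value
 * is measured in consumed CPU time, not wall-clock time; TIMER_ABSTIME makes
 * it an absolute CPU-clock value instead of a relative one. "tid" is assumed
 * to be a timer created on a CPU clock as in the earlier sketch.
 *
 *	#include <time.h>
 *
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 2, .tv_nsec = 0 },
 *		.it_interval = { .tv_sec = 1, .tv_nsec = 0 },
 *	};
 *
 *	// Fires after two more seconds of CPU time, then every CPU second
 *	timer_settime(tid, 0, &its, NULL);
 */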
740
741static void __posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp, u64 now)
742{
743 bool sigev_none = timer->it_sigev_notify == SIGEV_NONE;
744 u64 expires, iv = timer->it_interval;
745
746 /*
747 * Make sure that interval timers are moved forward for the
748 * following cases:
749 * - SIGEV_NONE timers which are never armed
750 * - Timers which expired, but the signal has not yet been
751 * delivered
752 */
753 if (iv && timer->it_status != POSIX_TIMER_ARMED)
754 expires = bump_cpu_timer(timer, now);
755 else
756 expires = cpu_timer_getexpires(&timer->it.cpu);
757
758 /*
759 * Expired interval timers cannot have a remaining time <= 0.
760 * The kernel has to move them forward so that the next
761 * timer expiry is > @now.
762 */
763 if (now < expires) {
764 itp->it_value = ns_to_timespec64(expires - now);
765 } else {
766 /*
767 * A single-shot SIGEV_NONE timer must return 0 when it has
768 * expired. Timers which have a real signal delivery mode
769 * must return a remaining time greater than 0 because the
770 * signal has not yet been delivered.
771 */
772 if (!sigev_none)
773 itp->it_value.tv_nsec = 1;
774 }
775}
776
777static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
778{
779 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
780 struct task_struct *p;
781 u64 now;
782
783 rcu_read_lock();
784 p = cpu_timer_task_rcu(timer);
785 if (p && cpu_timer_getexpires(&timer->it.cpu)) {
786 itp->it_interval = ktime_to_timespec64(timer->it_interval);
787
788 if (CPUCLOCK_PERTHREAD(timer->it_clock))
789 now = cpu_clock_sample(clkid, p);
790 else
791 now = cpu_clock_sample_group(clkid, p, false);
792
793 __posix_cpu_timer_get(timer, itp, now);
794 }
795 rcu_read_unlock();
796}
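
/*
 * Illustrative userspace sketch (not part of this file): timer_gettime() on
 * a CPU timer lands in posix_cpu_timer_get(). Per the logic above, an armed
 * timer reports the CPU time remaining, an expired timer whose signal has
 * not been delivered yet reports a 1ns remainder, and an expired one-shot
 * SIGEV_NONE timer reports zero. "tid" is the timer from the earlier sketch.
 *
 *	#include <time.h>
 *
 *	struct itimerspec cur;
 *
 *	timer_gettime(tid, &cur);
 *	// cur.it_value: CPU time left until the next expiry
 */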
797
798#define MAX_COLLECTED 20
799
800static u64 collect_timerqueue(struct timerqueue_head *head,
801 struct list_head *firing, u64 now)
802{
803 struct timerqueue_node *next;
804 int i = 0;
805
806 while ((next = timerqueue_getnext(head))) {
807 struct cpu_timer *ctmr;
808 u64 expires;
809
810 ctmr = container_of(next, struct cpu_timer, node);
811 expires = cpu_timer_getexpires(ctmr);
812 /* Limit the number of timers to expire at once */
813 if (++i == MAX_COLLECTED || now < expires)
814 return expires;
815
816 ctmr->firing = true;
817 /* See posix_cpu_timer_wait_running() */
818 rcu_assign_pointer(ctmr->handling, current);
819 cpu_timer_dequeue(ctmr);
820 list_add_tail(&ctmr->elist, firing);
821 }
822
823 return U64_MAX;
824}
825
826static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
827 struct list_head *firing)
828{
829 struct posix_cputimer_base *base = pct->bases;
830 int i;
831
832 for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
833 base->nextevt = collect_timerqueue(&base->tqhead, firing,
834 samples[i]);
835 }
836}
837
838static inline void check_dl_overrun(struct task_struct *tsk)
839{
840 if (tsk->dl.dl_overrun) {
841 tsk->dl.dl_overrun = 0;
842 send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
843 }
844}
845
846static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
847{
848 if (time < limit)
849 return false;
850
851 if (print_fatal_signals) {
852 pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
853 rt ? "RT" : "CPU", hard ? "hard" : "soft",
854 current->comm, task_pid_nr(current));
855 }
856 send_signal_locked(signo, SEND_SIG_PRIV, current, PIDTYPE_TGID);
857 return true;
858}
859
860/*
861 * Check for any per-thread CPU timers that have fired and move them off
862 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
863 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
864 */
865static void check_thread_timers(struct task_struct *tsk,
866 struct list_head *firing)
867{
868 struct posix_cputimers *pct = &tsk->posix_cputimers;
869 u64 samples[CPUCLOCK_MAX];
870 unsigned long soft;
871
872 if (dl_task(tsk))
873 check_dl_overrun(tsk);
874
875 if (expiry_cache_is_inactive(pct))
876 return;
877
878 task_sample_cputime(tsk, samples);
879 collect_posix_cputimers(pct, samples, firing);
880
881 /*
882 * Check for the special case thread timers.
883 */
884 soft = task_rlimit(tsk, RLIMIT_RTTIME);
885 if (soft != RLIM_INFINITY) {
886 /* Task RT timeout is accounted in jiffies. RTTIME is usec */
887 unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
888 unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
889
890 /* At the hard limit, send SIGKILL. No further action. */
891 if (hard != RLIM_INFINITY &&
892 check_rlimit(rttime, hard, SIGKILL, true, true))
893 return;
894
895 /* At the soft limit, send a SIGXCPU every second */
896 if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
897 soft += USEC_PER_SEC;
898 tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
899 }
900 }
901
902 if (expiry_cache_is_inactive(pct))
903 tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
904}
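
/*
 * Illustrative userspace sketch (not part of this file): the RLIMIT_RTTIME
 * handling above can be observed by giving a SCHED_FIFO task a small limit
 * and spinning. The limits are in microseconds and the accounting is tick
 * based, advancing only while the task actually runs. Error handling is
 * omitted; realtime scheduling needs the appropriate privileges.
 *
 *	#include <sched.h>
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 2000000 };
 *	struct sched_param sp = { .sched_priority = 1 };
 *
 *	setrlimit(RLIMIT_RTTIME, &rl);
 *	sched_setscheduler(0, SCHED_FIFO, &sp);
 *	for (;;)
 *		;	// SIGXCPU after ~0.5s of RT CPU time, SIGKILL at 2s
 */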
905
906static inline void stop_process_timers(struct signal_struct *sig)
907{
908 struct posix_cputimers *pct = &sig->posix_cputimers;
909
910 /* Turn off the active flag. This is done without locking. */
911 WRITE_ONCE(pct->timers_active, false);
912 tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
913}
914
915static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
916 u64 *expires, u64 cur_time, int signo)
917{
918 if (!it->expires)
919 return;
920
921 if (cur_time >= it->expires) {
922 if (it->incr)
923 it->expires += it->incr;
924 else
925 it->expires = 0;
926
927 trace_itimer_expire(signo == SIGPROF ?
928 ITIMER_PROF : ITIMER_VIRTUAL,
929 task_tgid(tsk), cur_time);
930 send_signal_locked(signo, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
931 }
932
933 if (it->expires && it->expires < *expires)
934 *expires = it->expires;
935}
936
937/*
938 * Check for any process-wide CPU timers that have fired and move them
939 * off the tsk->signal->*_timers list onto the firing list. Per-thread
940 * timers have already been taken off.
941 */
942static void check_process_timers(struct task_struct *tsk,
943 struct list_head *firing)
944{
945 struct signal_struct *const sig = tsk->signal;
946 struct posix_cputimers *pct = &sig->posix_cputimers;
947 u64 samples[CPUCLOCK_MAX];
948 unsigned long soft;
949
950 /*
951 * If there are no active process wide timers (POSIX 1.b, itimers,
952 * RLIMIT_CPU) nothing to check. Also skip the process wide timer
953 * processing when there is already another task handling them.
954 */
955 if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
956 return;
957
958 /*
959 * Signify that a thread is checking for process timers.
960 * Write access to this field is protected by the sighand lock.
961 */
962 pct->expiry_active = true;
963
964 /*
965 * Collect the current process totals. Group accounting is active
966 * so the sample can be taken directly.
967 */
968 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
969 collect_posix_cputimers(pct, samples, firing);
970
971 /*
972 * Check for the special case process timers.
973 */
974 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
975 &pct->bases[CPUCLOCK_PROF].nextevt,
976 samples[CPUCLOCK_PROF], SIGPROF);
977 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
978 &pct->bases[CPUCLOCK_VIRT].nextevt,
979 samples[CPUCLOCK_VIRT], SIGVTALRM);
980
981 soft = task_rlimit(tsk, RLIMIT_CPU);
982 if (soft != RLIM_INFINITY) {
983 /* RLIMIT_CPU is in seconds. Samples are nanoseconds */
984 unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
985 u64 ptime = samples[CPUCLOCK_PROF];
986 u64 softns = (u64)soft * NSEC_PER_SEC;
987 u64 hardns = (u64)hard * NSEC_PER_SEC;
988
989 /* At the hard limit, send SIGKILL. No further action. */
990 if (hard != RLIM_INFINITY &&
991 check_rlimit(ptime, hardns, SIGKILL, false, true))
992 return;
993
994 /* At the soft limit, send a SIGXCPU every second */
995 if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
996 sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
997 softns += NSEC_PER_SEC;
998 }
999
1000 /* Update the expiry cache */
1001 if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
1002 pct->bases[CPUCLOCK_PROF].nextevt = softns;
1003 }
1004
1005 if (expiry_cache_is_inactive(pct))
1006 stop_process_timers(sig);
1007
1008 pct->expiry_active = false;
1009}
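
/*
 * Illustrative userspace sketch (not part of this file): the process-wide
 * checks above back RLIMIT_CPU. With the limits below a CPU-bound process
 * gets SIGXCPU once two seconds of process CPU time have been consumed
 * (and then roughly once per additional CPU second) and SIGKILL at the hard
 * limit. "on_xcpu" is a user-provided handler; error handling is omitted.
 *
 *	#include <signal.h>
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl = { .rlim_cur = 2, .rlim_max = 5 };
 *
 *	signal(SIGXCPU, on_xcpu);
 *	setrlimit(RLIMIT_CPU, &rl);
 *	for (;;)
 *		;	// SIGXCPU at 2s of CPU time, SIGKILL at 5s
 */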
1010
1011/*
1012 * This is called from the signal code (via posixtimer_rearm)
1013 * when the last timer signal was delivered and we have to reload the timer.
1014 */
1015static void posix_cpu_timer_rearm(struct k_itimer *timer)
1016{
1017 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
1018 struct task_struct *p;
1019 struct sighand_struct *sighand;
1020 unsigned long flags;
1021 u64 now;
1022
1023 rcu_read_lock();
1024 p = cpu_timer_task_rcu(timer);
1025 if (!p)
1026 goto out;
1027
1028 /* Protect timer list r/w in arm_timer() */
1029 sighand = lock_task_sighand(p, &flags);
1030 if (unlikely(sighand == NULL))
1031 goto out;
1032
1033 /*
1034 * Fetch the current sample and update the timer's expiry time.
1035 */
1036 if (CPUCLOCK_PERTHREAD(timer->it_clock))
1037 now = cpu_clock_sample(clkid, p);
1038 else
1039 now = cpu_clock_sample_group(clkid, p, true);
1040
1041 bump_cpu_timer(timer, now);
1042
1043 /*
1044 * Now re-arm for the new expiry time.
1045 */
1046 arm_timer(timer, p);
1047 unlock_task_sighand(p, &flags);
1048out:
1049 rcu_read_unlock();
1050}
1051
1052/**
1053 * task_cputimers_expired - Check whether posix CPU timers are expired
1054 *
1055 * @samples: Array of current samples for the CPUCLOCK clocks
1056 * @pct: Pointer to a posix_cputimers container
1057 *
1058 * Returns true if any member of @samples is greater than the corresponding
1059 * member of @pct->bases[CLK].nextevt. False otherwise.
1060 */
1061static inline bool
1062task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
1063{
1064 int i;
1065
1066 for (i = 0; i < CPUCLOCK_MAX; i++) {
1067 if (samples[i] >= pct->bases[i].nextevt)
1068 return true;
1069 }
1070 return false;
1071}
1072
1073/**
1074 * fastpath_timer_check - POSIX CPU timers fast path.
1075 *
1076 * @tsk: The task (thread) being checked.
1077 *
1078 * Check the task and thread group timers. If both are zero (there are no
1079 * timers set) return false. Otherwise snapshot the task and thread group
1080 * timers and compare them with the corresponding expiration times. Return
1081 * true if a timer has expired, else return false.
1082 */
1083static inline bool fastpath_timer_check(struct task_struct *tsk)
1084{
1085 struct posix_cputimers *pct = &tsk->posix_cputimers;
1086 struct signal_struct *sig;
1087
1088 if (!expiry_cache_is_inactive(pct)) {
1089 u64 samples[CPUCLOCK_MAX];
1090
1091 task_sample_cputime(tsk, samples);
1092 if (task_cputimers_expired(samples, pct))
1093 return true;
1094 }
1095
1096 sig = tsk->signal;
1097 pct = &sig->posix_cputimers;
1098 /*
1099 * Check if thread group timers expired when timers are active and
1100 * no other thread in the group is already handling expiry for
1101 * thread group cputimers. These fields are read without the
1102 * sighand lock. However, this is fine because this is meant to be
1103 * a fastpath heuristic to determine whether we should try to
1104 * acquire the sighand lock to handle timer expiry.
1105 *
1106 * In the worst case scenario, if concurrently timers_active is set
1107 * or expiry_active is cleared, but the current thread doesn't see
1108 * the change yet, the timer checks are delayed until the next
1109 * thread in the group gets a scheduler interrupt to handle the
1110 * timer. This isn't an issue in practice because these types of
1111 * delays with signals actually getting sent are expected.
1112 */
1113 if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
1114 u64 samples[CPUCLOCK_MAX];
1115
1116 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
1117 samples);
1118
1119 if (task_cputimers_expired(samples, pct))
1120 return true;
1121 }
1122
1123 if (dl_task(tsk) && tsk->dl.dl_overrun)
1124 return true;
1125
1126 return false;
1127}
1128
1129static void handle_posix_cpu_timers(struct task_struct *tsk);
1130
1131#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1132static void posix_cpu_timers_work(struct callback_head *work)
1133{
1134 struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);
1135
1136 mutex_lock(&cw->mutex);
1137 handle_posix_cpu_timers(current);
1138 mutex_unlock(&cw->mutex);
1139}
1140
1141/*
1142 * Invoked from the posix-timer core when a cancel operation failed because
1143 * the timer is marked firing. The caller holds rcu_read_lock(), which
1144 * protects the timer and the task which is expiring it from being freed.
1145 */
1146static void posix_cpu_timer_wait_running(struct k_itimer *timr)
1147{
1148 struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);
1149
1150 /* Has the handling task completed expiry already? */
1151 if (!tsk)
1152 return;
1153
1154 /* Ensure that the task cannot go away */
1155 get_task_struct(tsk);
1156 /* Now drop the RCU protection so the mutex can be locked */
1157 rcu_read_unlock();
1158 /* Wait on the expiry mutex */
1159 mutex_lock(&tsk->posix_cputimers_work.mutex);
1160 /* Release it immediately again. */
1161 mutex_unlock(&tsk->posix_cputimers_work.mutex);
1162 /* Drop the task reference. */
1163 put_task_struct(tsk);
1164 /* Relock RCU so the callsite is balanced */
1165 rcu_read_lock();
1166}
1167
1168static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
1169{
1170 /* Ensure that timr->it.cpu.handling task cannot go away */
1171 rcu_read_lock();
1172 spin_unlock_irq(&timr->it_lock);
1173 posix_cpu_timer_wait_running(timr);
1174 rcu_read_unlock();
1175 /* @timr is on stack and is valid */
1176 spin_lock_irq(&timr->it_lock);
1177}
1178
1179/*
1180 * Clear existing posix CPU timers task work.
1181 */
1182void clear_posix_cputimers_work(struct task_struct *p)
1183{
1184 /*
1185 * A copied work entry from the old task is not meaningful; clear it.
1186 * N.B. init_task_work will not do this.
1187 */
1188 memset(&p->posix_cputimers_work.work, 0,
1189 sizeof(p->posix_cputimers_work.work));
1190 init_task_work(&p->posix_cputimers_work.work,
1191 posix_cpu_timers_work);
1192 mutex_init(&p->posix_cputimers_work.mutex);
1193 p->posix_cputimers_work.scheduled = false;
1194}
1195
1196/*
1197 * Initialize posix CPU timers task work in init task. Out of line to
1198 * keep the callback static and to avoid header recursion hell.
1199 */
1200void __init posix_cputimers_init_work(void)
1201{
1202 clear_posix_cputimers_work(current);
1203}
1204
1205/*
1206 * Note: All operations on tsk->posix_cputimers_work.scheduled happen either
1207 * in hard interrupt context or in task context with interrupts
1208 * disabled. Apart from that, the writer/reader interaction is always in
1209 * the context of the current task, which means it is strictly per CPU.
1210 */
1211static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1212{
1213 return tsk->posix_cputimers_work.scheduled;
1214}
1215
1216static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1217{
1218 if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled))
1219 return;
1220
1221 /* Schedule task work to actually expire the timers */
1222 tsk->posix_cputimers_work.scheduled = true;
1223 task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);
1224}
1225
1226static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1227 unsigned long start)
1228{
1229 bool ret = true;
1230
1231 /*
1232 * On !RT kernels interrupts are disabled while collecting expired
1233 * timers, so no tick can happen and the fast path check can be
1234 * reenabled without further checks.
1235 */
1236 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
1237 tsk->posix_cputimers_work.scheduled = false;
1238 return true;
1239 }
1240
1241 /*
1242 * On RT enabled kernels ticks can happen while the expired timers
1243 * are collected under sighand lock. But any tick which observes
1244 * the CPUTIMERS_WORK_SCHEDULED bit set, does not run the fastpath
1245 * checks. So reenabling the tick work has to be done carefully:
1246 *
1247 * Disable interrupts and run the fast path check if jiffies have
1248 * advanced since the collecting of expired timers started. If
1249 * jiffies have not advanced or the fast path check did not find
1250 * newly expired timers, reenable the fast path check in the timer
1251 * interrupt. If there are newly expired timers, return false and
1252 * let the collection loop repeat.
1253 */
1254 local_irq_disable();
1255 if (start != jiffies && fastpath_timer_check(tsk))
1256 ret = false;
1257 else
1258 tsk->posix_cputimers_work.scheduled = false;
1259 local_irq_enable();
1260
1261 return ret;
1262}
1263#else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1264static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1265{
1266 lockdep_posixtimer_enter();
1267 handle_posix_cpu_timers(tsk);
1268 lockdep_posixtimer_exit();
1269}
1270
1271static void posix_cpu_timer_wait_running(struct k_itimer *timr)
1272{
1273 cpu_relax();
1274}
1275
1276static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
1277{
1278 spin_unlock_irq(&timr->it_lock);
1279 cpu_relax();
1280 spin_lock_irq(&timr->it_lock);
1281}
1282
1283static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1284{
1285 return false;
1286}
1287
1288static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1289 unsigned long start)
1290{
1291 return true;
1292}
1293#endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1294
1295static void handle_posix_cpu_timers(struct task_struct *tsk)
1296{
1297 struct k_itimer *timer, *next;
1298 unsigned long flags, start;
1299 LIST_HEAD(firing);
1300
1301 if (!lock_task_sighand(tsk, &flags))
1302 return;
1303
1304 do {
1305 /*
1306 * On RT locking sighand lock does not disable interrupts,
1307 * so this needs to be careful vs. ticks. Store the current
1308 * jiffies value.
1309 */
1310 start = READ_ONCE(jiffies);
1311 barrier();
1312
1313 /*
1314 * Here we take all the timers that are firing off the
1315 * tsk->signal->cpu_timers[N] and tsk->cpu_timers[N] lists
1316 * and put them on the firing list.
1317 */
1318 check_thread_timers(tsk, &firing);
1319
1320 check_process_timers(tsk, &firing);
1321
1322 /*
1323 * The above timer checks have updated the expiry cache and
1324 * because nothing can have queued or modified timers after
1325 * sighand lock was taken above it is guaranteed to be
1326 * consistent. So the next timer interrupt fastpath check
1327 * will find valid data.
1328 *
1329 * If timer expiry runs in the timer interrupt context then
1330 * the loop is not relevant as timers will be directly
1331 * expired in interrupt context. The stub function below
1332 * always returns true which allows the compiler to
1333 * optimize the loop out.
1334 *
1335 * If timer expiry is deferred to task work context then
1336 * the following rules apply:
1337 *
1338 * - On !RT kernels no tick can have happened on this CPU
1339 * after sighand lock was acquired because interrupts are
1340 * disabled. So reenabling task work before dropping
1341 * sighand lock and reenabling interrupts is race free.
1342 *
1343 * - On RT kernels ticks might have happened but the tick
1344 * work ignored posix CPU timer handling because the
1345 * CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work
1346 * must be done very carefully including a check whether
1347 * ticks have happened since the start of the timer
1348 * expiry checks. posix_cpu_timers_enable_work() takes
1349 * care of that and eventually lets the expiry checks
1350 * run again.
1351 */
1352 } while (!posix_cpu_timers_enable_work(tsk, start));
1353
1354 /*
1355 * We must release sighand lock before taking any timer's lock.
1356 * There is a potential race with timer deletion here, as the
1357 * siglock now protects our private firing list. We have set
1358 * the firing flag in each timer, so that a deletion attempt
1359 * that gets the timer lock before we do will give it up and
1360 * spin until we've taken care of that timer below.
1361 */
1362 unlock_task_sighand(tsk, &flags);
1363
1364 /*
1365 * Now that all the timers on our list have the firing flag,
1366 * no one will touch their list entries but us. We'll take
1367 * each timer's lock before clearing its firing flag, so no
1368 * timer call will interfere.
1369 */
1370 list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
1371 bool cpu_firing;
1372
1373 /*
1374 * spin_lock() is sufficient here even independent of the
1375 * expiry context. If expiry happens in hard interrupt
1376 * context it's obvious. For task work context it's safe
1377 * because all other operations on timer::it_lock happen in
1378 * task context (syscall or exit).
1379 */
1380 spin_lock(&timer->it_lock);
1381 list_del_init(&timer->it.cpu.elist);
1382 cpu_firing = timer->it.cpu.firing;
1383 timer->it.cpu.firing = false;
1384 /*
1385 * If the firing flag is cleared then this raced with a
1386 * timer rearm/delete operation. So don't generate an
1387 * event.
1388 */
1389 if (likely(cpu_firing))
1390 cpu_timer_fire(timer);
1391 /* See posix_cpu_timer_wait_running() */
1392 rcu_assign_pointer(timer->it.cpu.handling, NULL);
1393 spin_unlock(&timer->it_lock);
1394 }
1395}
1396
1397/*
1398 * This is called from the timer interrupt handler. The irq handler has
1399 * already updated our counts. We need to check if any timers fire now.
1400 * Interrupts are disabled.
1401 */
1402void run_posix_cpu_timers(void)
1403{
1404 struct task_struct *tsk = current;
1405
1406 lockdep_assert_irqs_disabled();
1407
1408 /*
1409 * If the actual expiry is deferred to task work context and the
1410 * work is already scheduled there is no point to do anything here.
1411 */
1412 if (posix_cpu_timers_work_scheduled(tsk))
1413 return;
1414
1415 /*
1416 * The fast path checks that there are no expired thread or thread
1417 * group timers. If that's so, just return.
1418 */
1419 if (!fastpath_timer_check(tsk))
1420 return;
1421
1422 __run_posix_cpu_timers(tsk);
1423}
1424
1425/*
1426 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1427 * The tsk->sighand->siglock must be held by the caller.
1428 */
1429void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
1430 u64 *newval, u64 *oldval)
1431{
1432 u64 now, *nextevt;
1433
1434 if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
1435 return;
1436
1437 nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
1438 now = cpu_clock_sample_group(clkid, tsk, true);
1439
1440 if (oldval) {
1441 /*
1442 * We are setting itimer. The *oldval is absolute and we update
1443 * it to be relative; the *newval argument is relative and we
1444 * update it to be absolute.
1445 */
1446 if (*oldval) {
1447 if (*oldval <= now) {
1448 /* Just about to fire. */
1449 *oldval = TICK_NSEC;
1450 } else {
1451 *oldval -= now;
1452 }
1453 }
1454
1455 if (*newval)
1456 *newval += now;
1457 }
1458
1459 /*
1460 * Update expiration cache if this is the earliest timer. CPUCLOCK_PROF
1461 * expiry cache is also used by RLIMIT_CPU!
1462 */
1463 if (*newval < *nextevt)
1464 *nextevt = *newval;
1465
1466 tick_dep_set_signal(tsk, TICK_DEP_BIT_POSIX_TIMER);
1467}
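
/*
 * Illustrative userspace sketch (not part of this file): set_process_cpu_timer()
 * is used by the CPU-time itimers and, via update_rlimit_cpu(), by RLIMIT_CPU.
 * A profiling itimer charges consumed CPU time and delivers SIGPROF;
 * ITIMER_VIRTUAL behaves the same but counts user time only and delivers
 * SIGVTALRM. "on_prof" is a user-provided handler; error handling is omitted.
 *
 *	#include <signal.h>
 *	#include <sys/time.h>
 *
 *	struct itimerval itv = {
 *		.it_value    = { .tv_sec = 1, .tv_usec = 0 },
 *		.it_interval = { .tv_sec = 1, .tv_usec = 0 },
 *	};
 *
 *	signal(SIGPROF, on_prof);
 *	setitimer(ITIMER_PROF, &itv, NULL);
 */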
1468
1469static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1470 const struct timespec64 *rqtp)
1471{
1472 struct itimerspec64 it;
1473 struct k_itimer timer;
1474 u64 expires;
1475 int error;
1476
1477 /*
1478 * Set up a temporary timer and then wait for it to go off.
1479 */
1480 memset(&timer, 0, sizeof timer);
1481 spin_lock_init(&timer.it_lock);
1482 timer.it_clock = which_clock;
1483 timer.it_overrun = -1;
1484 error = posix_cpu_timer_create(&timer);
1485 timer.it_process = current;
1486 timer.it.cpu.nanosleep = true;
1487
1488 if (!error) {
1489 static struct itimerspec64 zero_it;
1490 struct restart_block *restart;
1491
1492 memset(&it, 0, sizeof(it));
1493 it.it_value = *rqtp;
1494
1495 spin_lock_irq(&timer.it_lock);
1496 error = posix_cpu_timer_set(&timer, flags, &it, NULL);
1497 if (error) {
1498 spin_unlock_irq(&timer.it_lock);
1499 return error;
1500 }
1501
1502 while (!signal_pending(current)) {
1503 if (!cpu_timer_getexpires(&timer.it.cpu)) {
1504 /*
505 * Our timer fired and was reset; the deletion
506 * below cannot fail.
1507 */
1508 posix_cpu_timer_del(&timer);
1509 spin_unlock_irq(&timer.it_lock);
1510 return 0;
1511 }
1512
1513 /*
1514 * Block until cpu_timer_fire (or a signal) wakes us.
1515 */
1516 __set_current_state(TASK_INTERRUPTIBLE);
1517 spin_unlock_irq(&timer.it_lock);
1518 schedule();
1519 spin_lock_irq(&timer.it_lock);
1520 }
1521
1522 /*
1523 * We were interrupted by a signal.
1524 */
1525 expires = cpu_timer_getexpires(&timer.it.cpu);
1526 error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
1527 if (!error) {
1528 /* Timer is now unarmed, deletion can not fail. */
1529 posix_cpu_timer_del(&timer);
1530 } else {
1531 while (error == TIMER_RETRY) {
1532 posix_cpu_timer_wait_running_nsleep(&timer);
1533 error = posix_cpu_timer_del(&timer);
1534 }
1535 }
1536
1537 spin_unlock_irq(&timer.it_lock);
1538
1539 if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
1540 /*
1541 * It actually did fire already.
1542 */
1543 return 0;
1544 }
1545
1546 error = -ERESTART_RESTARTBLOCK;
1547 /*
1548 * Report back to the user the time still remaining.
1549 */
1550 restart = &current->restart_block;
1551 restart->nanosleep.expires = expires;
1552 if (restart->nanosleep.type != TT_NONE)
1553 error = nanosleep_copyout(restart, &it.it_value);
1554 }
1555
1556 return error;
1557}
1558
1559static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1560
1561static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1562 const struct timespec64 *rqtp)
1563{
1564 struct restart_block *restart_block = &current->restart_block;
1565 int error;
1566
1567 /*
1568 * Diagnose required errors first.
1569 */
1570 if (CPUCLOCK_PERTHREAD(which_clock) &&
1571 (CPUCLOCK_PID(which_clock) == 0 ||
1572 CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
1573 return -EINVAL;
1574
1575 error = do_cpu_nanosleep(which_clock, flags, rqtp);
1576
1577 if (error == -ERESTART_RESTARTBLOCK) {
1578
1579 if (flags & TIMER_ABSTIME)
1580 return -ERESTARTNOHAND;
1581
1582 restart_block->nanosleep.clockid = which_clock;
1583 set_restart_fn(restart_block, posix_cpu_nsleep_restart);
1584 }
1585 return error;
1586}
1587
1588static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1589{
1590 clockid_t which_clock = restart_block->nanosleep.clockid;
1591 struct timespec64 t;
1592
1593 t = ns_to_timespec64(restart_block->nanosleep.expires);
1594
1595 return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
1596}
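
/*
 * Illustrative userspace sketch (not part of this file): clock_nanosleep()
 * on the process CPU clock goes through posix_cpu_nsleep() and
 * do_cpu_nanosleep() above. A per-thread CPU clock that names the calling
 * thread is rejected with -EINVAL by the check in posix_cpu_nsleep(), since
 * that clock cannot advance while the thread sleeps. Error handling omitted.
 *
 *	#include <time.h>
 *
 *	struct timespec req = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	// Returns once the whole process has consumed one more second of
 *	// CPU time (other threads must keep running for that to happen).
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, NULL);
 */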
1597
1598#define PROCESS_CLOCK make_process_cpuclock(0, CPUCLOCK_SCHED)
1599#define THREAD_CLOCK make_thread_cpuclock(0, CPUCLOCK_SCHED)
1600
1601static int process_cpu_clock_getres(const clockid_t which_clock,
1602 struct timespec64 *tp)
1603{
1604 return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1605}
1606static int process_cpu_clock_get(const clockid_t which_clock,
1607 struct timespec64 *tp)
1608{
1609 return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1610}
1611static int process_cpu_timer_create(struct k_itimer *timer)
1612{
1613 timer->it_clock = PROCESS_CLOCK;
1614 return posix_cpu_timer_create(timer);
1615}
1616static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1617 const struct timespec64 *rqtp)
1618{
1619 return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
1620}
1621static int thread_cpu_clock_getres(const clockid_t which_clock,
1622 struct timespec64 *tp)
1623{
1624 return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1625}
1626static int thread_cpu_clock_get(const clockid_t which_clock,
1627 struct timespec64 *tp)
1628{
1629 return posix_cpu_clock_get(THREAD_CLOCK, tp);
1630}
1631static int thread_cpu_timer_create(struct k_itimer *timer)
1632{
1633 timer->it_clock = THREAD_CLOCK;
1634 return posix_cpu_timer_create(timer);
1635}
1636
1637const struct k_clock clock_posix_cpu = {
1638 .clock_getres = posix_cpu_clock_getres,
1639 .clock_set = posix_cpu_clock_set,
1640 .clock_get_timespec = posix_cpu_clock_get,
1641 .timer_create = posix_cpu_timer_create,
1642 .nsleep = posix_cpu_nsleep,
1643 .timer_set = posix_cpu_timer_set,
1644 .timer_del = posix_cpu_timer_del,
1645 .timer_get = posix_cpu_timer_get,
1646 .timer_rearm = posix_cpu_timer_rearm,
1647 .timer_wait_running = posix_cpu_timer_wait_running,
1648};
1649
1650const struct k_clock clock_process = {
1651 .clock_getres = process_cpu_clock_getres,
1652 .clock_get_timespec = process_cpu_clock_get,
1653 .timer_create = process_cpu_timer_create,
1654 .nsleep = process_cpu_nsleep,
1655};
1656
1657const struct k_clock clock_thread = {
1658 .clock_getres = thread_cpu_clock_getres,
1659 .clock_get_timespec = thread_cpu_clock_get,
1660 .timer_create = thread_cpu_timer_create,
1661};