/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/random.h>
#include <linux/tick.h>
#include <linux/workqueue.h>

/*
 * Called after updating RLIMIT_CPU to run the cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update the expiration cache
 * as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	cputime_t cputime = secs_to_cputime(rlim_new);

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}
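
/*
 * Example (illustrative, not part of the original file): this is the
 * kernel-side endpoint of setrlimit(RLIMIT_CPU, ...); roughly,
 *
 *	struct rlimit new = { .rlim_cur = 60, .rlim_max = 120 };
 *	setrlimit(RLIMIT_CPU, &new);
 *
 * in userspace ends up calling update_rlimit_cpu(tsk, 60) from
 * do_prlimit(), arming the process-wide CPUCLOCK_PROF expiry.
 */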

static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}

static inline unsigned long long
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
	unsigned long long ret;

	ret = 0;	/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret = cputime_to_expires(timespec_to_cputime(tp));
	}
	return ret;
}
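
/*
 * Note: a "sample" is in nanoseconds for CPUCLOCK_SCHED but in cputime
 * units (via cputime_to_expires()) for CPUCLOCK_PROF and CPUCLOCK_VIRT,
 * so timespec_to_sample() and sample_to_timespec() must be used with
 * the same which_clock that produced the value.
 */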

static void sample_to_timespec(const clockid_t which_clock,
			       unsigned long long expires,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
		*tp = ns_to_timespec(expires);
	else
		cputime_to_timespec((__force cputime_t)expires, tp);
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
			   unsigned long long now)
{
	int i;
	unsigned long long delta, incr;

	if (timer->it.cpu.incr == 0)
		return;

	if (now < timer->it.cpu.expires)
		return;

	incr = timer->it.cpu.incr;
	delta = now + incr - timer->it.cpu.expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.expires += incr;
		timer->it_overrun += 1 << i;
		delta -= incr;
	}
}
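
/*
 * Worked example (illustrative, not from the original source): with
 * expires = 100, incr = 30 and now = 185, delta = 185 + 30 - 100 = 115.
 * The first loop doubles incr while it stays below delta - incr:
 * 30 -> 60 (i = 1). The second loop then peels off power-of-two
 * multiples of the period: delta >= 60 -> expires = 160, overrun += 2,
 * delta = 55; delta >= 30 -> expires = 190, overrun += 1. The timer
 * ends up at 190 > now after three whole periods of overrun, in
 * O(log(delta/incr)) steps instead of a naive division.
 */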

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero. Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}

static inline unsigned long long prof_ticks(struct task_struct *p)
{
	cputime_t utime, stime;

	task_cputime(p, &utime, &stime);

	return cputime_to_expires(utime + stime);
}
static inline unsigned long long virt_ticks(struct task_struct *p)
{
	cputime_t utime;

	task_cputime(p, &utime, NULL);

	return cputime_to_expires(utime);
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    unsigned long long *sample)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		*sample = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		*sample = task_sched_runtime(p);
		break;
	}
	return 0;
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
static inline void sample_cputime_atomic(struct task_cputime *times,
					 struct task_cputime_atomic *atomic_times)
{
	times->utime = atomic64_read(&atomic_times->utime);
	times->stime = atomic64_read(&atomic_times->stime);
	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(cputimer->running)) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting cputimer->running without a lock. Ensure
		 * this only gets written to in one operation. We set
		 * running after update_gt_cputime() as a small optimization,
		 * but barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(cputimer->running, true);
	}
	sample_cputime_atomic(times, &cputimer->cputime_atomic);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  unsigned long long *sample)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		*sample = cputime_to_expires(cputime.utime + cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		*sample = cputime_to_expires(cputime.utime);
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

static int posix_cpu_clock_get_task(struct task_struct *tsk,
				    const clockid_t which_clock,
				    struct timespec *tp)
{
	int err = -EINVAL;
	unsigned long long rtn;

	if (CPUCLOCK_PERTHREAD(which_clock)) {
		if (same_thread_group(tsk, current))
			err = cpu_clock_sample(which_clock, tsk, &rtn);
	} else {
		if (tsk == current || thread_group_leader(tsk))
			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
	}

	if (!err)
		sample_to_timespec(which_clock, rtn, tp);

	return err;
}


static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int err = -EINVAL;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		err = posix_cpu_clock_get_task(current, which_clock, tp);
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p)
			err = posix_cpu_clock_get_task(p, which_clock, tp);
		rcu_read_unlock();
	}

	return err;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The deletion should have cleared us off the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry)
		list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers which are still armed when a thread exits. The
 * timers are only removed from the list; no other updates are done.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
	cleanup_timers_list(head);
	cleanup_timers_list(++head);
	cleanup_timers_list(++head);
}
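
/*
 * Note: the "head" argument points at an array of three list_heads
 * (tsk->cpu_timers or sig->cpu_timers), indexed by CPUCLOCK_PROF,
 * CPUCLOCK_VIRT and CPUCLOCK_SCHED, so the ++head pointer arithmetic
 * above walks all three per-clock timer lists.
 */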

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped. When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));
	cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers);
}

static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
	return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later. This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		unsigned long long exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */

		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
				cputime_expires->prof_exp = expires_to_cputime(exp);
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
				cputime_expires->virt_exp = expires_to_cputime(exp);
			break;
		case CPUCLOCK_SCHED:
			if (cputime_expires->sched_exp == 0 ||
			    cputime_expires->sched_exp > exp)
				cputime_expires->sched_exp = exp;
			break;
		}
		if (CPUCLOCK_PERTHREAD(timer->it_clock))
			tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
		else
			tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
	}
}
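
/*
 * Note: only a timer that ends up at the head of its list can lower
 * the cached earliest expiry, which is why the expiration cache update
 * and the tick dependency are confined to the listpos == head case.
 */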

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (timer->it.cpu.incr == 0) {
		/*
		 * One-shot timer. Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer. But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  unsigned long long *sample)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = cputime_to_expires(cputime.utime + cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		*sample = cputime_to_expires(cputime.utime);
		break;
	case CPUCLOCK_SCHED:
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec *new, struct itimerspec *old)
{
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;
	unsigned long long old_expires, new_expires, old_incr, val;
	int ret;

	WARN_ON_ONCE(p == NULL);

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	WARN_ON_ONCE(!irqs_disabled());

	ret = 0;
	old_incr = timer->it.cpu.incr;
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative. To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer). With an absolute time, we must
	 * check if it's already passed. In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already. If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old) {
		sample_to_timespec(timer->it_clock,
				   old_incr, &old->it_interval);
	}

	return ret;
}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	unsigned long long now;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires == 0) {	/* Timer not armed at all. */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 * (Return here so the uninitialized "now" cannot
			 * clobber the zeroed it_value below.)
			 */
			timer->it.cpu.expires = 0;
			sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
					   &itp->it_value);
			return;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < timer->it.cpu.expires) {
		sample_to_timespec(timer->it_clock,
				   timer->it.cpu.expires - now,
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet. Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

static unsigned long long
check_timers_list(struct list_head *timers,
		  struct list_head *firing,
		  unsigned long long curr)
{
	int maxfire = 20;

	while (!list_empty(timers)) {
		struct cpu_timer_list *t;

		t = list_first_entry(timers, struct cpu_timer_list, entry);

		if (!--maxfire || curr < t->expires)
			return t->expires;

		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	return 0;
}
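
/*
 * Note: the maxfire cap of 20 bounds how many expired timers a single
 * pass moves onto the firing list while the sighand lock is held; if
 * more have expired, the expiry of the first unprocessed timer is
 * returned as the new cache value and the rest fire on a later tick.
 */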

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;
	struct task_cputime *tsk_expires = &tsk->cputime_expires;
	unsigned long long expires;
	unsigned long soft;

	/*
	 * If cputime_expires is zero, then there are no active
	 * per thread CPU timers.
	 */
	if (task_cputime_zero(&tsk->cputime_expires))
		return;

	expires = check_timers_list(timers, firing, prof_ticks(tsk));
	tsk_expires->prof_exp = expires_to_cputime(expires);

	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
	tsk_expires->virt_exp = expires_to_cputime(expires);

	tsk_expires->sched_exp = check_timers_list(++timers, firing,
						   tsk->se.sum_exec_runtime);

	/*
	 * Check for the special case thread timers.
	 */
	soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long hard =
			READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
			}
			printk(KERN_INFO
			       "RT Watchdog Timeout: %s[%d]\n",
			       tsk->comm, task_pid_nr(tsk));
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
	if (task_cputime_zero(tsk_expires))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}
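
/*
 * Note: RLIMIT_RTTIME is specified in microseconds while rt.timeout is
 * counted in scheduler ticks, hence the DIV_ROUND_UP(limit,
 * USEC_PER_SEC/HZ) conversions in check_thread_timers() above.
 */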

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;

	/* Turn off cputimer->running. This is done without locking. */
	WRITE_ONCE(cputimer->running, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static u32 onecputick;

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     unsigned long long *expires,
			     unsigned long long cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr) {
			it->expires += it->incr;
			it->error += it->incr_error;
			if (it->error >= onecputick) {
				it->expires -= cputime_one_jiffy;
				it->error -= onecputick;
			}
		} else {
			it->expires = 0;
		}

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    tsk->signal->leader_pid, cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires)) {
		*expires = it->expires;
	}
}
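
/*
 * Note (assumption about the itimer setup path): it->incr is rounded
 * to cputime granularity when the itimer is set, so each reload above
 * accumulates the rounding remainder in it->error; once a whole tick
 * of error has built up, one jiffy is subtracted from the expiry to
 * keep the long-run rate accurate.
 */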

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list. Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	unsigned long long utime, ptime, virt_expires, prof_expires;
	unsigned long long sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * If cputimer is not running, then there are no active
	 * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
	 */
	if (!READ_ONCE(tsk->signal->cputimer.running))
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	sig->cputimer.checking_timer = true;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime_to_expires(cputime.utime);
	ptime = utime + cputime_to_expires(cputime.stime);
	sum_sched_runtime = cputime.sum_exec_runtime;

	prof_expires = check_timers_list(timers, firing, ptime);
	virt_expires = check_timers_list(++timers, firing, utime);
	sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		unsigned long hard =
			READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
		cputime_t x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = secs_to_cputime(soft);
		if (!prof_expires || x < prof_expires) {
			prof_expires = x;
		}
	}

	sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
	sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);

	sig->cputimer.checking_timer = false;
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	struct task_struct *p = timer->it.cpu.task;
	unsigned long long now;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			goto out;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			goto out;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			timer->it.cpu.expires = 0;
			goto out;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			unlock_task_sighand(p, &flags);
			/* Optimizations: if the process is dying, no need to rearm */
			goto out;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below. */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	arm_timer(timer);
	unlock_task_sighand(p, &flags);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than or equal to the
 * corresponding field of the latter if the latter field is set. Otherwise
 * returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers. If both are zero (there are no
 * timers set) return false. Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times. Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample;

		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	/*
	 * Check if thread group timers expired when the cputimer is
	 * running and no other thread in the group is already checking
	 * for thread group cputimers. These fields are read without the
	 * sighand lock. However, this is fine because this is meant to
	 * be a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to check/handle timers.
	 *
	 * In the worst case scenario, if 'running' or 'checking_timer' gets
	 * set but the current thread doesn't see the change yet, we'll wait
	 * until the next thread in the group gets a scheduler interrupt to
	 * handle the timer. This isn't an issue in practice because these
	 * types of delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(sig->cputimer.running) &&
	    !READ_ONCE(sig->cputimer.checking_timer)) {
		struct task_cputime group_sample;

		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return 0;
}

/*
 * This is called from the timer interrupt handler. The irq handler has
 * already updated our counts. We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers. If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);

	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list. We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us. We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun. So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	unsigned long long now;

	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		/*
		 * We are setting itimer. The *oldval is absolute and we update
		 * it to be relative, *newval argument is relative and we update
		 * it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = cputime_one_jiffy;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update the expiration cache if we are the earliest timer, or if the
	 * RLIMIT_CPU limit expires earlier than the current prof_exp cpu timer.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct itimerspec *it)
{
	struct k_itimer timer;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec zero_it;

		memset(it, 0, sizeof *it);
		it->it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was reset; the
				 * deletion below cannot fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion can not fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle the case where the timer was or
			 * is in the middle of firing. In other cases we have
			 * already freed the resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}
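
/*
 * Example (illustrative, not part of the original file): a process can
 * sleep until its own CPU clock reaches an absolute target, e.g.
 *
 *	struct timespec t = { .tv_sec = 5, .tv_nsec = 0 };
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, TIMER_ABSTIME, &t, NULL);
 *
 * which reaches do_cpu_nanosleep() above via posix_cpu_nsleep() with a
 * temporary, stack-allocated k_itimer.
 */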

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct timespec __user *rmtp)
{
	struct restart_block *restart_block = &current->restart_block;
	struct itimerspec it;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
		restart_block->nanosleep.rmtp = rmtp;
		restart_block->nanosleep.expires = timespec_to_ns(rqtp);
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec t;
	struct itimerspec it;
	int error;

	t = ns_to_timespec(restart_block->nanosleep.expires);

	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

	if (error == -ERESTART_RESTARTBLOCK) {
		struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->nanosleep.expires = timespec_to_ns(&t);
	}
	return error;
}

#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.nsleep_restart	= posix_cpu_nsleep_restart,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
};

static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres	= process_cpu_clock_getres,
		.clock_get	= process_cpu_clock_get,
		.timer_create	= process_cpu_timer_create,
		.nsleep		= process_cpu_nsleep,
		.nsleep_restart	= process_cpu_nsleep_restart,
	};
	struct k_clock thread = {
		.clock_getres	= thread_cpu_clock_getres,
		.clock_get	= thread_cpu_clock_get,
		.timer_create	= thread_cpu_timer_create,
	};
	struct timespec ts;

	posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	cputime_to_timespec(cputime_one_jiffy, &ts);
	onecputick = ts.tv_nsec;
	WARN_ON(ts.tv_sec != 0);

	return 0;
}
__initcall(init_posix_cpu_timers);
// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>
#include <linux/task_work.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
{
	posix_cputimers_init(pct);
	if (cpu_limit != RLIM_INFINITY) {
		pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
		pct->timers_active = true;
	}
}

/*
 * Called after updating RLIMIT_CPU to run the cpu timer and update
 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
 * necessary. Needs siglock protection since other code may update the
 * expiration cache as well.
 *
 * Returns 0 on success, -ESRCH on failure. Can fail if the task is exiting
 * and we cannot lock_task_sighand. Cannot fail if task is current.
 */
int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;
	unsigned long irq_fl;

	if (!lock_task_sighand(task, &irq_fl))
		return -ESRCH;
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	unlock_task_sighand(task, &irq_fl);
	return 0;
}

/*
 * Functions for validating access to tasks.
 */
static struct pid *pid_for_clock(const clockid_t clock, bool gettime)
{
	const bool thread = !!CPUCLOCK_PERTHREAD(clock);
	const pid_t upid = CPUCLOCK_PID(clock);
	struct pid *pid;

	if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
		return NULL;

	/*
	 * If the encoded PID is 0, then the timer is targeted at current
	 * or the process to which current belongs.
	 */
	if (upid == 0)
		return thread ? task_pid(current) : task_tgid(current);

	pid = find_vpid(upid);
	if (!pid)
		return NULL;

	if (thread) {
		struct task_struct *tsk = pid_task(pid, PIDTYPE_PID);
		return (tsk && same_thread_group(tsk, current)) ? pid : NULL;
	}

	/*
	 * For clock_gettime(PROCESS) allow finding the process with the
	 * pid of the current task. The code needs the tgid of the
	 * process so that pid_task(pid, PIDTYPE_TGID) can be used to
	 * find the process.
	 */
	if (gettime && (pid == task_pid(current)))
		return task_tgid(current);

	/*
	 * For process clocks, require that the pid identifies a process.
	 */
	return pid_has_task(pid, PIDTYPE_TGID) ? pid : NULL;
}

static inline int validate_clock_permissions(const clockid_t clock)
{
	int ret;

	rcu_read_lock();
	ret = pid_for_clock(clock, false) ? 0 : -EINVAL;
	rcu_read_unlock();

	return ret;
}

static inline enum pid_type clock_pid_type(const clockid_t clock)
{
	return CPUCLOCK_PERTHREAD(clock) ? PIDTYPE_PID : PIDTYPE_TGID;
}

static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
{
	return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	u64 delta, incr, expires = timer->it.cpu.node.expires;
	int i;

	if (!timer->it_interval)
		return expires;

	if (now < expires)
		return expires;

	incr = timer->it_interval;
	delta = now + incr - expires;

	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.node.expires += incr;
		timer->it_overrun += 1LL << i;
		delta -= incr;
	}
	return timer->it.cpu.node.expires;
}

/* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
{
	return !(~pct->bases[CPUCLOCK_PROF].nextevt |
		 ~pct->bases[CPUCLOCK_VIRT].nextevt |
		 ~pct->bases[CPUCLOCK_SCHED].nextevt);
}
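
/*
 * Note: an inactive slot holds U64_MAX, and ~x is 0 only for
 * x == U64_MAX, so the expression above is true exactly when all three
 * nextevt entries are U64_MAX, without three separate comparisons.
 */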

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = validate_clock_permissions(which_clock);

	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
{
	int error = validate_clock_permissions(clock);

	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	return error ? : -EPERM;
}

/*
 * Sample a per-thread clock for the given task. clkid is validated.
 */
static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
{
	u64 utime, stime;

	if (clkid == CPUCLOCK_SCHED)
		return task_sched_runtime(p);

	task_cputime(p, &utime, &stime);

	switch (clkid) {
	case CPUCLOCK_PROF:
		return utime + stime;
	case CPUCLOCK_VIRT:
		return utime;
	default:
		WARN_ON_ONCE(1);
	}
	return 0;
}

static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
{
	samples[CPUCLOCK_PROF] = stime + utime;
	samples[CPUCLOCK_VIRT] = utime;
	samples[CPUCLOCK_SCHED] = rtime;
}

static void task_sample_cputime(struct task_struct *p, u64 *samples)
{
	u64 stime, utime;

	task_cputime(p, &utime, &stime);
	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
}

static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
				       u64 *samples)
{
	u64 stime, utime, rtime;

	utime = atomic64_read(&at->utime);
	stime = atomic64_read(&at->stime);
	rtime = atomic64_read(&at->sum_exec_runtime);
	store_samples(samples, stime, utime, rtime);
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime = atomic64_read(cputime);

	do {
		if (sum_cputime <= curr_cputime)
			return;
	} while (!atomic64_try_cmpxchg(cputime, &curr_cputime, sum_cputime));
}
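
/*
 * Note: on failure atomic64_try_cmpxchg() updates curr_cputime with
 * the value it found, so the loop above re-tests against a fresh
 * snapshot without issuing another atomic64_read().
 */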

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
			      struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/**
 * thread_group_sample_cputime - Sample cputime for a given task
 * @tsk:	Task for which cputime needs to be started
 * @samples:	Storage for time samples
 *
 * Called from sys_getitimer() to calculate the expiry time of an active
 * timer. That means group cputime accounting is already active. Called
 * with task sighand lock held.
 *
 * Updates @samples with an uptodate sample of the thread group cputimes.
 */
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

	WARN_ON_ONCE(!pct->timers_active);

	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

/**
 * thread_group_start_cputime - Start cputime and return a sample
 * @tsk:	Task for which cputime needs to be started
 * @samples:	Storage for time samples
 *
 * The thread group cputime accounting is avoided when there are no posix
 * CPU timers armed. Before starting a timer it's required to check whether
 * the time accounting is active. If not, a full update of the atomic
 * accounting store needs to be done and the accounting enabled.
 *
 * Updates @samples with an uptodate sample of the thread group cputimes.
 */
static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

	lockdep_assert_task_sighand_held(tsk);

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(pct->timers_active)) {
		struct task_cputime sum;

		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting timers_active without a lock. Ensure this
		 * only gets written to in one operation. We set it after
		 * update_gt_cputime() as a small optimization, but
		 * barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(pct->timers_active, true);
	}
	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
{
	struct task_cputime ct;

	thread_group_cputime(tsk, &ct);
	store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
}

/*
 * Sample a process (thread group) clock for the given task clkid. If the
 * group's cputime accounting is already enabled, read the atomic
 * store. Otherwise a full update is required. clkid is already validated.
 */
static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
				  bool start)
{
	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
	struct posix_cputimers *pct = &p->signal->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];

	if (!READ_ONCE(pct->timers_active)) {
		if (start)
			thread_group_start_cputime(p, samples);
		else
			__thread_group_cputime(p, samples);
	} else {
		proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
	}

	return samples[clkid];
}

static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
{
	const clockid_t clkid = CPUCLOCK_WHICH(clock);
	struct task_struct *tsk;
	u64 t;

	rcu_read_lock();
	tsk = pid_task(pid_for_clock(clock, true), clock_pid_type(clock));
	if (!tsk) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (CPUCLOCK_PERTHREAD(clock))
		t = cpu_clock_sample(clkid, tsk);
	else
		t = cpu_clock_sample_group(clkid, tsk, false);
	rcu_read_unlock();

	*tp = ns_to_timespec64(t);
	return 0;
}
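
/*
 * Example (illustrative, assuming the standard clockid mapping):
 * userspace reaches this via e.g.
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
 *
 * where the encoded clockid targets the current thread (upid == 0 in
 * pid_for_clock() above).
 */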

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	static struct lock_class_key posix_cpu_timers_key;
	struct pid *pid;

	rcu_read_lock();
	pid = pid_for_clock(new_timer->it_clock, false);
	if (!pid) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/*
	 * If posix timer expiry is handled in task work context then
	 * timer::it_lock can be taken without disabling interrupts as all
	 * other locking happens in task context. This requires a separate
	 * lock class key otherwise regular posix timer expiry would record
	 * the lock class being taken in interrupt context and generate a
	 * false positive warning.
	 */
	if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK))
		lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key);

	new_timer->kclock = &clock_posix_cpu;
	timerqueue_init(&new_timer->it.cpu.node);
	new_timer->it.cpu.pid = get_pid(pid);
	rcu_read_unlock();
	return 0;
}

static struct posix_cputimer_base *timer_base(struct k_itimer *timer,
					      struct task_struct *tsk)
{
	int clkidx = CPUCLOCK_WHICH(timer->it_clock);

	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		return tsk->posix_cputimers.bases + clkidx;
	else
		return tsk->signal->posix_cputimers.bases + clkidx;
}

/*
 * Force recalculating the base earliest expiration on the next tick.
 * This will also re-evaluate the need to keep around the process wide
 * cputime counter and tick dependency and eventually shut these down
 * if necessary.
 */
static void trigger_base_recalc_expires(struct k_itimer *timer,
					struct task_struct *tsk)
{
	struct posix_cputimer_base *base = timer_base(timer, tsk);

	base->nextevt = 0;
}

/*
 * Dequeue the timer and reset the base if it was its earliest expiration.
 * It makes sure the next tick recalculates the base next expiration so we
 * don't keep the costly process wide cputime counter around for a random
 * amount of time, along with the tick dependency.
 *
 * If another timer gets queued between this and the next tick, its
 * expiration will update the base next event if necessary on the next
 * tick.
 */
static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
{
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct posix_cputimer_base *base;

	if (!cpu_timer_dequeue(ctmr))
		return;

	base = timer_base(timer, p);
	if (cpu_timer_getexpires(ctmr) == base->nextevt)
		trigger_base_recalc_expires(timer, p);
}


/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct sighand_struct *sighand;
	struct task_struct *p;
	unsigned long flags;
	int ret = 0;

	rcu_read_lock();
	p = cpu_timer_task_rcu(timer);
	if (!p)
		goto out;

	/*
	 * Protect against sighand release/switch in exit/exec and process/
	 * thread timer list entry concurrent read/writes.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * This raced with the reaping of the task. The exit cleanup
		 * should have removed this timer from the timer queue.
		 */
		WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
	} else {
		if (timer->it.cpu.firing) {
			/*
			 * Prevent signal delivery. The timer cannot be dequeued
			 * because it is on the firing list which is not protected
			 * by sighand->lock. The delivery path is waiting for
			 * the timer lock. So go back, unlock and retry.
			 */
			timer->it.cpu.firing = false;
			ret = TIMER_RETRY;
		} else {
			disarm_timer(timer, p);
		}
		unlock_task_sighand(p, &flags);
	}

out:
	rcu_read_unlock();

	if (!ret) {
		put_pid(ctmr->pid);
		timer->it_status = POSIX_TIMER_DISARMED;
	}
	return ret;
}
520
521static void cleanup_timerqueue(struct timerqueue_head *head)
522{
523 struct timerqueue_node *node;
524 struct cpu_timer *ctmr;
525
526 while ((node = timerqueue_getnext(head))) {
527 timerqueue_del(head, node);
528 ctmr = container_of(node, struct cpu_timer, node);
529 ctmr->head = NULL;
530 }
531}
532
533/*
534 * Clean out CPU timers which are still armed when a thread exits. The
535 * timers are only removed from the list. No other updates are done. The
536 * corresponding posix timers are still accessible, but cannot be rearmed.
537 *
538 * This must be called with the siglock held.
539 */
540static void cleanup_timers(struct posix_cputimers *pct)
541{
542 cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
543 cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
544 cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
545}
546
547/*
548 * These are both called with the siglock held, when the current thread
549 * is being reaped. When the final (leader) thread in the group is reaped,
550 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
551 */
552void posix_cpu_timers_exit(struct task_struct *tsk)
553{
554 cleanup_timers(&tsk->posix_cputimers);
555}
556void posix_cpu_timers_exit_group(struct task_struct *tsk)
557{
558 cleanup_timers(&tsk->signal->posix_cputimers);
559}
560
561/*
562 * Insert the timer on the appropriate list before any timers that
563 * expire later. This must be called with the sighand lock held.
564 */
565static void arm_timer(struct k_itimer *timer, struct task_struct *p)
566{
567 struct posix_cputimer_base *base = timer_base(timer, p);
568 struct cpu_timer *ctmr = &timer->it.cpu;
569 u64 newexp = cpu_timer_getexpires(ctmr);
570
571 timer->it_status = POSIX_TIMER_ARMED;
572 if (!cpu_timer_enqueue(&base->tqhead, ctmr))
573 return;
574
575 /*
576	 * We are the new earliest-expiring POSIX 1.b timer, hence we
577	 * need to update the expiration cache. Take into account that
578 * for process timers we share expiration cache with itimers
579 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
580 */
581 if (newexp < base->nextevt)
582 base->nextevt = newexp;
583
584 if (CPUCLOCK_PERTHREAD(timer->it_clock))
585 tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
586 else
587 tick_dep_set_signal(p, TICK_DEP_BIT_POSIX_TIMER);
588}
589
590/*
591 * The timer is locked, fire it and arrange for its reload.
592 */
593static void cpu_timer_fire(struct k_itimer *timer)
594{
595 struct cpu_timer *ctmr = &timer->it.cpu;
596
597 timer->it_status = POSIX_TIMER_DISARMED;
598
599 if (unlikely(ctmr->nanosleep)) {
600 /*
601		 * This is a special case for clock_nanosleep,
602 * not a normal timer from sys_timer_create.
603 */
604 wake_up_process(timer->it_process);
605 cpu_timer_setexpires(ctmr, 0);
606 } else {
607 posix_timer_queue_signal(timer);
608 /* Disable oneshot timers */
609 if (!timer->it_interval)
610 cpu_timer_setexpires(ctmr, 0);
611 }
612}
613
614static void __posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp, u64 now);
615
616/*
617 * Guts of sys_timer_settime for CPU timers.
618 * This is called with the timer locked and interrupts disabled.
619 * If we return TIMER_RETRY, it's necessary to release the timer's lock
620 * and try again. (This happens when the timer is in the middle of firing.)
621 */
622static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
623 struct itimerspec64 *new, struct itimerspec64 *old)
624{
625 bool sigev_none = timer->it_sigev_notify == SIGEV_NONE;
626 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
627 struct cpu_timer *ctmr = &timer->it.cpu;
628 u64 old_expires, new_expires, now;
629 struct sighand_struct *sighand;
630 struct task_struct *p;
631 unsigned long flags;
632 int ret = 0;
633
634 rcu_read_lock();
635 p = cpu_timer_task_rcu(timer);
636 if (!p) {
637 /*
638 * If p has just been reaped, we can no
639 * longer get any information about it at all.
640 */
641 rcu_read_unlock();
642 return -ESRCH;
643 }
644
645 /*
646 * Use the to_ktime conversion because that clamps the maximum
647	 * value to KTIME_MAX and avoids multiplication overflows.
648 */
649 new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
650
651 /*
652 * Protect against sighand release/switch in exit/exec and p->cpu_timers
653 * and p->signal->cpu_timers read/write in arm_timer()
654 */
655 sighand = lock_task_sighand(p, &flags);
656 /*
657 * If p has just been reaped, we can no
658 * longer get any information about it at all.
659 */
660 if (unlikely(sighand == NULL)) {
661 rcu_read_unlock();
662 return -ESRCH;
663 }
664
665 /* Retrieve the current expiry time before disarming the timer */
666 old_expires = cpu_timer_getexpires(ctmr);
667
668 if (unlikely(timer->it.cpu.firing)) {
669 /*
670 * Prevent signal delivery. The timer cannot be dequeued
671 * because it is on the firing list which is not protected
672 * by sighand->lock. The delivery path is waiting for
673 * the timer lock. So go back, unlock and retry.
674 */
675 timer->it.cpu.firing = false;
676 ret = TIMER_RETRY;
677 } else {
678 cpu_timer_dequeue(ctmr);
679 timer->it_status = POSIX_TIMER_DISARMED;
680 }
681
682 /*
683 * Sample the current clock for saving the previous setting
684 * and for rearming the timer.
685 */
686 if (CPUCLOCK_PERTHREAD(timer->it_clock))
687 now = cpu_clock_sample(clkid, p);
688 else
689 now = cpu_clock_sample_group(clkid, p, !sigev_none);
690
691 /* Retrieve the previous expiry value if requested. */
692 if (old) {
693 old->it_value = (struct timespec64){ };
694 if (old_expires)
695 __posix_cpu_timer_get(timer, old, now);
696 }
697
698 /* Retry if the timer expiry is running concurrently */
699 if (unlikely(ret)) {
700 unlock_task_sighand(p, &flags);
701 goto out;
702 }
703
704 /* Convert relative expiry time to absolute */
705 if (new_expires && !(timer_flags & TIMER_ABSTIME))
706 new_expires += now;
707
708 /* Set the new expiry time (might be 0) */
709 cpu_timer_setexpires(ctmr, new_expires);
710
711 /*
712 * Arm the timer if it is not disabled, the new expiry value has
713 * not yet expired and the timer requires signal delivery.
714 * SIGEV_NONE timers are never armed. In case the timer is not
715 * armed, enforce the reevaluation of the timer base so that the
716 * process wide cputime counter can be disabled eventually.
717 */
718 if (likely(!sigev_none)) {
719 if (new_expires && now < new_expires)
720 arm_timer(timer, p);
721 else
722 trigger_base_recalc_expires(timer, p);
723 }
724
725 unlock_task_sighand(p, &flags);
726
727 posix_timer_set_common(timer, new);
728
729 /*
730 * If the new expiry time was already in the past the timer was not
731 * queued. Fire it immediately even if the thread never runs to
732 * accumulate more time on this clock.
733 */
734 if (!sigev_none && new_expires && now >= new_expires)
735 cpu_timer_fire(timer);
736out:
737 rcu_read_unlock();
738 return ret;
739}
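
/*
 * Userspace view (an illustrative sketch; tid is a CPU-clock timer id
 * from timer_create()): timer_settime() lands here. A relative it_value
 * is converted to an absolute expiry against the current clock sample,
 * while TIMER_ABSTIME skips that conversion:
 *
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 1 },
 *		.it_interval = { .tv_sec = 1 },
 *	};
 *
 *	timer_settime(tid, 0, &its, NULL);
 *
 * This fires after one second of consumed CPU time and then after each
 * further second. Passing TIMER_ABSTIME instead of 0 would interpret
 * it_value as an absolute value of the CPU clock.
 */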
740
741static void __posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp, u64 now)
742{
743 bool sigev_none = timer->it_sigev_notify == SIGEV_NONE;
744 u64 expires, iv = timer->it_interval;
745
746 /*
747 * Make sure that interval timers are moved forward for the
748 * following cases:
749 * - SIGEV_NONE timers which are never armed
750 * - Timers which expired, but the signal has not yet been
751 * delivered
752 */
753 if (iv && timer->it_status != POSIX_TIMER_ARMED)
754 expires = bump_cpu_timer(timer, now);
755 else
756 expires = cpu_timer_getexpires(&timer->it.cpu);
757
758 /*
759 * Expired interval timers cannot have a remaining time <= 0.
760 * The kernel has to move them forward so that the next
761 * timer expiry is > @now.
762 */
763 if (now < expires) {
764 itp->it_value = ns_to_timespec64(expires - now);
765 } else {
766 /*
767		 * A single-shot SIGEV_NONE timer must return 0 when it has
768		 * expired! Timers which have a real signal delivery mode
769 * must return a remaining time greater than 0 because the
770 * signal has not yet been delivered.
771 */
772 if (!sigev_none)
773 itp->it_value.tv_nsec = 1;
774 }
775}
776
777static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
778{
779 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
780 struct task_struct *p;
781 u64 now;
782
783 rcu_read_lock();
784 p = cpu_timer_task_rcu(timer);
785 if (p && cpu_timer_getexpires(&timer->it.cpu)) {
786 itp->it_interval = ktime_to_timespec64(timer->it_interval);
787
788 if (CPUCLOCK_PERTHREAD(timer->it_clock))
789 now = cpu_clock_sample(clkid, p);
790 else
791 now = cpu_clock_sample_group(clkid, p, false);
792
793 __posix_cpu_timer_get(timer, itp, now);
794 }
795 rcu_read_unlock();
796}
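
/*
 * Userspace view (an illustrative sketch; tid as above): timer_gettime()
 * reports the remaining time computed above. An expired one-shot
 * SIGEV_NONE timer reads as zero, while an expired timer whose signal
 * has not yet been delivered reads as one nanosecond:
 *
 *	struct itimerspec cur;
 *
 *	timer_gettime(tid, &cur);
 *
 * cur.it_value is {0, 0} only in the expired SIGEV_NONE case.
 */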
797
798#define MAX_COLLECTED 20
799
800static u64 collect_timerqueue(struct timerqueue_head *head,
801 struct list_head *firing, u64 now)
802{
803 struct timerqueue_node *next;
804 int i = 0;
805
806 while ((next = timerqueue_getnext(head))) {
807 struct cpu_timer *ctmr;
808 u64 expires;
809
810 ctmr = container_of(next, struct cpu_timer, node);
811 expires = cpu_timer_getexpires(ctmr);
812 /* Limit the number of timers to expire at once */
813 if (++i == MAX_COLLECTED || now < expires)
814 return expires;
815
816 ctmr->firing = true;
817 /* See posix_cpu_timer_wait_running() */
818 rcu_assign_pointer(ctmr->handling, current);
819 cpu_timer_dequeue(ctmr);
820 list_add_tail(&ctmr->elist, firing);
821 }
822
823 return U64_MAX;
824}
825
826static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
827 struct list_head *firing)
828{
829 struct posix_cputimer_base *base = pct->bases;
830 int i;
831
832 for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
833 base->nextevt = collect_timerqueue(&base->tqhead, firing,
834 samples[i]);
835 }
836}
837
838static inline void check_dl_overrun(struct task_struct *tsk)
839{
840 if (tsk->dl.dl_overrun) {
841 tsk->dl.dl_overrun = 0;
842 send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
843 }
844}
845
846static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
847{
848 if (time < limit)
849 return false;
850
851 if (print_fatal_signals) {
852 pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
853 rt ? "RT" : "CPU", hard ? "hard" : "soft",
854 current->comm, task_pid_nr(current));
855 }
856 send_signal_locked(signo, SEND_SIG_PRIV, current, PIDTYPE_TGID);
857 return true;
858}
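
/*
 * Userspace view (an illustrative sketch): the rlimit watchdog behaviour
 * implemented by check_rlimit() above. For a process which sets
 *
 *	struct rlimit rl = { .rlim_cur = 2, .rlim_max = 5 };
 *
 *	setrlimit(RLIMIT_CPU, &rl);
 *
 * the kernel sends SIGXCPU after two seconds of consumed CPU time and
 * once per further second, and SIGKILL once five seconds are reached.
 */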
859
860/*
861 * Check for any per-thread CPU timers that have fired and move them off
862 * the task's timer lists onto the firing list. Here we update the
863 * expiry cache to reflect the remaining thread CPU timers.
864 */
865static void check_thread_timers(struct task_struct *tsk,
866 struct list_head *firing)
867{
868 struct posix_cputimers *pct = &tsk->posix_cputimers;
869 u64 samples[CPUCLOCK_MAX];
870 unsigned long soft;
871
872 if (dl_task(tsk))
873 check_dl_overrun(tsk);
874
875 if (expiry_cache_is_inactive(pct))
876 return;
877
878 task_sample_cputime(tsk, samples);
879 collect_posix_cputimers(pct, samples, firing);
880
881 /*
882 * Check for the special case thread timers.
883 */
884 soft = task_rlimit(tsk, RLIMIT_RTTIME);
885 if (soft != RLIM_INFINITY) {
886 /* Task RT timeout is accounted in jiffies. RTTIME is usec */
887 unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
888 unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
889
890 /* At the hard limit, send SIGKILL. No further action. */
891 if (hard != RLIM_INFINITY &&
892 check_rlimit(rttime, hard, SIGKILL, true, true))
893 return;
894
895 /* At the soft limit, send a SIGXCPU every second */
896 if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
897 soft += USEC_PER_SEC;
898 tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
899 }
900 }
901
902 if (expiry_cache_is_inactive(pct))
903 tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
904}
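
/*
 * Userspace view (an illustrative sketch): the RLIMIT_RTTIME check above
 * applies to realtime tasks. RLIMIT_RTTIME is accounted in microseconds
 * of CPU time consumed without blocking, so after
 *
 *	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };
 *
 *	setrlimit(RLIMIT_RTTIME, &rl);
 *
 * a SCHED_FIFO or SCHED_RR task which runs for half a second without
 * sleeping gets SIGXCPU, then once per further second of runtime, and
 * SIGKILL at the one second hard limit.
 */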
905
906static inline void stop_process_timers(struct signal_struct *sig)
907{
908 struct posix_cputimers *pct = &sig->posix_cputimers;
909
910 /* Turn off the active flag. This is done without locking. */
911 WRITE_ONCE(pct->timers_active, false);
912 tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
913}
914
915static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
916 u64 *expires, u64 cur_time, int signo)
917{
918 if (!it->expires)
919 return;
920
921 if (cur_time >= it->expires) {
922 if (it->incr)
923 it->expires += it->incr;
924 else
925 it->expires = 0;
926
927 trace_itimer_expire(signo == SIGPROF ?
928 ITIMER_PROF : ITIMER_VIRTUAL,
929 task_tgid(tsk), cur_time);
930 send_signal_locked(signo, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
931 }
932
933 if (it->expires && it->expires < *expires)
934 *expires = it->expires;
935}
936
937/*
938 * Check for any process-wide CPU timers that have fired and move
939 * them off the process timer lists onto the firing list. Per-thread
940 * timers have already been taken off.
941 */
942static void check_process_timers(struct task_struct *tsk,
943 struct list_head *firing)
944{
945 struct signal_struct *const sig = tsk->signal;
946 struct posix_cputimers *pct = &sig->posix_cputimers;
947 u64 samples[CPUCLOCK_MAX];
948 unsigned long soft;
949
950 /*
951 * If there are no active process wide timers (POSIX 1.b, itimers,
952	 * RLIMIT_CPU) there is nothing to check. Also skip the process wide
953	 * timer processing when there is already another task handling them.
954 */
955 if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
956 return;
957
958 /*
959 * Signify that a thread is checking for process timers.
960 * Write access to this field is protected by the sighand lock.
961 */
962 pct->expiry_active = true;
963
964 /*
965 * Collect the current process totals. Group accounting is active
966 * so the sample can be taken directly.
967 */
968 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
969 collect_posix_cputimers(pct, samples, firing);
970
971 /*
972 * Check for the special case process timers.
973 */
974 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
975 &pct->bases[CPUCLOCK_PROF].nextevt,
976 samples[CPUCLOCK_PROF], SIGPROF);
977 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
978 &pct->bases[CPUCLOCK_VIRT].nextevt,
979 samples[CPUCLOCK_VIRT], SIGVTALRM);
980
981 soft = task_rlimit(tsk, RLIMIT_CPU);
982 if (soft != RLIM_INFINITY) {
983 /* RLIMIT_CPU is in seconds. Samples are nanoseconds */
984 unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
985 u64 ptime = samples[CPUCLOCK_PROF];
986 u64 softns = (u64)soft * NSEC_PER_SEC;
987 u64 hardns = (u64)hard * NSEC_PER_SEC;
988
989 /* At the hard limit, send SIGKILL. No further action. */
990 if (hard != RLIM_INFINITY &&
991 check_rlimit(ptime, hardns, SIGKILL, false, true))
992 return;
993
994 /* At the soft limit, send a SIGXCPU every second */
995 if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
996 sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
997 softns += NSEC_PER_SEC;
998 }
999
1000 /* Update the expiry cache */
1001 if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
1002 pct->bases[CPUCLOCK_PROF].nextevt = softns;
1003 }
1004
1005 if (expiry_cache_is_inactive(pct))
1006 stop_process_timers(sig);
1007
1008 pct->expiry_active = false;
1009}
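
/*
 * Userspace view (an illustrative sketch): the legacy interval timers
 * handled by check_cpu_itimer() above are set with setitimer(). For
 * example a profiling itimer, which ticks on user plus system time:
 *
 *	struct itimerval itv = {
 *		.it_value    = { .tv_sec = 1 },
 *		.it_interval = { .tv_sec = 1 },
 *	};
 *
 *	setitimer(ITIMER_PROF, &itv, NULL);
 *
 * This delivers SIGPROF after each second of consumed CPU time.
 */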
1010
1011/*
1012 * This is called from the signal code (via posixtimer_rearm)
1013 * when the last timer signal was delivered and we have to reload the timer.
1014 */
1015static void posix_cpu_timer_rearm(struct k_itimer *timer)
1016{
1017 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
1018 struct task_struct *p;
1019 struct sighand_struct *sighand;
1020 unsigned long flags;
1021 u64 now;
1022
1023 rcu_read_lock();
1024 p = cpu_timer_task_rcu(timer);
1025 if (!p)
1026 goto out;
1027
1028 /* Protect timer list r/w in arm_timer() */
1029 sighand = lock_task_sighand(p, &flags);
1030 if (unlikely(sighand == NULL))
1031 goto out;
1032
1033 /*
1034 * Fetch the current sample and update the timer's expiry time.
1035 */
1036 if (CPUCLOCK_PERTHREAD(timer->it_clock))
1037 now = cpu_clock_sample(clkid, p);
1038 else
1039 now = cpu_clock_sample_group(clkid, p, true);
1040
1041 bump_cpu_timer(timer, now);
1042
1043 /*
1044 * Now re-arm for the new expiry time.
1045 */
1046 arm_timer(timer, p);
1047 unlock_task_sighand(p, &flags);
1048out:
1049 rcu_read_unlock();
1050}
1051
1052/**
1053 * task_cputimers_expired - Check whether posix CPU timers are expired
1054 *
1055 * @samples: Array of current samples for the CPUCLOCK clocks
1056 * @pct: Pointer to a posix_cputimers container
1057 *
1058 * Returns true if any member of @samples is greater than or equal to
1059 * the corresponding member of @pct->bases[CLK].nextevt. False otherwise.
1060 */
1061static inline bool
1062task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
1063{
1064 int i;
1065
1066 for (i = 0; i < CPUCLOCK_MAX; i++) {
1067 if (samples[i] >= pct->bases[i].nextevt)
1068 return true;
1069 }
1070 return false;
1071}
1072
1073/**
1074 * fastpath_timer_check - POSIX CPU timers fast path.
1075 *
1076 * @tsk: The task (thread) being checked.
1077 *
1078 * Check the task and thread group timers. If both are zero (there are no
1079 * timers set) return false. Otherwise snapshot the task and thread group
1080 * timers and compare them with the corresponding expiration times. Return
1081 * true if a timer has expired, else return false.
1082 */
1083static inline bool fastpath_timer_check(struct task_struct *tsk)
1084{
1085 struct posix_cputimers *pct = &tsk->posix_cputimers;
1086 struct signal_struct *sig;
1087
1088 if (!expiry_cache_is_inactive(pct)) {
1089 u64 samples[CPUCLOCK_MAX];
1090
1091 task_sample_cputime(tsk, samples);
1092 if (task_cputimers_expired(samples, pct))
1093 return true;
1094 }
1095
1096 sig = tsk->signal;
1097 pct = &sig->posix_cputimers;
1098 /*
1099 * Check if thread group timers expired when timers are active and
1100 * no other thread in the group is already handling expiry for
1101 * thread group cputimers. These fields are read without the
1102 * sighand lock. However, this is fine because this is meant to be
1103 * a fastpath heuristic to determine whether we should try to
1104 * acquire the sighand lock to handle timer expiry.
1105 *
1106 * In the worst case scenario, if concurrently timers_active is set
1107 * or expiry_active is cleared, but the current thread doesn't see
1108 * the change yet, the timer checks are delayed until the next
1109 * thread in the group gets a scheduler interrupt to handle the
1110	 * timer. This isn't an issue in practice because such delays in
1111	 * signal delivery are expected.
1112 */
1113 if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
1114 u64 samples[CPUCLOCK_MAX];
1115
1116 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
1117 samples);
1118
1119 if (task_cputimers_expired(samples, pct))
1120 return true;
1121 }
1122
1123 if (dl_task(tsk) && tsk->dl.dl_overrun)
1124 return true;
1125
1126 return false;
1127}
1128
1129static void handle_posix_cpu_timers(struct task_struct *tsk);
1130
1131#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1132static void posix_cpu_timers_work(struct callback_head *work)
1133{
1134 struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);
1135
1136 mutex_lock(&cw->mutex);
1137 handle_posix_cpu_timers(current);
1138 mutex_unlock(&cw->mutex);
1139}
1140
1141/*
1142 * Invoked from the posix-timer core when a cancel operation failed because
1143 * the timer is marked firing. The caller holds rcu_read_lock(), which
1144 * protects the timer and the task which is expiring it from being freed.
1145 */
1146static void posix_cpu_timer_wait_running(struct k_itimer *timr)
1147{
1148 struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);
1149
1150 /* Has the handling task completed expiry already? */
1151 if (!tsk)
1152 return;
1153
1154 /* Ensure that the task cannot go away */
1155 get_task_struct(tsk);
1156 /* Now drop the RCU protection so the mutex can be locked */
1157 rcu_read_unlock();
1158 /* Wait on the expiry mutex */
1159 mutex_lock(&tsk->posix_cputimers_work.mutex);
1160 /* Release it immediately again. */
1161 mutex_unlock(&tsk->posix_cputimers_work.mutex);
1162 /* Drop the task reference. */
1163 put_task_struct(tsk);
1164 /* Relock RCU so the callsite is balanced */
1165 rcu_read_lock();
1166}
1167
1168static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
1169{
1170 /* Ensure that timr->it.cpu.handling task cannot go away */
1171 rcu_read_lock();
1172 spin_unlock_irq(&timr->it_lock);
1173 posix_cpu_timer_wait_running(timr);
1174 rcu_read_unlock();
1175 /* @timr is on stack and is valid */
1176 spin_lock_irq(&timr->it_lock);
1177}
1178
1179/*
1180 * Clear existing posix CPU timers task work.
1181 */
1182void clear_posix_cputimers_work(struct task_struct *p)
1183{
1184 /*
1185 * A copied work entry from the old task is not meaningful, clear it.
1186 * N.B. init_task_work will not do this.
1187 */
1188 memset(&p->posix_cputimers_work.work, 0,
1189 sizeof(p->posix_cputimers_work.work));
1190 init_task_work(&p->posix_cputimers_work.work,
1191 posix_cpu_timers_work);
1192 mutex_init(&p->posix_cputimers_work.mutex);
1193 p->posix_cputimers_work.scheduled = false;
1194}
1195
1196/*
1197 * Initialize posix CPU timers task work in init task. Out of line to
1198 * keep the callback static and to avoid header recursion hell.
1199 */
1200void __init posix_cputimers_init_work(void)
1201{
1202 clear_posix_cputimers_work(current);
1203}
1204
1205/*
1206 * Note: All operations on tsk->posix_cputimers_work.scheduled happen
1207 * either in hard interrupt context or in task context with interrupts
1208 * disabled. Aside from that, the writer/reader interaction is always in
1209 * the context of the current task, which means they are strictly per CPU.
1210 */
1211static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1212{
1213 return tsk->posix_cputimers_work.scheduled;
1214}
1215
1216static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1217{
1218 if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled))
1219 return;
1220
1221 /* Schedule task work to actually expire the timers */
1222 tsk->posix_cputimers_work.scheduled = true;
1223 task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);
1224}
1225
1226static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1227 unsigned long start)
1228{
1229 bool ret = true;
1230
1231 /*
1232 * On !RT kernels interrupts are disabled while collecting expired
1233 * timers, so no tick can happen and the fast path check can be
1234 * reenabled without further checks.
1235 */
1236 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
1237 tsk->posix_cputimers_work.scheduled = false;
1238 return true;
1239 }
1240
1241 /*
1242 * On RT enabled kernels ticks can happen while the expired timers
1243 * are collected under sighand lock. But any tick which observes
1244 * the CPUTIMERS_WORK_SCHEDULED bit set, does not run the fastpath
1245	 * checks. So reenabling the tick work has to be done carefully:
1246 *
1247 * Disable interrupts and run the fast path check if jiffies have
1248 * advanced since the collecting of expired timers started. If
1249 * jiffies have not advanced or the fast path check did not find
1250 * newly expired timers, reenable the fast path check in the timer
1251 * interrupt. If there are newly expired timers, return false and
1252 * let the collection loop repeat.
1253 */
1254 local_irq_disable();
1255 if (start != jiffies && fastpath_timer_check(tsk))
1256 ret = false;
1257 else
1258 tsk->posix_cputimers_work.scheduled = false;
1259 local_irq_enable();
1260
1261 return ret;
1262}
1263#else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1264static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1265{
1266 lockdep_posixtimer_enter();
1267 handle_posix_cpu_timers(tsk);
1268 lockdep_posixtimer_exit();
1269}
1270
1271static void posix_cpu_timer_wait_running(struct k_itimer *timr)
1272{
1273 cpu_relax();
1274}
1275
1276static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
1277{
1278 spin_unlock_irq(&timr->it_lock);
1279 cpu_relax();
1280 spin_lock_irq(&timr->it_lock);
1281}
1282
1283static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1284{
1285 return false;
1286}
1287
1288static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1289 unsigned long start)
1290{
1291 return true;
1292}
1293#endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1294
1295static void handle_posix_cpu_timers(struct task_struct *tsk)
1296{
1297 struct k_itimer *timer, *next;
1298 unsigned long flags, start;
1299 LIST_HEAD(firing);
1300
1301 if (!lock_task_sighand(tsk, &flags))
1302 return;
1303
1304 do {
1305 /*
1306 * On RT locking sighand lock does not disable interrupts,
1307 * so this needs to be careful vs. ticks. Store the current
1308 * jiffies value.
1309 */
1310 start = READ_ONCE(jiffies);
1311 barrier();
1312
1313 /*
1314		 * Here we take all the timers that are firing off the
1315		 * tsk->signal->cpu_timers[N] and tsk->cpu_timers[N] lists
1316		 * and put them on the firing list.
1317 */
1318 check_thread_timers(tsk, &firing);
1319
1320 check_process_timers(tsk, &firing);
1321
1322 /*
1323 * The above timer checks have updated the expiry cache and
1324 * because nothing can have queued or modified timers after
1325 * sighand lock was taken above it is guaranteed to be
1326 * consistent. So the next timer interrupt fastpath check
1327 * will find valid data.
1328 *
1329 * If timer expiry runs in the timer interrupt context then
1330 * the loop is not relevant as timers will be directly
1331 * expired in interrupt context. The stub function below
1332		 * always returns true, which allows the compiler to
1333 * optimize the loop out.
1334 *
1335 * If timer expiry is deferred to task work context then
1336 * the following rules apply:
1337 *
1338 * - On !RT kernels no tick can have happened on this CPU
1339 * after sighand lock was acquired because interrupts are
1340 * disabled. So reenabling task work before dropping
1341 * sighand lock and reenabling interrupts is race free.
1342 *
1343 * - On RT kernels ticks might have happened but the tick
1344 * work ignored posix CPU timer handling because the
1345 * CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work
1346 * must be done very carefully including a check whether
1347 * ticks have happened since the start of the timer
1348 * expiry checks. posix_cpu_timers_enable_work() takes
1349 * care of that and eventually lets the expiry checks
1350 * run again.
1351 */
1352 } while (!posix_cpu_timers_enable_work(tsk, start));
1353
1354 /*
1355 * We must release sighand lock before taking any timer's lock.
1356 * There is a potential race with timer deletion here, as the
1357 * siglock now protects our private firing list. We have set
1358 * the firing flag in each timer, so that a deletion attempt
1359 * that gets the timer lock before we do will give it up and
1360 * spin until we've taken care of that timer below.
1361 */
1362 unlock_task_sighand(tsk, &flags);
1363
1364 /*
1365 * Now that all the timers on our list have the firing flag,
1366 * no one will touch their list entries but us. We'll take
1367 * each timer's lock before clearing its firing flag, so no
1368 * timer call will interfere.
1369 */
1370 list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
1371 bool cpu_firing;
1372
1373 /*
1374 * spin_lock() is sufficient here even independent of the
1375 * expiry context. If expiry happens in hard interrupt
1376 * context it's obvious. For task work context it's safe
1377 * because all other operations on timer::it_lock happen in
1378 * task context (syscall or exit).
1379 */
1380 spin_lock(&timer->it_lock);
1381 list_del_init(&timer->it.cpu.elist);
1382 cpu_firing = timer->it.cpu.firing;
1383 timer->it.cpu.firing = false;
1384 /*
1385 * If the firing flag is cleared then this raced with a
1386 * timer rearm/delete operation. So don't generate an
1387 * event.
1388 */
1389 if (likely(cpu_firing))
1390 cpu_timer_fire(timer);
1391 /* See posix_cpu_timer_wait_running() */
1392 rcu_assign_pointer(timer->it.cpu.handling, NULL);
1393 spin_unlock(&timer->it_lock);
1394 }
1395}
1396
1397/*
1398 * This is called from the timer interrupt handler. The irq handler has
1399 * already updated our counts. We need to check if any timers fire now.
1400 * Interrupts are disabled.
1401 */
1402void run_posix_cpu_timers(void)
1403{
1404 struct task_struct *tsk = current;
1405
1406 lockdep_assert_irqs_disabled();
1407
1408 /*
1409 * If the actual expiry is deferred to task work context and the
1410 * work is already scheduled there is no point to do anything here.
1411 */
1412 if (posix_cpu_timers_work_scheduled(tsk))
1413 return;
1414
1415 /*
1416 * The fast path checks that there are no expired thread or thread
1417 * group timers. If that's so, just return.
1418 */
1419 if (!fastpath_timer_check(tsk))
1420 return;
1421
1422 __run_posix_cpu_timers(tsk);
1423}
1424
1425/*
1426 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1427 * The tsk->sighand->siglock must be held by the caller.
1428 */
1429void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
1430 u64 *newval, u64 *oldval)
1431{
1432 u64 now, *nextevt;
1433
1434 if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
1435 return;
1436
1437 nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
1438 now = cpu_clock_sample_group(clkid, tsk, true);
1439
1440 if (oldval) {
1441 /*
1442		 * We are setting an itimer. The *oldval value is absolute and we
1443		 * update it to be relative; the *newval argument is relative and
1444		 * we update it to be absolute.
1445 */
1446 if (*oldval) {
1447 if (*oldval <= now) {
1448 /* Just about to fire. */
1449 *oldval = TICK_NSEC;
1450 } else {
1451 *oldval -= now;
1452 }
1453 }
1454
1455 if (*newval)
1456 *newval += now;
1457 }
1458
1459 /*
1460	 * Update the expiration cache if this is the earliest timer. The
1461	 * CPUCLOCK_PROF expiry cache is also used by RLIMIT_CPU!
1462 */
1463 if (*newval < *nextevt)
1464 *nextevt = *newval;
1465
1466 tick_dep_set_signal(tsk, TICK_DEP_BIT_POSIX_TIMER);
1467}
1468
1469static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1470 const struct timespec64 *rqtp)
1471{
1472 struct itimerspec64 it;
1473 struct k_itimer timer;
1474 u64 expires;
1475 int error;
1476
1477 /*
1478 * Set up a temporary timer and then wait for it to go off.
1479 */
1480	memset(&timer, 0, sizeof(timer));
1481 spin_lock_init(&timer.it_lock);
1482 timer.it_clock = which_clock;
1483 timer.it_overrun = -1;
1484 error = posix_cpu_timer_create(&timer);
1485 timer.it_process = current;
1486 timer.it.cpu.nanosleep = true;
1487
1488 if (!error) {
1489 static struct itimerspec64 zero_it;
1490 struct restart_block *restart;
1491
1492 memset(&it, 0, sizeof(it));
1493 it.it_value = *rqtp;
1494
1495 spin_lock_irq(&timer.it_lock);
1496 error = posix_cpu_timer_set(&timer, flags, &it, NULL);
1497 if (error) {
1498 spin_unlock_irq(&timer.it_lock);
1499 return error;
1500 }
1501
1502 while (!signal_pending(current)) {
1503 if (!cpu_timer_getexpires(&timer.it.cpu)) {
1504 /*
1505				 * Our timer fired and was reset; the
1506				 * deletion below cannot fail.
1507 */
1508 posix_cpu_timer_del(&timer);
1509 spin_unlock_irq(&timer.it_lock);
1510 return 0;
1511 }
1512
1513 /*
1514 * Block until cpu_timer_fire (or a signal) wakes us.
1515 */
1516 __set_current_state(TASK_INTERRUPTIBLE);
1517 spin_unlock_irq(&timer.it_lock);
1518 schedule();
1519 spin_lock_irq(&timer.it_lock);
1520 }
1521
1522 /*
1523 * We were interrupted by a signal.
1524 */
1525 expires = cpu_timer_getexpires(&timer.it.cpu);
1526 error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
1527 if (!error) {
1528			/* Timer is now unarmed, deletion cannot fail. */
1529 posix_cpu_timer_del(&timer);
1530 } else {
1531 while (error == TIMER_RETRY) {
1532 posix_cpu_timer_wait_running_nsleep(&timer);
1533 error = posix_cpu_timer_del(&timer);
1534 }
1535 }
1536
1537 spin_unlock_irq(&timer.it_lock);
1538
1539 if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
1540 /*
1541 * It actually did fire already.
1542 */
1543 return 0;
1544 }
1545
1546 error = -ERESTART_RESTARTBLOCK;
1547 /*
1548 * Report back to the user the time still remaining.
1549 */
1550		restart = &current->restart_block;
1551 restart->nanosleep.expires = expires;
1552 if (restart->nanosleep.type != TT_NONE)
1553 error = nanosleep_copyout(restart, &it.it_value);
1554 }
1555
1556 return error;
1557}
1558
1559static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1560
1561static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1562 const struct timespec64 *rqtp)
1563{
1564	struct restart_block *restart_block = &current->restart_block;
1565 int error;
1566
1567 /*
1568 * Diagnose required errors first.
1569 */
1570 if (CPUCLOCK_PERTHREAD(which_clock) &&
1571 (CPUCLOCK_PID(which_clock) == 0 ||
1572 CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
1573 return -EINVAL;
1574
1575 error = do_cpu_nanosleep(which_clock, flags, rqtp);
1576
1577 if (error == -ERESTART_RESTARTBLOCK) {
1578
1579 if (flags & TIMER_ABSTIME)
1580 return -ERESTARTNOHAND;
1581
1582 restart_block->nanosleep.clockid = which_clock;
1583 set_restart_fn(restart_block, posix_cpu_nsleep_restart);
1584 }
1585 return error;
1586}
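
/*
 * Userspace view (an illustrative sketch): clock_nanosleep() on a CPU
 * clock is served by do_cpu_nanosleep(). For example:
 *
 *	struct timespec ts = { .tv_sec = 1 };
 *
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &ts, NULL);
 *
 * blocks until the whole process has consumed one more second of CPU
 * time. Sleeping on the calling thread's own CPU clock is rejected with
 * EINVAL above, since a blocked thread accumulates no CPU time and the
 * sleep could never end.
 */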
1587
1588static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1589{
1590 clockid_t which_clock = restart_block->nanosleep.clockid;
1591 struct timespec64 t;
1592
1593 t = ns_to_timespec64(restart_block->nanosleep.expires);
1594
1595 return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
1596}
1597
1598#define PROCESS_CLOCK make_process_cpuclock(0, CPUCLOCK_SCHED)
1599#define THREAD_CLOCK make_thread_cpuclock(0, CPUCLOCK_SCHED)
1600
1601static int process_cpu_clock_getres(const clockid_t which_clock,
1602 struct timespec64 *tp)
1603{
1604 return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1605}
1606static int process_cpu_clock_get(const clockid_t which_clock,
1607 struct timespec64 *tp)
1608{
1609 return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1610}
1611static int process_cpu_timer_create(struct k_itimer *timer)
1612{
1613 timer->it_clock = PROCESS_CLOCK;
1614 return posix_cpu_timer_create(timer);
1615}
1616static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1617 const struct timespec64 *rqtp)
1618{
1619 return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
1620}
1621static int thread_cpu_clock_getres(const clockid_t which_clock,
1622 struct timespec64 *tp)
1623{
1624 return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1625}
1626static int thread_cpu_clock_get(const clockid_t which_clock,
1627 struct timespec64 *tp)
1628{
1629 return posix_cpu_clock_get(THREAD_CLOCK, tp);
1630}
1631static int thread_cpu_timer_create(struct k_itimer *timer)
1632{
1633 timer->it_clock = THREAD_CLOCK;
1634 return posix_cpu_timer_create(timer);
1635}
1636
1637const struct k_clock clock_posix_cpu = {
1638 .clock_getres = posix_cpu_clock_getres,
1639 .clock_set = posix_cpu_clock_set,
1640 .clock_get_timespec = posix_cpu_clock_get,
1641 .timer_create = posix_cpu_timer_create,
1642 .nsleep = posix_cpu_nsleep,
1643 .timer_set = posix_cpu_timer_set,
1644 .timer_del = posix_cpu_timer_del,
1645 .timer_get = posix_cpu_timer_get,
1646 .timer_rearm = posix_cpu_timer_rearm,
1647 .timer_wait_running = posix_cpu_timer_wait_running,
1648};
1649
1650const struct k_clock clock_process = {
1651 .clock_getres = process_cpu_clock_getres,
1652 .clock_get_timespec = process_cpu_clock_get,
1653 .timer_create = process_cpu_timer_create,
1654 .nsleep = process_cpu_nsleep,
1655};
1656
1657const struct k_clock clock_thread = {
1658 .clock_getres = thread_cpu_clock_getres,
1659 .clock_get_timespec = thread_cpu_clock_get,
1660 .timer_create = thread_cpu_timer_create,
1661};
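
/*
 * Dispatch overview (assuming the generic posix-timer core's
 * clockid_to_kclock() mapping): the predefined clockids
 * CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID resolve to
 * clock_process and clock_thread above, while dynamic CPU clockids
 * (negative values encoding a PID, a clock type and a per-thread bit)
 * resolve to clock_posix_cpu.
 */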