v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Implement CPU time clocks for the POSIX clock interface.
   4 */
   5
   6#include <linux/sched/signal.h>
   7#include <linux/sched/cputime.h>
   8#include <linux/posix-timers.h>
   9#include <linux/errno.h>
  10#include <linux/math64.h>
  11#include <linux/uaccess.h>
  12#include <linux/kernel_stat.h>
  13#include <trace/events/timer.h>
  14#include <linux/tick.h>
  15#include <linux/workqueue.h>
  16#include <linux/compat.h>
  17#include <linux/sched/deadline.h>
  18
  19#include "posix-timers.h"
  20
  21static void posix_cpu_timer_rearm(struct k_itimer *timer);
  22
  23void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
  24{
  25	posix_cputimers_init(pct);
  26	if (cpu_limit != RLIM_INFINITY) {
  27		pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
  28		pct->timers_active = true;
  29	}
  30}
  31
  32/*
  33 * Called after updating RLIMIT_CPU to run cpu timer and update
  34 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
  35 * necessary. Needs siglock protection since other code may update the
  36 * expiration cache as well.
  37 */
  38void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
  39{
  40	u64 nsecs = rlim_new * NSEC_PER_SEC;
  41
  42	spin_lock_irq(&task->sighand->siglock);
  43	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
  44	spin_unlock_irq(&task->sighand->siglock);
  45}
  46
  47/*
  48 * Functions for validating access to tasks.
  49 */
  50static struct task_struct *lookup_task(const pid_t pid, bool thread,
  51				       bool gettime)
  52{
  53	struct task_struct *p;
  54
  55	/*
  56	 * If the encoded PID is 0, then the timer is targeted at current
  57	 * or the process to which current belongs.
  58	 */
  59	if (!pid)
  60		return thread ? current : current->group_leader;
  61
  62	p = find_task_by_vpid(pid);
  63	if (!p)
  64		return p;
  65
  66	if (thread)
  67		return same_thread_group(p, current) ? p : NULL;
  68
  69	if (gettime) {
  70		/*
  71		 * For clock_gettime(PROCESS) the task does not need to be
  72		 * the actual group leader. tsk->sighand gives
  73		 * access to the group's clock.
  74		 *
  75		 * Timers need the group leader because they take a
  76		 * reference on it and store the task pointer until the
  77		 * timer is destroyed.
  78		 */
  79		return (p == current || thread_group_leader(p)) ? p : NULL;
  80	}
  81
  82	/*
  83	 * For process clocks, require that p is the group leader.
  84	 */
  85	return has_group_leader_pid(p) ? p : NULL;
  86}
  87
  88static struct task_struct *__get_task_for_clock(const clockid_t clock,
  89						bool getref, bool gettime)
  90{
  91	const bool thread = !!CPUCLOCK_PERTHREAD(clock);
  92	const pid_t pid = CPUCLOCK_PID(clock);
  93	struct task_struct *p;
  94
  95	if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
  96		return NULL;
  97
  98	rcu_read_lock();
  99	p = lookup_task(pid, thread, gettime);
 100	if (p && getref)
 101		get_task_struct(p);
 102	rcu_read_unlock();
 103	return p;
 104}
 105
 106static inline struct task_struct *get_task_for_clock(const clockid_t clock)
 107{
 108	return __get_task_for_clock(clock, true, false);
 109}
 110
 111static inline struct task_struct *get_task_for_clock_get(const clockid_t clock)
 112{
 113	return __get_task_for_clock(clock, true, true);
 114}
 115
 116static inline int validate_clock_permissions(const clockid_t clock)
 117{
 118	return __get_task_for_clock(clock, false, false) ? 0 : -EINVAL;
 119}
 120
 121/*
 122 * Update expiry time from increment, and increase overrun count,
 123 * given the current clock sample.
 124 */
 125static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
 126{
 127	u64 delta, incr, expires = timer->it.cpu.node.expires;
 128	int i;
 129
 130	if (!timer->it_interval)
 131		return expires;
 132
 133	if (now < expires)
 134		return expires;
 135
 136	incr = timer->it_interval;
 137	delta = now + incr - expires;
 138
 139	/* Don't use (incr*2 < delta), incr*2 might overflow. */
 140	for (i = 0; incr < delta - incr; i++)
 141		incr = incr << 1;
 142
 143	for (; i >= 0; incr >>= 1, i--) {
 144		if (delta < incr)
 145			continue;
 146
 147		timer->it.cpu.node.expires += incr;
 148		timer->it_overrun += 1LL << i;
 149		delta -= incr;
 150	}
 151	return timer->it.cpu.node.expires;
 152}
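/*
 * Worked example (editor's note, illustrative only, not part of the
 * kernel source): with expires = 100, it_interval = 30 and now = 205,
 * delta = 205 + 30 - 100 = 135.  The doubling loop stops with
 * incr = 120 and i = 2; the halving loop then adds 120 to expires and
 * 1LL << 2 = 4 to it_overrun, so the four missed expirations at 100,
 * 130, 160 and 190 are accounted as overruns and the next expiry
 * lands at 220 > now.
 */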
 153
 154/* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
 155static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
 156{
 157	return !(~pct->bases[CPUCLOCK_PROF].nextevt |
 158		 ~pct->bases[CPUCLOCK_VIRT].nextevt |
 159		 ~pct->bases[CPUCLOCK_SCHED].nextevt);
 160}
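/*
 * Editor's note, illustrative only: ~x is 0 only when x == U64_MAX, so
 * OR-ing the three complemented nextevt values yields 0 (and the
 * negation true) exactly when every base still holds the "no timer
 * armed" sentinel U64_MAX.
 */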
 161
 162static int
 163posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
 164{
 165	int error = validate_clock_permissions(which_clock);
 166
 167	if (!error) {
 168		tp->tv_sec = 0;
 169		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
 170		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
 171			/*
 172			 * If sched_clock is using a cycle counter, we
 173			 * don't have any idea of its true resolution
 174			 * exported, but it is much more than 1s/HZ.
 175			 */
 176			tp->tv_nsec = 1;
 177		}
 178	}
 179	return error;
 180}
 181
 182static int
 183posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
 184{
 185	int error = validate_clock_permissions(clock);
 186
 187	/*
 188	 * You can never reset a CPU clock, but we check for other errors
 189	 * in the call before failing with EPERM.
 190	 */
 191	return error ? : -EPERM;
 192}
 193
 194/*
 195 * Sample a per-thread clock for the given task. clkid is validated.
 196 */
 197static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
 198{
 199	u64 utime, stime;
 200
 201	if (clkid == CPUCLOCK_SCHED)
 202		return task_sched_runtime(p);
 203
 204	task_cputime(p, &utime, &stime);
 205
 206	switch (clkid) {
 207	case CPUCLOCK_PROF:
 208		return utime + stime;
 209	case CPUCLOCK_VIRT:
 210		return utime;
 211	default:
 212		WARN_ON_ONCE(1);
 213	}
 214	return 0;
 215}
 216
 217static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
 218{
 219	samples[CPUCLOCK_PROF] = stime + utime;
 220	samples[CPUCLOCK_VIRT] = utime;
 221	samples[CPUCLOCK_SCHED] = rtime;
 222}
 223
 224static void task_sample_cputime(struct task_struct *p, u64 *samples)
 225{
 226	u64 stime, utime;
 227
 228	task_cputime(p, &utime, &stime);
 229	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
 230}
 231
 232static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
 233				       u64 *samples)
 234{
 235	u64 stime, utime, rtime;
 236
 237	utime = atomic64_read(&at->utime);
 238	stime = atomic64_read(&at->stime);
 239	rtime = atomic64_read(&at->sum_exec_runtime);
 240	store_samples(samples, stime, utime, rtime);
 241}
 242
 243/*
 244 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 245 * to avoid race conditions with concurrent updates to cputime.
 246 */
 247static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
 248{
 249	u64 curr_cputime;
 250retry:
 251	curr_cputime = atomic64_read(cputime);
 252	if (sum_cputime > curr_cputime) {
 253		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
 254			goto retry;
 255	}
 256}
 257
 258static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
 259			      struct task_cputime *sum)
 260{
 261	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
 262	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
 263	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
 264}
 265
 266/**
 267 * thread_group_sample_cputime - Sample cputime for a given task
 268 * @tsk:	Task for which cputime needs to be sampled
 269 * @samples:	Storage for time samples
 270 *
 271 * Called from sys_getitimer() to calculate the expiry time of an active
 272 * timer. That means group cputime accounting is already active. Called
 273 * with task sighand lock held.
 274 *
 275 * Updates @samples with an uptodate sample of the thread group cputimes.
 276 */
 277void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
 278{
 279	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 280	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
 281
 282	WARN_ON_ONCE(!pct->timers_active);
 283
 284	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
 285}
 286
 287/**
 288 * thread_group_start_cputime - Start cputime and return a sample
 289 * @tsk:	Task for which cputime needs to be started
 290 * @samples:	Storage for time samples
 291 *
 292 * The thread group cputime accounting is avoided when there are no posix
 293 * CPU timers armed. Before starting a timer it's required to check whether
 294 * the time accounting is active. If not, a full update of the atomic
 295 * accounting store needs to be done and the accounting enabled.
 296 *
 297 * Updates @samples with an uptodate sample of the thread group cputimes.
 298 */
 299static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
 300{
 301	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 302	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
 303
 304	/* Check if cputimer isn't running. This is accessed without locking. */
 305	if (!READ_ONCE(pct->timers_active)) {
 306		struct task_cputime sum;
 307
 308		/*
 309		 * The POSIX timer interface allows for absolute time expiry
 310		 * values through the TIMER_ABSTIME flag, therefore we have
 311		 * to synchronize the timer to the clock every time we start it.
 312		 */
 313		thread_group_cputime(tsk, &sum);
 314		update_gt_cputime(&cputimer->cputime_atomic, &sum);
 315
 316		/*
 317		 * We're setting timers_active without a lock. Ensure this
 318		 * only gets written to in one operation. We set it after
 319		 * update_gt_cputime() as a small optimization, but
 320		 * barriers are not required because update_gt_cputime()
 321		 * can handle concurrent updates.
 322		 */
 323		WRITE_ONCE(pct->timers_active, true);
 324	}
 325	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
 326}
 327
 328static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
 329{
 330	struct task_cputime ct;
 331
 332	thread_group_cputime(tsk, &ct);
 333	store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
 334}
 335
 336/*
 337 * Sample a process (thread group) clock for the given task clkid. If the
 338 * group's cputime accounting is already enabled, read the atomic
 339 * store. Otherwise a full update is required.  Task's sighand lock must be
 340 * held to protect the task traversal on a full update. clkid is already
 341 * validated.
 342 */
 343static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
 344				  bool start)
 345{
 346	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
 347	struct posix_cputimers *pct = &p->signal->posix_cputimers;
 348	u64 samples[CPUCLOCK_MAX];
 349
 350	if (!READ_ONCE(pct->timers_active)) {
 351		if (start)
 352			thread_group_start_cputime(p, samples);
 353		else
 354			__thread_group_cputime(p, samples);
 355	} else {
 356		proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
 357	}
 358
 359	return samples[clkid];
 360}
 361
 362static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
 363{
 364	const clockid_t clkid = CPUCLOCK_WHICH(clock);
 365	struct task_struct *tsk;
 366	u64 t;
 367
 368	tsk = get_task_for_clock_get(clock);
 369	if (!tsk)
 370		return -EINVAL;
 371
 372	if (CPUCLOCK_PERTHREAD(clock))
 373		t = cpu_clock_sample(clkid, tsk);
 374	else
 375		t = cpu_clock_sample_group(clkid, tsk, false);
 376	put_task_struct(tsk);
 377
 378	*tp = ns_to_timespec64(t);
 379	return 0;
 380}
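/*
 * Editor's note, illustrative user-space sketch (not part of this file):
 * the per-thread and per-process sampling paths above back clock_gettime()
 * for CPU-time clock IDs, e.g.:
 *
 *	struct timespec ts;
 *	clockid_t cid;
 *
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);	// process clock
 *	pthread_getcpuclockid(pthread_self(), &cid);
 *	clock_gettime(cid, &ts);			// calling thread's clock
 */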
 381
 382/*
 383 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 384 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 385 * new timer already all-zeros initialized.
 386 */
 387static int posix_cpu_timer_create(struct k_itimer *new_timer)
 388{
 389	struct task_struct *p = get_task_for_clock(new_timer->it_clock);
 390
 391	if (!p)
 392		return -EINVAL;
 393
 394	new_timer->kclock = &clock_posix_cpu;
 395	timerqueue_init(&new_timer->it.cpu.node);
 396	new_timer->it.cpu.task = p;
 397	return 0;
 398}
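/*
 * Editor's note, illustrative user-space sketch (not part of this file):
 * posix_cpu_timer_create() is reached via timer_create() with a CPU-time
 * clock ID, e.g. a per-process timer that raises SIGALRM after one second
 * of consumed CPU time:
 *
 *	timer_t tid;
 *	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
 *				.sigev_signo  = SIGALRM };
 *	struct itimerspec its = { .it_value.tv_sec = 1 };
 *
 *	timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid);
 *	timer_settime(tid, 0, &its, NULL);
 */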
 399
 400/*
 401 * Clean up a CPU-clock timer that is about to be destroyed.
 402 * This is called from timer deletion with the timer already locked.
 403 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 404 * and try again.  (This happens when the timer is in the middle of firing.)
 405 */
 406static int posix_cpu_timer_del(struct k_itimer *timer)
 407{
 408	struct cpu_timer *ctmr = &timer->it.cpu;
 409	struct task_struct *p = ctmr->task;
 410	struct sighand_struct *sighand;
 411	unsigned long flags;
 412	int ret = 0;
 413
 414	if (WARN_ON_ONCE(!p))
 415		return -EINVAL;
 416
 417	/*
 418	 * Protect against sighand release/switch in exit/exec and process/
 419	 * thread timer list entry concurrent read/writes.
 420	 */
 421	sighand = lock_task_sighand(p, &flags);
 422	if (unlikely(sighand == NULL)) {
 423		/*
 424		 * This raced with the reaping of the task. The exit cleanup
 425		 * should have removed this timer from the timer queue.
 426		 */
 427		WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
 428	} else {
 429		if (timer->it.cpu.firing)
 430			ret = TIMER_RETRY;
 431		else
 432			cpu_timer_dequeue(ctmr);
 433
 434		unlock_task_sighand(p, &flags);
 435	}
 436
 437	if (!ret)
 438		put_task_struct(p);
 439
 440	return ret;
 441}
 442
 443static void cleanup_timerqueue(struct timerqueue_head *head)
 444{
 445	struct timerqueue_node *node;
 446	struct cpu_timer *ctmr;
 447
 448	while ((node = timerqueue_getnext(head))) {
 449		timerqueue_del(head, node);
 450		ctmr = container_of(node, struct cpu_timer, node);
 451		ctmr->head = NULL;
 452	}
 453}
 454
 455/*
 456 * Clean out CPU timers which are still armed when a thread exits. The
 457 * timers are only removed from the list. No other updates are done. The
 458 * corresponding posix timers are still accessible, but cannot be rearmed.
 459 *
 460 * This must be called with the siglock held.
 461 */
 462static void cleanup_timers(struct posix_cputimers *pct)
 463{
 464	cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
 465	cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
 466	cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
 467}
 468
 469/*
 470 * These are both called with the siglock held, when the current thread
 471 * is being reaped.  When the final (leader) thread in the group is reaped,
 472 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 473 */
 474void posix_cpu_timers_exit(struct task_struct *tsk)
 475{
 476	cleanup_timers(&tsk->posix_cputimers);
 477}
 478void posix_cpu_timers_exit_group(struct task_struct *tsk)
 479{
 480	cleanup_timers(&tsk->signal->posix_cputimers);
 481}
 482
 483/*
 484 * Insert the timer on the appropriate list before any timers that
 485 * expire later.  This must be called with the sighand lock held.
 486 */
 487static void arm_timer(struct k_itimer *timer)
 488{
 489	int clkidx = CPUCLOCK_WHICH(timer->it_clock);
 490	struct cpu_timer *ctmr = &timer->it.cpu;
 491	u64 newexp = cpu_timer_getexpires(ctmr);
 492	struct task_struct *p = ctmr->task;
 493	struct posix_cputimer_base *base;
 494
 495	if (CPUCLOCK_PERTHREAD(timer->it_clock))
 496		base = p->posix_cputimers.bases + clkidx;
 497	else
 498		base = p->signal->posix_cputimers.bases + clkidx;
 499
 500	if (!cpu_timer_enqueue(&base->tqhead, ctmr))
 501		return;
 502
 503	/*
 504	 * We are the new earliest-expiring POSIX 1.b timer, hence
 505	 * need to update expiration cache. Take into account that
 506	 * for process timers we share expiration cache with itimers
 507	 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
 508	 */
 509	if (newexp < base->nextevt)
 510		base->nextevt = newexp;
 511
 512	if (CPUCLOCK_PERTHREAD(timer->it_clock))
 513		tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
 514	else
 515		tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
 516}
 517
 518/*
 519 * The timer is locked, fire it and arrange for its reload.
 520 */
 521static void cpu_timer_fire(struct k_itimer *timer)
 522{
 523	struct cpu_timer *ctmr = &timer->it.cpu;
 524
 525	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
 526		/*
 527		 * User doesn't want any signal.
 528		 */
 529		cpu_timer_setexpires(ctmr, 0);
 530	} else if (unlikely(timer->sigq == NULL)) {
 531		/*
 532		 * This is a special case for clock_nanosleep,
 533		 * not a normal timer from sys_timer_create.
 534		 */
 535		wake_up_process(timer->it_process);
 536		cpu_timer_setexpires(ctmr, 0);
 537	} else if (!timer->it_interval) {
 538		/*
 539		 * One-shot timer.  Clear it as soon as it's fired.
 540		 */
 541		posix_timer_event(timer, 0);
 542		cpu_timer_setexpires(ctmr, 0);
 543	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
 544		/*
 545		 * The signal did not get queued because the signal
 546		 * was ignored, so we won't get any callback to
 547		 * reload the timer.  But we need to keep it
 548		 * ticking in case the signal is deliverable next time.
 549		 */
 550		posix_cpu_timer_rearm(timer);
 551		++timer->it_requeue_pending;
 552	}
 553}
 554
 555/*
 556 * Guts of sys_timer_settime for CPU timers.
 557 * This is called with the timer locked and interrupts disabled.
 558 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 559 * and try again.  (This happens when the timer is in the middle of firing.)
 560 */
 561static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 562			       struct itimerspec64 *new, struct itimerspec64 *old)
 563{
 564	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
 565	u64 old_expires, new_expires, old_incr, val;
 566	struct cpu_timer *ctmr = &timer->it.cpu;
 567	struct task_struct *p = ctmr->task;
 568	struct sighand_struct *sighand;
 569	unsigned long flags;
 570	int ret = 0;
 571
 572	if (WARN_ON_ONCE(!p))
 573		return -EINVAL;
 574
 575	/*
 576	 * Use the to_ktime conversion because that clamps the maximum
 577	 * value to KTIME_MAX and avoids multiplication overflows.
 578	 */
 579	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
 580
 581	/*
 582	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
 583	 * and p->signal->cpu_timers read/write in arm_timer()
 584	 */
 585	sighand = lock_task_sighand(p, &flags);
 586	/*
 587	 * If p has just been reaped, we can no
 588	 * longer get any information about it at all.
 589	 */
 590	if (unlikely(sighand == NULL))
 591		return -ESRCH;
 592
 593	/*
 594	 * Disarm any old timer after extracting its expiry time.
 595	 */
 596	old_incr = timer->it_interval;
 597	old_expires = cpu_timer_getexpires(ctmr);
 598
 599	if (unlikely(timer->it.cpu.firing)) {
 600		timer->it.cpu.firing = -1;
 601		ret = TIMER_RETRY;
 602	} else {
 603		cpu_timer_dequeue(ctmr);
 604	}
 605
 606	/*
 607	 * We need to sample the current value to convert the new
 608	 * value from relative to absolute, and to convert the
 609	 * old value from absolute to relative.  To set a process
 610	 * timer, we need a sample to balance the thread expiry
 611	 * times (in arm_timer).  With an absolute time, we must
 612	 * check if it's already passed.  In short, we need a sample.
 613	 */
 614	if (CPUCLOCK_PERTHREAD(timer->it_clock))
 615		val = cpu_clock_sample(clkid, p);
 616	else
 617		val = cpu_clock_sample_group(clkid, p, true);
 618
 619	if (old) {
 620		if (old_expires == 0) {
 621			old->it_value.tv_sec = 0;
 622			old->it_value.tv_nsec = 0;
 623		} else {
 624			/*
 625			 * Update the timer in case it has overrun already.
 626			 * If it has, we'll report it as having overrun and
 627			 * with the next reloaded timer already ticking,
 628			 * though we are swallowing that pending
 629			 * notification here to install the new setting.
 630			 */
 631			u64 exp = bump_cpu_timer(timer, val);
 632
 633			if (val < exp) {
 634				old_expires = exp - val;
 635				old->it_value = ns_to_timespec64(old_expires);
 636			} else {
 637				old->it_value.tv_nsec = 1;
 638				old->it_value.tv_sec = 0;
 639			}
 640		}
 641	}
 642
 643	if (unlikely(ret)) {
 644		/*
 645		 * We are colliding with the timer actually firing.
 646		 * Punt after filling in the timer's old value, and
 647		 * disable this firing since we are already reporting
 648		 * it as an overrun (thanks to bump_cpu_timer above).
 649		 */
 650		unlock_task_sighand(p, &flags);
 651		goto out;
 652	}
 653
 654	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
 655		new_expires += val;
 656	}
 657
 658	/*
 659	 * Install the new expiry time (or zero).
 660	 * For a timer with no notification action, we don't actually
 661	 * arm the timer (we'll just fake it for timer_gettime).
 662	 */
 663	cpu_timer_setexpires(ctmr, new_expires);
 664	if (new_expires != 0 && val < new_expires) {
 665		arm_timer(timer);
 666	}
 667
 668	unlock_task_sighand(p, &flags);
 669	/*
 670	 * Install the new reload setting, and
 671	 * set up the signal and overrun bookkeeping.
 672	 */
 673	timer->it_interval = timespec64_to_ktime(new->it_interval);
 674
 675	/*
 676	 * This acts as a modification timestamp for the timer,
 677	 * so any automatic reload attempt will punt on seeing
 678	 * that we have reset the timer manually.
 679	 */
 680	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
 681		~REQUEUE_PENDING;
 682	timer->it_overrun_last = 0;
 683	timer->it_overrun = -1;
 684
 685	if (new_expires != 0 && !(val < new_expires)) {
 686		/*
 687		 * The designated time already passed, so we notify
 688		 * immediately, even if the thread never runs to
 689		 * accumulate more time on this clock.
 690		 */
 691		cpu_timer_fire(timer);
 692	}
 693
 694	ret = 0;
 695 out:
 696	if (old)
 697		old->it_interval = ns_to_timespec64(old_incr);
 698
 699	return ret;
 700}
 701
 702static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
 703{
 704	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
 705	struct cpu_timer *ctmr = &timer->it.cpu;
 706	u64 now, expires = cpu_timer_getexpires(ctmr);
 707	struct task_struct *p = ctmr->task;
 708
 709	if (WARN_ON_ONCE(!p))
 710		return;
 711
 712	/*
 713	 * Easy part: convert the reload time.
 714	 */
 715	itp->it_interval = ktime_to_timespec64(timer->it_interval);
 716
 717	if (!expires)
 718		return;
 719
 720	/*
 721	 * Sample the clock to take the difference with the expiry time.
 722	 */
 723	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 724		now = cpu_clock_sample(clkid, p);
 725	} else {
 726		struct sighand_struct *sighand;
 727		unsigned long flags;
 728
 729		/*
 730		 * Protect against sighand release/switch in exit/exec and
 731		 * also make timer sampling safe if it ends up calling
 732		 * thread_group_cputime().
 733		 */
 734		sighand = lock_task_sighand(p, &flags);
 735		if (unlikely(sighand == NULL)) {
 736			/*
 737			 * The process has been reaped.
 738			 * We can't even collect a sample any more.
 739			 * Disarm the timer, nothing else to do.
 740			 */
 741			cpu_timer_setexpires(ctmr, 0);
 742			return;
 743		} else {
 744			now = cpu_clock_sample_group(clkid, p, false);
 745			unlock_task_sighand(p, &flags);
 746		}
 747	}
 748
 749	if (now < expires) {
 750		itp->it_value = ns_to_timespec64(expires - now);
 751	} else {
 752		/*
 753		 * The timer should have expired already, but the firing
 754		 * hasn't taken place yet.  Say it's just about to expire.
 755		 */
 756		itp->it_value.tv_nsec = 1;
 757		itp->it_value.tv_sec = 0;
 758	}
 759}
 760
 761#define MAX_COLLECTED	20
 762
 763static u64 collect_timerqueue(struct timerqueue_head *head,
 764			      struct list_head *firing, u64 now)
 765{
 766	struct timerqueue_node *next;
 767	int i = 0;
 768
 769	while ((next = timerqueue_getnext(head))) {
 770		struct cpu_timer *ctmr;
 771		u64 expires;
 772
 773		ctmr = container_of(next, struct cpu_timer, node);
 774		expires = cpu_timer_getexpires(ctmr);
 775		/* Limit the number of timers to expire at once */
 776		if (++i == MAX_COLLECTED || now < expires)
 777			return expires;
 778
 779		ctmr->firing = 1;
 780		cpu_timer_dequeue(ctmr);
 781		list_add_tail(&ctmr->elist, firing);
 782	}
 783
 784	return U64_MAX;
 785}
 786
 787static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
 788				    struct list_head *firing)
 789{
 790	struct posix_cputimer_base *base = pct->bases;
 791	int i;
 792
 793	for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
 794		base->nextevt = collect_timerqueue(&base->tqhead, firing,
 795						    samples[i]);
 796	}
 797}
 798
 799static inline void check_dl_overrun(struct task_struct *tsk)
 800{
 801	if (tsk->dl.dl_overrun) {
 802		tsk->dl.dl_overrun = 0;
 803		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
 804	}
 805}
 806
 807static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
 808{
 809	if (time < limit)
 810		return false;
 811
 812	if (print_fatal_signals) {
 813		pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
 814			rt ? "RT" : "CPU", hard ? "hard" : "soft",
 815			current->comm, task_pid_nr(current));
 816	}
 817	__group_send_sig_info(signo, SEND_SIG_PRIV, current);
 818	return true;
 819}
 820
 821/*
 822 * Check for any per-thread CPU timers that have fired and move them off
 823 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 824 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 825 */
 826static void check_thread_timers(struct task_struct *tsk,
 827				struct list_head *firing)
 828{
 829	struct posix_cputimers *pct = &tsk->posix_cputimers;
 830	u64 samples[CPUCLOCK_MAX];
 831	unsigned long soft;
 832
 833	if (dl_task(tsk))
 834		check_dl_overrun(tsk);
 835
 836	if (expiry_cache_is_inactive(pct))
 837		return;
 838
 839	task_sample_cputime(tsk, samples);
 840	collect_posix_cputimers(pct, samples, firing);
 841
 842	/*
 843	 * Check for the special case thread timers.
 844	 */
 845	soft = task_rlimit(tsk, RLIMIT_RTTIME);
 846	if (soft != RLIM_INFINITY) {
 847		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
 848		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
 849		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
 850
 851		/* At the hard limit, send SIGKILL. No further action. */
 852		if (hard != RLIM_INFINITY &&
 853		    check_rlimit(rttime, hard, SIGKILL, true, true))
 854			return;
 855
 856		/* At the soft limit, send a SIGXCPU every second */
 857		if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
 858			soft += USEC_PER_SEC;
 859			tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
 860		}
 861	}
 862
 863	if (expiry_cache_is_inactive(pct))
 864		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
 865}
 866
 867static inline void stop_process_timers(struct signal_struct *sig)
 868{
 869	struct posix_cputimers *pct = &sig->posix_cputimers;
 870
 871	/* Turn off the active flag. This is done without locking. */
 872	WRITE_ONCE(pct->timers_active, false);
 873	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
 874}
 875
 876static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
 877			     u64 *expires, u64 cur_time, int signo)
 878{
 879	if (!it->expires)
 880		return;
 881
 882	if (cur_time >= it->expires) {
 883		if (it->incr)
 884			it->expires += it->incr;
 885		else
 886			it->expires = 0;
 887
 888		trace_itimer_expire(signo == SIGPROF ?
 889				    ITIMER_PROF : ITIMER_VIRTUAL,
 890				    task_tgid(tsk), cur_time);
 891		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
 892	}
 893
 894	if (it->expires && it->expires < *expires)
 895		*expires = it->expires;
 896}
 897
 898/*
 899 * Check for any process-wide CPU timers that have fired and move them
 900 * off the tsk->*_timers list onto the firing list.  Per-thread timers
 901 * have already been taken off.
 902 */
 903static void check_process_timers(struct task_struct *tsk,
 904				 struct list_head *firing)
 905{
 906	struct signal_struct *const sig = tsk->signal;
 907	struct posix_cputimers *pct = &sig->posix_cputimers;
 908	u64 samples[CPUCLOCK_MAX];
 909	unsigned long soft;
 910
 911	/*
 912	 * If there are no active process wide timers (POSIX 1.b, itimers,
 913	 * RLIMIT_CPU) nothing to check. Also skip the process wide timer
 914	 * processing when there is already another task handling them.
 915	 */
 916	if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
 917		return;
 918
 919	/*
 920	 * Signify that a thread is checking for process timers.
 921	 * Write access to this field is protected by the sighand lock.
 922	 */
 923	pct->expiry_active = true;
 924
 925	/*
 926	 * Collect the current process totals. Group accounting is active
 927	 * so the sample can be taken directly.
 928	 */
 929	proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
 930	collect_posix_cputimers(pct, samples, firing);
 931
 932	/*
 933	 * Check for the special case process timers.
 934	 */
 935	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
 936			 &pct->bases[CPUCLOCK_PROF].nextevt,
 937			 samples[CPUCLOCK_PROF], SIGPROF);
 938	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
 939			 &pct->bases[CPUCLOCK_VIRT].nextevt,
 940			 samples[CPUCLOCK_VIRT], SIGVTALRM);
 941
 942	soft = task_rlimit(tsk, RLIMIT_CPU);
 943	if (soft != RLIM_INFINITY) {
 944		/* RLIMIT_CPU is in seconds. Samples are nanoseconds */
 945		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
 946		u64 ptime = samples[CPUCLOCK_PROF];
 947		u64 softns = (u64)soft * NSEC_PER_SEC;
 948		u64 hardns = (u64)hard * NSEC_PER_SEC;
 949
 950		/* At the hard limit, send SIGKILL. No further action. */
 951		if (hard != RLIM_INFINITY &&
 952		    check_rlimit(ptime, hardns, SIGKILL, false, true))
 953			return;
 954
 955		/* At the soft limit, send a SIGXCPU every second */
 956		if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
 957			sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
 958			softns += NSEC_PER_SEC;
 959		}
 960
 961		/* Update the expiry cache */
 962		if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
 963			pct->bases[CPUCLOCK_PROF].nextevt = softns;
 964	}
 965
 966	if (expiry_cache_is_inactive(pct))
 967		stop_process_timers(sig);
 968
 969	pct->expiry_active = false;
 970}
 971
 972/*
 973 * This is called from the signal code (via posixtimer_rearm)
 974 * when the last timer signal was delivered and we have to reload the timer.
 975 */
 976static void posix_cpu_timer_rearm(struct k_itimer *timer)
 977{
 978	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
 979	struct cpu_timer *ctmr = &timer->it.cpu;
 980	struct task_struct *p = ctmr->task;
 981	struct sighand_struct *sighand;
 982	unsigned long flags;
 983	u64 now;
 984
 985	if (WARN_ON_ONCE(!p))
 986		return;
 987
 988	/*
 989	 * Fetch the current sample and update the timer's expiry time.
 990	 */
 991	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 992		now = cpu_clock_sample(clkid, p);
 993		bump_cpu_timer(timer, now);
 994		if (unlikely(p->exit_state))
 995			return;
 996
 997		/* Protect timer list r/w in arm_timer() */
 998		sighand = lock_task_sighand(p, &flags);
 999		if (!sighand)
1000			return;
1001	} else {
1002		/*
1003		 * Protect arm_timer() and timer sampling in case of call to
1004		 * thread_group_cputime().
1005		 */
1006		sighand = lock_task_sighand(p, &flags);
1007		if (unlikely(sighand == NULL)) {
1008			/*
1009			 * The process has been reaped.
1010			 * We can't even collect a sample any more.
1011			 */
1012			cpu_timer_setexpires(ctmr, 0);
1013			return;
1014		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
1015			/* If the process is dying, no need to rearm */
1016			goto unlock;
1017		}
1018		now = cpu_clock_sample_group(clkid, p, true);
1019		bump_cpu_timer(timer, now);
1020		/* Leave the sighand locked for the call below.  */
1021	}
1022
1023	/*
1024	 * Now re-arm for the new expiry time.
1025	 */
1026	arm_timer(timer);
1027unlock:
1028	unlock_task_sighand(p, &flags);
1029}
1030
1031/**
1032 * task_cputimers_expired - Check whether posix CPU timers are expired
1033 *
1034 * @samples:	Array of current samples for the CPUCLOCK clocks
1035 * @pct:	Pointer to a posix_cputimers container
1036 *
1037 * Returns true if any member of @samples is greater than the corresponding
1038 * member of @pct->bases[CLK].nextevt. False otherwise
1039 */
1040static inline bool
1041task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
1042{
1043	int i;
1044
1045	for (i = 0; i < CPUCLOCK_MAX; i++) {
1046		if (samples[i] >= pct->bases[i].nextevt)
1047			return true;
1048	}
1049	return false;
1050}
1051
1052/**
1053 * fastpath_timer_check - POSIX CPU timers fast path.
1054 *
1055 * @tsk:	The task (thread) being checked.
1056 *
1057 * Check the task and thread group timers.  If both are zero (there are no
1058 * timers set) return false.  Otherwise snapshot the task and thread group
1059 * timers and compare them with the corresponding expiration times.  Return
1060 * true if a timer has expired, else return false.
1061 */
1062static inline bool fastpath_timer_check(struct task_struct *tsk)
1063{
1064	struct posix_cputimers *pct = &tsk->posix_cputimers;
1065	struct signal_struct *sig;
1066
1067	if (!expiry_cache_is_inactive(pct)) {
1068		u64 samples[CPUCLOCK_MAX];
1069
1070		task_sample_cputime(tsk, samples);
1071		if (task_cputimers_expired(samples, pct))
1072			return true;
1073	}
1074
1075	sig = tsk->signal;
1076	pct = &sig->posix_cputimers;
1077	/*
1078	 * Check if thread group timers expired when timers are active and
1079	 * no other thread in the group is already handling expiry for
1080	 * thread group cputimers. These fields are read without the
1081	 * sighand lock. However, this is fine because this is meant to be
1082	 * a fastpath heuristic to determine whether we should try to
1083	 * acquire the sighand lock to handle timer expiry.
1084	 *
1085	 * In the worst case scenario, if concurrently timers_active is set
1086	 * or expiry_active is cleared, but the current thread doesn't see
1087	 * the change yet, the timer checks are delayed until the next
1088	 * thread in the group gets a scheduler interrupt to handle the
1089	 * timer. This isn't an issue in practice because these types of
1090	 * delays with signals actually getting sent are expected.
1091	 */
1092	if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
1093		u64 samples[CPUCLOCK_MAX];
1094
1095		proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
1096					   samples);
1097
1098		if (task_cputimers_expired(samples, pct))
1099			return true;
1100	}
1101
1102	if (dl_task(tsk) && tsk->dl.dl_overrun)
1103		return true;
1104
1105	return false;
1106}
1107
1108/*
1109 * This is called from the timer interrupt handler.  The irq handler has
1110 * already updated our counts.  We need to check if any timers fire now.
1111 * Interrupts are disabled.
1112 */
1113void run_posix_cpu_timers(void)
1114{
1115	struct task_struct *tsk = current;
1116	struct k_itimer *timer, *next;
1117	unsigned long flags;
1118	LIST_HEAD(firing);
1119
1120	lockdep_assert_irqs_disabled();
1121
1122	/*
1123	 * The fast path checks that there are no expired thread or thread
1124	 * group timers.  If that's so, just return.
1125	 */
1126	if (!fastpath_timer_check(tsk))
1127		return;
1128
1129	if (!lock_task_sighand(tsk, &flags))
1130		return;
1131	/*
1132	 * Here we take all the timers that are firing off the
1133	 * tsk->signal->cpu_timers[N] and tsk->cpu_timers[N] lists, and
1134	 * put them on the firing list.
1135	 */
1136	check_thread_timers(tsk, &firing);
1137
1138	check_process_timers(tsk, &firing);
1139
1140	/*
1141	 * We must release these locks before taking any timer's lock.
1142	 * There is a potential race with timer deletion here, as the
1143	 * siglock now protects our private firing list.  We have set
1144	 * the firing flag in each timer, so that a deletion attempt
1145	 * that gets the timer lock before we do will give it up and
1146	 * spin until we've taken care of that timer below.
1147	 */
1148	unlock_task_sighand(tsk, &flags);
1149
1150	/*
1151	 * Now that all the timers on our list have the firing flag,
1152	 * no one will touch their list entries but us.  We'll take
1153	 * each timer's lock before clearing its firing flag, so no
1154	 * timer call will interfere.
1155	 */
1156	list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
1157		int cpu_firing;
1158
1159		spin_lock(&timer->it_lock);
1160		list_del_init(&timer->it.cpu.elist);
1161		cpu_firing = timer->it.cpu.firing;
1162		timer->it.cpu.firing = 0;
1163		/*
1164		 * The firing flag is -1 if we collided with a reset
1165		 * of the timer, which already reported this
1166		 * almost-firing as an overrun.  So don't generate an event.
1167		 */
1168		if (likely(cpu_firing >= 0))
1169			cpu_timer_fire(timer);
1170		spin_unlock(&timer->it_lock);
1171	}
1172}
1173
1174/*
1175 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1176 * The tsk->sighand->siglock must be held by the caller.
1177 */
1178void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
1179			   u64 *newval, u64 *oldval)
1180{
1181	u64 now, *nextevt;
1182
1183	if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
1184		return;
1185
1186	nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
1187	now = cpu_clock_sample_group(clkid, tsk, true);
1188
1189	if (oldval) {
1190		/*
1191		 * We are setting itimer. The *oldval is absolute and we update
1192		 * it to be relative, *newval argument is relative and we update
1193		 * it to be absolute.
1194		 */
1195		if (*oldval) {
1196			if (*oldval <= now) {
1197				/* Just about to fire. */
1198				*oldval = TICK_NSEC;
1199			} else {
1200				*oldval -= now;
1201			}
1202		}
1203
1204		if (!*newval)
1205			return;
1206		*newval += now;
1207	}
1208
1209	/*
1210	 * Update expiration cache if this is the earliest timer. CPUCLOCK_PROF
1211	 * expiry cache is also used by RLIMIT_CPU!
1212	 */
1213	if (*newval < *nextevt)
1214		*nextevt = *newval;
1215
1216	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
1217}
1218
1219static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1220			    const struct timespec64 *rqtp)
1221{
1222	struct itimerspec64 it;
1223	struct k_itimer timer;
1224	u64 expires;
1225	int error;
1226
1227	/*
1228	 * Set up a temporary timer and then wait for it to go off.
1229	 */
1230	memset(&timer, 0, sizeof timer);
1231	spin_lock_init(&timer.it_lock);
1232	timer.it_clock = which_clock;
1233	timer.it_overrun = -1;
1234	error = posix_cpu_timer_create(&timer);
1235	timer.it_process = current;
1236
1237	if (!error) {
1238		static struct itimerspec64 zero_it;
1239		struct restart_block *restart;
1240
1241		memset(&it, 0, sizeof(it));
1242		it.it_value = *rqtp;
1243
1244		spin_lock_irq(&timer.it_lock);
1245		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
1246		if (error) {
1247			spin_unlock_irq(&timer.it_lock);
1248			return error;
1249		}
1250
1251		while (!signal_pending(current)) {
1252			if (!cpu_timer_getexpires(&timer.it.cpu)) {
1253				/*
1254				 * Our timer fired and was reset; the
1255				 * deletion below cannot fail.
1256				 */
1257				posix_cpu_timer_del(&timer);
1258				spin_unlock_irq(&timer.it_lock);
1259				return 0;
1260			}
1261
1262			/*
1263			 * Block until cpu_timer_fire (or a signal) wakes us.
1264			 */
1265			__set_current_state(TASK_INTERRUPTIBLE);
1266			spin_unlock_irq(&timer.it_lock);
1267			schedule();
1268			spin_lock_irq(&timer.it_lock);
1269		}
1270
1271		/*
1272		 * We were interrupted by a signal.
1273		 */
1274		expires = cpu_timer_getexpires(&timer.it.cpu);
1275		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
1276		if (!error) {
1277			/*
1278			 * Timer is now unarmed, deletion can not fail.
1279			 */
1280			posix_cpu_timer_del(&timer);
1281		}
1282		spin_unlock_irq(&timer.it_lock);
1283
1284		while (error == TIMER_RETRY) {
1285			/*
1286			 * We need to handle the case when the timer was or is in the
1287			 * middle of firing. In other cases we already freed
1288			 * resources.
1289			 */
1290			spin_lock_irq(&timer.it_lock);
1291			error = posix_cpu_timer_del(&timer);
1292			spin_unlock_irq(&timer.it_lock);
1293		}
1294
1295		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
1296			/*
1297			 * It actually did fire already.
1298			 */
1299			return 0;
1300		}
1301
1302		error = -ERESTART_RESTARTBLOCK;
1303		/*
1304		 * Report back to the user the time still remaining.
1305		 */
1306		restart = &current->restart_block;
1307		restart->nanosleep.expires = expires;
1308		if (restart->nanosleep.type != TT_NONE)
1309			error = nanosleep_copyout(restart, &it.it_value);
1310	}
1311
1312	return error;
1313}
1314
1315static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1316
1317static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1318			    const struct timespec64 *rqtp)
1319{
1320	struct restart_block *restart_block = &current->restart_block;
1321	int error;
1322
1323	/*
1324	 * Diagnose required errors first.
1325	 */
1326	if (CPUCLOCK_PERTHREAD(which_clock) &&
1327	    (CPUCLOCK_PID(which_clock) == 0 ||
1328	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
1329		return -EINVAL;
1330
1331	error = do_cpu_nanosleep(which_clock, flags, rqtp);
1332
1333	if (error == -ERESTART_RESTARTBLOCK) {
1334
1335		if (flags & TIMER_ABSTIME)
1336			return -ERESTARTNOHAND;
1337
1338		restart_block->fn = posix_cpu_nsleep_restart;
1339		restart_block->nanosleep.clockid = which_clock;
1340	}
1341	return error;
1342}
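/*
 * Editor's note, illustrative user-space sketch (not part of this file):
 * do_cpu_nanosleep() above services clock_nanosleep() on a CPU clock,
 * e.g. blocking until the process has consumed two more seconds of CPU
 * time:
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
 *	ts.tv_sec += 2;
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, TIMER_ABSTIME, &ts, NULL);
 */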
1343
1344static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1345{
1346	clockid_t which_clock = restart_block->nanosleep.clockid;
1347	struct timespec64 t;
1348
1349	t = ns_to_timespec64(restart_block->nanosleep.expires);
1350
1351	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
1352}
1353
1354#define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
1355#define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)
1356
1357static int process_cpu_clock_getres(const clockid_t which_clock,
1358				    struct timespec64 *tp)
1359{
1360	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1361}
1362static int process_cpu_clock_get(const clockid_t which_clock,
1363				 struct timespec64 *tp)
1364{
1365	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1366}
1367static int process_cpu_timer_create(struct k_itimer *timer)
1368{
1369	timer->it_clock = PROCESS_CLOCK;
1370	return posix_cpu_timer_create(timer);
1371}
1372static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1373			      const struct timespec64 *rqtp)
1374{
1375	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
1376}
1377static int thread_cpu_clock_getres(const clockid_t which_clock,
1378				   struct timespec64 *tp)
1379{
1380	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1381}
1382static int thread_cpu_clock_get(const clockid_t which_clock,
1383				struct timespec64 *tp)
1384{
1385	return posix_cpu_clock_get(THREAD_CLOCK, tp);
1386}
1387static int thread_cpu_timer_create(struct k_itimer *timer)
1388{
1389	timer->it_clock = THREAD_CLOCK;
1390	return posix_cpu_timer_create(timer);
1391}
1392
1393const struct k_clock clock_posix_cpu = {
1394	.clock_getres	= posix_cpu_clock_getres,
1395	.clock_set	= posix_cpu_clock_set,
1396	.clock_get	= posix_cpu_clock_get,
1397	.timer_create	= posix_cpu_timer_create,
1398	.nsleep		= posix_cpu_nsleep,
1399	.timer_set	= posix_cpu_timer_set,
1400	.timer_del	= posix_cpu_timer_del,
1401	.timer_get	= posix_cpu_timer_get,
1402	.timer_rearm	= posix_cpu_timer_rearm,
1403};
1404
1405const struct k_clock clock_process = {
1406	.clock_getres	= process_cpu_clock_getres,
1407	.clock_get	= process_cpu_clock_get,
1408	.timer_create	= process_cpu_timer_create,
1409	.nsleep		= process_cpu_nsleep,
1410};
1411
1412const struct k_clock clock_thread = {
1413	.clock_getres	= thread_cpu_clock_getres,
1414	.clock_get	= thread_cpu_clock_get,
1415	.timer_create	= thread_cpu_timer_create,
1416};
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Implement CPU time clocks for the POSIX clock interface.
   4 */
   5
   6#include <linux/sched/signal.h>
   7#include <linux/sched/cputime.h>
   8#include <linux/posix-timers.h>
   9#include <linux/errno.h>
  10#include <linux/math64.h>
  11#include <linux/uaccess.h>
  12#include <linux/kernel_stat.h>
  13#include <trace/events/timer.h>
  14#include <linux/tick.h>
  15#include <linux/workqueue.h>
  16#include <linux/compat.h>
  17#include <linux/sched/deadline.h>
  18#include <linux/task_work.h>
  19
  20#include "posix-timers.h"
  21
  22static void posix_cpu_timer_rearm(struct k_itimer *timer);
  23
  24void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
  25{
  26	posix_cputimers_init(pct);
  27	if (cpu_limit != RLIM_INFINITY) {
  28		pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
  29		pct->timers_active = true;
  30	}
  31}
  32
  33/*
  34 * Called after updating RLIMIT_CPU to run cpu timer and update
  35 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
  36 * necessary. Needs siglock protection since other code may update the
  37 * expiration cache as well.
  38 *
  39 * Returns 0 on success, -ESRCH on failure.  Can fail if the task is exiting and
  40 * we cannot lock_task_sighand.  Cannot fail if task is current.
  41 */
  42int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
  43{
  44	u64 nsecs = rlim_new * NSEC_PER_SEC;
  45	unsigned long irq_fl;
  46
  47	if (!lock_task_sighand(task, &irq_fl))
  48		return -ESRCH;
  49	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
  50	unlock_task_sighand(task, &irq_fl);
  51	return 0;
  52}
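/*
 * Editor's note, illustrative sketch (not part of this file): unlike the
 * v5.4 variant above, this version can fail when the target task is
 * already exiting, so a caller (with a hypothetical new_secs limit in
 * seconds) might check:
 *
 *	if (update_rlimit_cpu(task, new_secs) == -ESRCH)
 *		;	// sighand already gone, nothing to arm
 */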
  53
  54/*
  55 * Functions for validating access to tasks.
  56 */
  57static struct pid *pid_for_clock(const clockid_t clock, bool gettime)
  58{
  59	const bool thread = !!CPUCLOCK_PERTHREAD(clock);
  60	const pid_t upid = CPUCLOCK_PID(clock);
  61	struct pid *pid;
  62
  63	if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
  64		return NULL;
  65
  66	/*
  67	 * If the encoded PID is 0, then the timer is targeted at current
  68	 * or the process to which current belongs.
  69	 */
  70	if (upid == 0)
  71		return thread ? task_pid(current) : task_tgid(current);
  72
  73	pid = find_vpid(upid);
  74	if (!pid)
  75		return NULL;
  76
  77	if (thread) {
  78		struct task_struct *tsk = pid_task(pid, PIDTYPE_PID);
  79		return (tsk && same_thread_group(tsk, current)) ? pid : NULL;
  80	}
  81
  82	/*
  83	 * For clock_gettime(PROCESS) allow finding the process
  84	 * with the pid of the current task.  The code needs the tgid
  85	 * of the process so that pid_task(pid, PIDTYPE_TGID) can be
  86	 * used to find the process.
  87	 */
  88	if (gettime && (pid == task_pid(current)))
  89		return task_tgid(current);
  90
  91	/*
  92	 * For process clocks, require that the pid identifies a process.
  93	 */
  94	return pid_has_task(pid, PIDTYPE_TGID) ? pid : NULL;
  95}
  96
  97static inline int validate_clock_permissions(const clockid_t clock)
  98{
  99	int ret;
 100
 101	rcu_read_lock();
 102	ret = pid_for_clock(clock, false) ? 0 : -EINVAL;
 103	rcu_read_unlock();
 104
 105	return ret;
 106}
 107
 108static inline enum pid_type clock_pid_type(const clockid_t clock)
 109{
 110	return CPUCLOCK_PERTHREAD(clock) ? PIDTYPE_PID : PIDTYPE_TGID;
 111}
 112
 113static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
 114{
 115	return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));
 116}
 117
 118/*
 119 * Update expiry time from increment, and increase overrun count,
 120 * given the current clock sample.
 121 */
 122static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
 123{
 124	u64 delta, incr, expires = timer->it.cpu.node.expires;
 125	int i;
 126
 127	if (!timer->it_interval)
 128		return expires;
 129
 130	if (now < expires)
 131		return expires;
 132
 133	incr = timer->it_interval;
 134	delta = now + incr - expires;
 135
 136	/* Don't use (incr*2 < delta), incr*2 might overflow. */
 137	for (i = 0; incr < delta - incr; i++)
 138		incr = incr << 1;
 139
 140	for (; i >= 0; incr >>= 1, i--) {
 141		if (delta < incr)
 142			continue;
 143
 144		timer->it.cpu.node.expires += incr;
 145		timer->it_overrun += 1LL << i;
 146		delta -= incr;
 147	}
 148	return timer->it.cpu.node.expires;
 149}
 150
 151/* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
 152static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
 153{
 154	return !(~pct->bases[CPUCLOCK_PROF].nextevt |
 155		 ~pct->bases[CPUCLOCK_VIRT].nextevt |
 156		 ~pct->bases[CPUCLOCK_SCHED].nextevt);
 157}
 158
 159static int
 160posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
 161{
 162	int error = validate_clock_permissions(which_clock);
 163
 164	if (!error) {
 165		tp->tv_sec = 0;
 166		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
 167		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
 168			/*
 169			 * If sched_clock is using a cycle counter, we
 170			 * don't have any idea of its true resolution
 171			 * exported, but it is much more than 1s/HZ.
 172			 */
 173			tp->tv_nsec = 1;
 174		}
 175	}
 176	return error;
 177}
 178
 179static int
 180posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
 181{
 182	int error = validate_clock_permissions(clock);
 183
 184	/*
 185	 * You can never reset a CPU clock, but we check for other errors
 186	 * in the call before failing with EPERM.
 187	 */
 188	return error ? : -EPERM;
 189}
 190
 191/*
 192 * Sample a per-thread clock for the given task. clkid is validated.
 193 */
 194static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
 195{
 196	u64 utime, stime;
 197
 198	if (clkid == CPUCLOCK_SCHED)
 199		return task_sched_runtime(p);
 200
 201	task_cputime(p, &utime, &stime);
 202
 203	switch (clkid) {
 204	case CPUCLOCK_PROF:
 205		return utime + stime;
 206	case CPUCLOCK_VIRT:
 207		return utime;
 208	default:
 209		WARN_ON_ONCE(1);
 210	}
 211	return 0;
 212}
 213
 214static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
 215{
 216	samples[CPUCLOCK_PROF] = stime + utime;
 217	samples[CPUCLOCK_VIRT] = utime;
 218	samples[CPUCLOCK_SCHED] = rtime;
 219}
 220
 221static void task_sample_cputime(struct task_struct *p, u64 *samples)
 222{
 223	u64 stime, utime;
 224
 225	task_cputime(p, &utime, &stime);
 226	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
 227}
 228
 229static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
 230				       u64 *samples)
 231{
 232	u64 stime, utime, rtime;
 233
 234	utime = atomic64_read(&at->utime);
 235	stime = atomic64_read(&at->stime);
 236	rtime = atomic64_read(&at->sum_exec_runtime);
 237	store_samples(samples, stime, utime, rtime);
 238}
 239
 240/*
 241 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 242 * to avoid race conditions with concurrent updates to cputime.
 243 */
 244static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
 245{
 246	u64 curr_cputime;
 247retry:
 248	curr_cputime = atomic64_read(cputime);
 249	if (sum_cputime > curr_cputime) {
 250		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
 251			goto retry;
 252	}
 253}
 254
 255static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
 256			      struct task_cputime *sum)
 257{
 258	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
 259	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
 260	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
 261}
 262
 263/**
 264 * thread_group_sample_cputime - Sample cputime for a given task
 265 * @tsk:	Task for which cputime needs to be sampled
 266 * @samples:	Storage for time samples
 267 *
 268 * Called from sys_getitimer() to calculate the expiry time of an active
 269 * timer. That means group cputime accounting is already active. Called
 270 * with task sighand lock held.
 271 *
 272 * Updates @samples with an uptodate sample of the thread group cputimes.
 273 */
 274void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
 275{
 276	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 277	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
 278
 279	WARN_ON_ONCE(!pct->timers_active);
 280
 281	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
 282}
 283
 284/**
 285 * thread_group_start_cputime - Start cputime and return a sample
 286 * @tsk:	Task for which cputime needs to be started
 287 * @samples:	Storage for time samples
 288 *
 289 * The thread group cputime accounting is avoided when there are no posix
 290 * CPU timers armed. Before starting a timer it's required to check whether
 291 * the time accounting is active. If not, a full update of the atomic
 292 * accounting store needs to be done and the accounting enabled.
 293 *
 294 * Updates @samples with an uptodate sample of the thread group cputimes.
 295 */
 296static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
 297{
 298	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 299	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
 300
 301	lockdep_assert_task_sighand_held(tsk);
 302
 303	/* Check if cputimer isn't running. This is accessed without locking. */
 304	if (!READ_ONCE(pct->timers_active)) {
 305		struct task_cputime sum;
 306
 307		/*
 308		 * The POSIX timer interface allows for absolute time expiry
 309		 * values through the TIMER_ABSTIME flag, therefore we have
 310		 * to synchronize the timer to the clock every time we start it.
 311		 */
 312		thread_group_cputime(tsk, &sum);
 313		update_gt_cputime(&cputimer->cputime_atomic, &sum);
 314
 315		/*
 316		 * We're setting timers_active without a lock. Ensure this
 317		 * only gets written to in one operation. We set it after
 318		 * update_gt_cputime() as a small optimization, but
 319		 * barriers are not required because update_gt_cputime()
 320		 * can handle concurrent updates.
 321		 */
 322		WRITE_ONCE(pct->timers_active, true);
 323	}
 324	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
 325}
 326
 327static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
 328{
 329	struct task_cputime ct;
 330
 331	thread_group_cputime(tsk, &ct);
 332	store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
 333}
 334
 335/*
 336 * Sample a process (thread group) clock for the given task clkid. If the
 337 * group's cputime accounting is already enabled, read the atomic
 338 * store. Otherwise a full update is required.  clkid is already validated.
 339 */
 340static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
 341				  bool start)
 342{
 343	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
 344	struct posix_cputimers *pct = &p->signal->posix_cputimers;
 345	u64 samples[CPUCLOCK_MAX];
 346
 347	if (!READ_ONCE(pct->timers_active)) {
 348		if (start)
 349			thread_group_start_cputime(p, samples);
 350		else
 351			__thread_group_cputime(p, samples);
 352	} else {
 353		proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
 354	}
 355
 356	return samples[clkid];
 357}
 358
 359static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
 360{
 361	const clockid_t clkid = CPUCLOCK_WHICH(clock);
 362	struct task_struct *tsk;
 363	u64 t;
 364
 365	rcu_read_lock();
 366	tsk = pid_task(pid_for_clock(clock, true), clock_pid_type(clock));
 367	if (!tsk) {
 368		rcu_read_unlock();
 369		return -EINVAL;
 370	}
 371
 372	if (CPUCLOCK_PERTHREAD(clock))
 373		t = cpu_clock_sample(clkid, tsk);
 374	else
 375		t = cpu_clock_sample_group(clkid, tsk, false);
 376	rcu_read_unlock();
 377
 378	*tp = ns_to_timespec64(t);
 379	return 0;
 380}
 381
 382/*
 383 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 384 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 385 * new timer already all-zeros initialized.
 386 */
 387static int posix_cpu_timer_create(struct k_itimer *new_timer)
 388{
 389	static struct lock_class_key posix_cpu_timers_key;
 390	struct pid *pid;
 391
 392	rcu_read_lock();
 393	pid = pid_for_clock(new_timer->it_clock, false);
 394	if (!pid) {
 395		rcu_read_unlock();
 396		return -EINVAL;
 397	}
 398
 399	/*
 400	 * If posix timer expiry is handled in task work context then
 401	 * timer::it_lock can be taken without disabling interrupts as all
 402	 * other locking happens in task context. This requires a separate
 403	 * lock class key otherwise regular posix timer expiry would record
 404	 * the lock class being taken in interrupt context and generate a
 405	 * false positive warning.
 406	 */
 407	if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK))
 408		lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key);
 409
 410	new_timer->kclock = &clock_posix_cpu;
 411	timerqueue_init(&new_timer->it.cpu.node);
 412	new_timer->it.cpu.pid = get_pid(pid);
 413	rcu_read_unlock();
 414	return 0;
 415}
 416
 417static struct posix_cputimer_base *timer_base(struct k_itimer *timer,
 418					      struct task_struct *tsk)
 419{
 420	int clkidx = CPUCLOCK_WHICH(timer->it_clock);
 421
 422	if (CPUCLOCK_PERTHREAD(timer->it_clock))
 423		return tsk->posix_cputimers.bases + clkidx;
 424	else
 425		return tsk->signal->posix_cputimers.bases + clkidx;
 426}
 427
 428/*
 429 * Force recalculating the base earliest expiration on the next tick.
 430 * This will also re-evaluate the need to keep around the process wide
 431 * cputime counter and tick dependency and eventually shut these down
 432 * if necessary.
 433 */
 434static void trigger_base_recalc_expires(struct k_itimer *timer,
 435					struct task_struct *tsk)
 436{
 437	struct posix_cputimer_base *base = timer_base(timer, tsk);
 438
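	/*
	 * Zero is earlier than any possible sample, so the next tick is
	 * forced to re-evaluate this base's expiry.
	 */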
 439	base->nextevt = 0;
 440}
 441
 442/*
 443 * Dequeue the timer and reset the base if it was its earliest expiration.
 444 * It makes sure the next tick recalculates the base next expiration so we
 445 * don't keep the costly process wide cputime counter around for a random
 446 * amount of time, along with the tick dependency.
 447 *
 448 * If another timer gets queued between this and the next tick, its
 449 * expiration will update the base next event if necessary on the next
 450 * tick.
 451 */
 452static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
 453{
 454	struct cpu_timer *ctmr = &timer->it.cpu;
 455	struct posix_cputimer_base *base;
 456
 457	if (!cpu_timer_dequeue(ctmr))
 458		return;
 459
 460	base = timer_base(timer, p);
 461	if (cpu_timer_getexpires(ctmr) == base->nextevt)
 462		trigger_base_recalc_expires(timer, p);
 463}
 464
 465
 466/*
 467 * Clean up a CPU-clock timer that is about to be destroyed.
 468 * This is called from timer deletion with the timer already locked.
 469 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 470 * and try again.  (This happens when the timer is in the middle of firing.)
 471 */
 472static int posix_cpu_timer_del(struct k_itimer *timer)
 473{
 474	struct cpu_timer *ctmr = &timer->it.cpu;
 475	struct sighand_struct *sighand;
 476	struct task_struct *p;
 477	unsigned long flags;
 478	int ret = 0;
 479
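	/*
	 * Resolve the timer's target task under RCU. A NULL result means
	 * the task (or the whole process) has already been reaped.
	 */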
 480	rcu_read_lock();
 481	p = cpu_timer_task_rcu(timer);
 482	if (!p)
 483		goto out;
 484
 485	/*
 486	 * Protect against sighand release/switch in exit/exec and process/
 487	 * thread timer list entry concurrent read/writes.
 488	 */
 489	sighand = lock_task_sighand(p, &flags);
 490	if (unlikely(sighand == NULL)) {
 491		/*
 492		 * This raced with the reaping of the task. The exit cleanup
 493		 * should have removed this timer from the timer queue.
 494		 */
 495		WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
 496	} else {
 497		if (timer->it.cpu.firing)
 498			ret = TIMER_RETRY;
 499		else
 500			disarm_timer(timer, p);
 501
 502		unlock_task_sighand(p, &flags);
 503	}
 504
 505out:
 506	rcu_read_unlock();
 507	if (!ret)
 508		put_pid(ctmr->pid);
 509
 510	return ret;
 511}
 512
 513static void cleanup_timerqueue(struct timerqueue_head *head)
 514{
 515	struct timerqueue_node *node;
 516	struct cpu_timer *ctmr;
 517
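	/*
	 * Detach every queued timer and clear its head pointer so the
	 * corresponding posix timer is seen as no longer queued.
	 */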
 518	while ((node = timerqueue_getnext(head))) {
 519		timerqueue_del(head, node);
 520		ctmr = container_of(node, struct cpu_timer, node);
 521		ctmr->head = NULL;
 522	}
 523}
 524
 525/*
 526 * Clean out CPU timers which are still armed when a thread exits. The
 527 * timers are only removed from the list. No other updates are done. The
 528 * corresponding posix timers are still accessible, but cannot be rearmed.
 529 *
 530 * This must be called with the siglock held.
 531 */
 532static void cleanup_timers(struct posix_cputimers *pct)
 533{
 534	cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
 535	cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
 536	cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
 537}
 538
 539/*
 540 * These are both called with the siglock held, when the current thread
 541 * is being reaped.  When the final (leader) thread in the group is reaped,
 542 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 543 */
 544void posix_cpu_timers_exit(struct task_struct *tsk)
 545{
 546	cleanup_timers(&tsk->posix_cputimers);
 547}
 548void posix_cpu_timers_exit_group(struct task_struct *tsk)
 549{
 550	cleanup_timers(&tsk->signal->posix_cputimers);
 551}
 552
 553/*
 554 * Insert the timer on the appropriate list before any timers that
 555 * expire later.  This must be called with the sighand lock held.
 556 */
 557static void arm_timer(struct k_itimer *timer, struct task_struct *p)
 558{
 559	struct posix_cputimer_base *base = timer_base(timer, p);
 560	struct cpu_timer *ctmr = &timer->it.cpu;
 561	u64 newexp = cpu_timer_getexpires(ctmr);
 562
 563	if (!cpu_timer_enqueue(&base->tqhead, ctmr))
 564		return;
 565
 566	/*
 567	 * We are the new earliest-expiring POSIX 1.b timer, hence
 568	 * need to update expiration cache. Take into account that
 569	 * for process timers we share expiration cache with itimers
 570	 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
 571	 */
 572	if (newexp < base->nextevt)
 573		base->nextevt = newexp;
 574
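	/*
	 * Keep the tick running for the target task or process so the per
	 * tick expiry checks notice this timer, even on nohz_full CPUs.
	 */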
 575	if (CPUCLOCK_PERTHREAD(timer->it_clock))
 576		tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
 577	else
 578		tick_dep_set_signal(p, TICK_DEP_BIT_POSIX_TIMER);
 579}
 580
 581/*
 582 * The timer is locked, fire it and arrange for its reload.
 583 */
 584static void cpu_timer_fire(struct k_itimer *timer)
 585{
 586	struct cpu_timer *ctmr = &timer->it.cpu;
 587
 588	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
 589		/*
  590		 * The user doesn't want any signal.
 591		 */
 592		cpu_timer_setexpires(ctmr, 0);
 593	} else if (unlikely(timer->sigq == NULL)) {
 594		/*
  595		 * This is a special case for clock_nanosleep,
 596		 * not a normal timer from sys_timer_create.
 597		 */
 598		wake_up_process(timer->it_process);
 599		cpu_timer_setexpires(ctmr, 0);
 600	} else if (!timer->it_interval) {
 601		/*
 602		 * One-shot timer.  Clear it as soon as it's fired.
 603		 */
 604		posix_timer_event(timer, 0);
 605		cpu_timer_setexpires(ctmr, 0);
 606	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
 607		/*
 608		 * The signal did not get queued because the signal
 609		 * was ignored, so we won't get any callback to
 610		 * reload the timer.  But we need to keep it
 611		 * ticking in case the signal is deliverable next time.
 612		 */
 613		posix_cpu_timer_rearm(timer);
 614		++timer->it_requeue_pending;
 615	}
 616}
 617
 618/*
 619 * Guts of sys_timer_settime for CPU timers.
 620 * This is called with the timer locked and interrupts disabled.
 621 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 622 * and try again.  (This happens when the timer is in the middle of firing.)
 623 */
 624static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 625			       struct itimerspec64 *new, struct itimerspec64 *old)
 626{
 627	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
 628	u64 old_expires, new_expires, old_incr, val;
 629	struct cpu_timer *ctmr = &timer->it.cpu;
 630	struct sighand_struct *sighand;
 631	struct task_struct *p;
 632	unsigned long flags;
 633	int ret = 0;
 634
 635	rcu_read_lock();
 636	p = cpu_timer_task_rcu(timer);
 637	if (!p) {
 638		/*
 639		 * If p has just been reaped, we can no
 640		 * longer get any information about it at all.
 641		 */
 642		rcu_read_unlock();
 643		return -ESRCH;
 644	}
 645
 646	/*
 647	 * Use the to_ktime conversion because that clamps the maximum
  648	 * value to KTIME_MAX and avoids multiplication overflows.
 649	 */
 650	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
 651
 652	/*
  653	 * Protect against sighand release/switch in exit/exec and against
  654	 * concurrent read/write of the timer list entries in arm_timer().
 655	 */
 656	sighand = lock_task_sighand(p, &flags);
 657	/*
 658	 * If p has just been reaped, we can no
 659	 * longer get any information about it at all.
 660	 */
 661	if (unlikely(sighand == NULL)) {
 662		rcu_read_unlock();
 663		return -ESRCH;
 664	}
 665
 666	/*
 667	 * Disarm any old timer after extracting its expiry time.
 668	 */
 669	old_incr = timer->it_interval;
 670	old_expires = cpu_timer_getexpires(ctmr);
 671
 672	if (unlikely(timer->it.cpu.firing)) {
 673		timer->it.cpu.firing = -1;
 674		ret = TIMER_RETRY;
 675	} else {
 676		cpu_timer_dequeue(ctmr);
 677	}
 678
 679	/*
 680	 * We need to sample the current value to convert the new
  681	 * value from relative to absolute, and to convert the
 682	 * old value from absolute to relative.  To set a process
 683	 * timer, we need a sample to balance the thread expiry
 684	 * times (in arm_timer).  With an absolute time, we must
 685	 * check if it's already passed.  In short, we need a sample.
 686	 */
 687	if (CPUCLOCK_PERTHREAD(timer->it_clock))
 688		val = cpu_clock_sample(clkid, p);
 689	else
 690		val = cpu_clock_sample_group(clkid, p, true);
 691
 692	if (old) {
 693		if (old_expires == 0) {
 694			old->it_value.tv_sec = 0;
 695			old->it_value.tv_nsec = 0;
 696		} else {
 697			/*
 698			 * Update the timer in case it has overrun already.
 699			 * If it has, we'll report it as having overrun and
 700			 * with the next reloaded timer already ticking,
 701			 * though we are swallowing that pending
 702			 * notification here to install the new setting.
 703			 */
 704			u64 exp = bump_cpu_timer(timer, val);
 705
 706			if (val < exp) {
 707				old_expires = exp - val;
 708				old->it_value = ns_to_timespec64(old_expires);
 709			} else {
 710				old->it_value.tv_nsec = 1;
 711				old->it_value.tv_sec = 0;
 712			}
 713		}
 714	}
 715
 716	if (unlikely(ret)) {
 717		/*
 718		 * We are colliding with the timer actually firing.
 719		 * Punt after filling in the timer's old value, and
 720		 * disable this firing since we are already reporting
 721		 * it as an overrun (thanks to bump_cpu_timer above).
 722		 */
 723		unlock_task_sighand(p, &flags);
 724		goto out;
 725	}
 726
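	/*
	 * A relative expiry is converted to an absolute one based on the
	 * sample taken above. Absolute values are used as is.
	 */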
 727	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
 728		new_expires += val;
 729	}
 730
 731	/*
 732	 * Install the new expiry time (or zero).
 733	 * For a timer with no notification action, we don't actually
 734	 * arm the timer (we'll just fake it for timer_gettime).
 735	 */
 736	cpu_timer_setexpires(ctmr, new_expires);
 737	if (new_expires != 0 && val < new_expires) {
 738		arm_timer(timer, p);
 739	}
 740
 741	unlock_task_sighand(p, &flags);
 742	/*
 743	 * Install the new reload setting, and
 744	 * set up the signal and overrun bookkeeping.
 745	 */
 746	timer->it_interval = timespec64_to_ktime(new->it_interval);
 747
 748	/*
 749	 * This acts as a modification timestamp for the timer,
 750	 * so any automatic reload attempt will punt on seeing
 751	 * that we have reset the timer manually.
 752	 */
 753	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
 754		~REQUEUE_PENDING;
 755	timer->it_overrun_last = 0;
 756	timer->it_overrun = -1;
 757
 758	if (val >= new_expires) {
 759		if (new_expires != 0) {
 760			/*
 761			 * The designated time already passed, so we notify
 762			 * immediately, even if the thread never runs to
 763			 * accumulate more time on this clock.
 764			 */
 765			cpu_timer_fire(timer);
 766		}
 767
 768		/*
 769		 * Make sure we don't keep around the process wide cputime
 770		 * counter or the tick dependency if they are not necessary.
 771		 */
 772		sighand = lock_task_sighand(p, &flags);
 773		if (!sighand)
 774			goto out;
 775
 776		if (!cpu_timer_queued(ctmr))
 777			trigger_base_recalc_expires(timer, p);
 778
 779		unlock_task_sighand(p, &flags);
 780	}
 781 out:
 782	rcu_read_unlock();
 783	if (old)
 784		old->it_interval = ns_to_timespec64(old_incr);
 785
 786	return ret;
 787}
 788
 789static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
 790{
 791	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
 792	struct cpu_timer *ctmr = &timer->it.cpu;
 793	u64 now, expires = cpu_timer_getexpires(ctmr);
 794	struct task_struct *p;
 795
 796	rcu_read_lock();
 797	p = cpu_timer_task_rcu(timer);
 798	if (!p)
 799		goto out;
 800
 801	/*
 802	 * Easy part: convert the reload time.
 803	 */
 804	itp->it_interval = ktime_to_timespec64(timer->it_interval);
 805
 806	if (!expires)
 807		goto out;
 808
 809	/*
 810	 * Sample the clock to take the difference with the expiry time.
 811	 */
 812	if (CPUCLOCK_PERTHREAD(timer->it_clock))
 813		now = cpu_clock_sample(clkid, p);
 814	else
 815		now = cpu_clock_sample_group(clkid, p, false);
 816
 817	if (now < expires) {
 818		itp->it_value = ns_to_timespec64(expires - now);
 819	} else {
 820		/*
 821		 * The timer should have expired already, but the firing
 822		 * hasn't taken place yet.  Say it's just about to expire.
 823		 */
 824		itp->it_value.tv_nsec = 1;
 825		itp->it_value.tv_sec = 0;
 826	}
 827out:
 828	rcu_read_unlock();
 829}
 830
 831#define MAX_COLLECTED	20
 832
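/*
 * Move expired timers from @head onto the @firing list. Returns the expiry
 * time of the first timer remaining on the queue, which becomes the new
 * expiry cache value, or U64_MAX if the queue was drained.
 */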
 833static u64 collect_timerqueue(struct timerqueue_head *head,
 834			      struct list_head *firing, u64 now)
 835{
 836	struct timerqueue_node *next;
 837	int i = 0;
 838
 839	while ((next = timerqueue_getnext(head))) {
 840		struct cpu_timer *ctmr;
 841		u64 expires;
 842
 843		ctmr = container_of(next, struct cpu_timer, node);
 844		expires = cpu_timer_getexpires(ctmr);
 845		/* Limit the number of timers to expire at once */
 846		if (++i == MAX_COLLECTED || now < expires)
 847			return expires;
 848
 849		ctmr->firing = 1;
 850		cpu_timer_dequeue(ctmr);
 851		list_add_tail(&ctmr->elist, firing);
 852	}
 853
 854	return U64_MAX;
 855}
 856
 857static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
 858				    struct list_head *firing)
 859{
 860	struct posix_cputimer_base *base = pct->bases;
 861	int i;
 862
 863	for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
 864		base->nextevt = collect_timerqueue(&base->tqhead, firing,
 865						    samples[i]);
 866	}
 867}
 868
 869static inline void check_dl_overrun(struct task_struct *tsk)
 870{
 871	if (tsk->dl.dl_overrun) {
 872		tsk->dl.dl_overrun = 0;
 873		send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
 874	}
 875}
 876
 877static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
 878{
 879	if (time < limit)
 880		return false;
 881
 882	if (print_fatal_signals) {
 883		pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
 884			rt ? "RT" : "CPU", hard ? "hard" : "soft",
 885			current->comm, task_pid_nr(current));
 886	}
 887	send_signal_locked(signo, SEND_SIG_PRIV, current, PIDTYPE_TGID);
 888	return true;
 889}
 890
 891/*
  892 * Check for any per-thread CPU timers that have fired and move them off
  893 * the task's timer queues onto the firing list.  The expiry cache in
  894 * tsk->posix_cputimers is updated to reflect the remaining timers.
 895 */
 896static void check_thread_timers(struct task_struct *tsk,
 897				struct list_head *firing)
 898{
 899	struct posix_cputimers *pct = &tsk->posix_cputimers;
 900	u64 samples[CPUCLOCK_MAX];
 901	unsigned long soft;
 902
 903	if (dl_task(tsk))
 904		check_dl_overrun(tsk);
 905
 906	if (expiry_cache_is_inactive(pct))
 907		return;
 908
 909	task_sample_cputime(tsk, samples);
 910	collect_posix_cputimers(pct, samples, firing);
 911
 912	/*
 913	 * Check for the special case thread timers.
 914	 */
 915	soft = task_rlimit(tsk, RLIMIT_RTTIME);
 916	if (soft != RLIM_INFINITY) {
 917		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
 918		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
 919		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
 920
 921		/* At the hard limit, send SIGKILL. No further action. */
 922		if (hard != RLIM_INFINITY &&
 923		    check_rlimit(rttime, hard, SIGKILL, true, true))
 924			return;
 925
 926		/* At the soft limit, send a SIGXCPU every second */
 927		if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
 928			soft += USEC_PER_SEC;
 929			tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
 930		}
 931	}
 932
 933	if (expiry_cache_is_inactive(pct))
 934		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
 935}
 936
 937static inline void stop_process_timers(struct signal_struct *sig)
 938{
 939	struct posix_cputimers *pct = &sig->posix_cputimers;
 940
 941	/* Turn off the active flag. This is done without locking. */
 942	WRITE_ONCE(pct->timers_active, false);
 943	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
 944}
 945
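/*
 * Check one of the legacy ITIMER_PROF/ITIMER_VIRTUAL itimers. If it expired,
 * send the signal and reload it from its interval (or disarm a one shot
 * itimer). Lower *expires when the itimer is due before the cached expiry.
 */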
 946static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
 947			     u64 *expires, u64 cur_time, int signo)
 948{
 949	if (!it->expires)
 950		return;
 951
 952	if (cur_time >= it->expires) {
 953		if (it->incr)
 954			it->expires += it->incr;
 955		else
 956			it->expires = 0;
 957
 958		trace_itimer_expire(signo == SIGPROF ?
 959				    ITIMER_PROF : ITIMER_VIRTUAL,
 960				    task_tgid(tsk), cur_time);
 961		send_signal_locked(signo, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
 962	}
 963
 964	if (it->expires && it->expires < *expires)
 965		*expires = it->expires;
 966}
 967
 968/*
  969 * Check for any process wide (thread group) CPU timers that have fired
  970 * and move them off the signal struct's timer queues onto the firing
  971 * list.  Per-thread timers have already been taken off.
 972 */
 973static void check_process_timers(struct task_struct *tsk,
 974				 struct list_head *firing)
 975{
 976	struct signal_struct *const sig = tsk->signal;
 977	struct posix_cputimers *pct = &sig->posix_cputimers;
 978	u64 samples[CPUCLOCK_MAX];
 979	unsigned long soft;
 980
 981	/*
 982	 * If there are no active process wide timers (POSIX 1.b, itimers,
 983	 * RLIMIT_CPU) nothing to check. Also skip the process wide timer
 984	 * processing when there is already another task handling them.
 985	 */
 986	if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
 987		return;
 988
 989	/*
 990	 * Signify that a thread is checking for process timers.
 991	 * Write access to this field is protected by the sighand lock.
 992	 */
 993	pct->expiry_active = true;
 994
 995	/*
 996	 * Collect the current process totals. Group accounting is active
 997	 * so the sample can be taken directly.
 998	 */
 999	proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
1000	collect_posix_cputimers(pct, samples, firing);
1001
1002	/*
1003	 * Check for the special case process timers.
1004	 */
1005	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
1006			 &pct->bases[CPUCLOCK_PROF].nextevt,
1007			 samples[CPUCLOCK_PROF], SIGPROF);
1008	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
1009			 &pct->bases[CPUCLOCK_VIRT].nextevt,
1010			 samples[CPUCLOCK_VIRT], SIGVTALRM);
1011
1012	soft = task_rlimit(tsk, RLIMIT_CPU);
1013	if (soft != RLIM_INFINITY) {
1014		/* RLIMIT_CPU is in seconds. Samples are nanoseconds */
1015		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
1016		u64 ptime = samples[CPUCLOCK_PROF];
1017		u64 softns = (u64)soft * NSEC_PER_SEC;
1018		u64 hardns = (u64)hard * NSEC_PER_SEC;
1019
1020		/* At the hard limit, send SIGKILL. No further action. */
1021		if (hard != RLIM_INFINITY &&
1022		    check_rlimit(ptime, hardns, SIGKILL, false, true))
1023			return;
1024
1025		/* At the soft limit, send a SIGXCPU every second */
1026		if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
1027			sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
1028			softns += NSEC_PER_SEC;
1029		}
1030
1031		/* Update the expiry cache */
1032		if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
1033			pct->bases[CPUCLOCK_PROF].nextevt = softns;
1034	}
1035
1036	if (expiry_cache_is_inactive(pct))
1037		stop_process_timers(sig);
1038
1039	pct->expiry_active = false;
1040}
1041
1042/*
1043 * This is called from the signal code (via posixtimer_rearm)
1044 * when the last timer signal was delivered and we have to reload the timer.
1045 */
1046static void posix_cpu_timer_rearm(struct k_itimer *timer)
1047{
1048	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
1049	struct task_struct *p;
1050	struct sighand_struct *sighand;
1051	unsigned long flags;
1052	u64 now;
1053
1054	rcu_read_lock();
1055	p = cpu_timer_task_rcu(timer);
1056	if (!p)
1057		goto out;
1058
1059	/* Protect timer list r/w in arm_timer() */
1060	sighand = lock_task_sighand(p, &flags);
1061	if (unlikely(sighand == NULL))
1062		goto out;
1063
1064	/*
1065	 * Fetch the current sample and update the timer's expiry time.
1066	 */
1067	if (CPUCLOCK_PERTHREAD(timer->it_clock))
1068		now = cpu_clock_sample(clkid, p);
1069	else
1070		now = cpu_clock_sample_group(clkid, p, true);
1071
1072	bump_cpu_timer(timer, now);
1073
1074	/*
1075	 * Now re-arm for the new expiry time.
1076	 */
1077	arm_timer(timer, p);
1078	unlock_task_sighand(p, &flags);
1079out:
1080	rcu_read_unlock();
1081}
1082
1083/**
1084 * task_cputimers_expired - Check whether posix CPU timers are expired
1085 *
1086 * @samples:	Array of current samples for the CPUCLOCK clocks
1087 * @pct:	Pointer to a posix_cputimers container
1088 *
 1089 * Returns true if any member of @samples is greater than or equal to the
 1090 * corresponding member of @pct->bases[CLK].nextevt. False otherwise.
1091 */
1092static inline bool
1093task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
1094{
1095	int i;
1096
1097	for (i = 0; i < CPUCLOCK_MAX; i++) {
1098		if (samples[i] >= pct->bases[i].nextevt)
1099			return true;
1100	}
1101	return false;
1102}
1103
1104/**
1105 * fastpath_timer_check - POSIX CPU timers fast path.
1106 *
1107 * @tsk:	The task (thread) being checked.
1108 *
1109 * Check the task and thread group timers.  If both are zero (there are no
1110 * timers set) return false.  Otherwise snapshot the task and thread group
1111 * timers and compare them with the corresponding expiration times.  Return
1112 * true if a timer has expired, else return false.
1113 */
1114static inline bool fastpath_timer_check(struct task_struct *tsk)
1115{
1116	struct posix_cputimers *pct = &tsk->posix_cputimers;
1117	struct signal_struct *sig;
1118
1119	if (!expiry_cache_is_inactive(pct)) {
1120		u64 samples[CPUCLOCK_MAX];
1121
1122		task_sample_cputime(tsk, samples);
1123		if (task_cputimers_expired(samples, pct))
1124			return true;
1125	}
1126
1127	sig = tsk->signal;
1128	pct = &sig->posix_cputimers;
1129	/*
1130	 * Check if thread group timers expired when timers are active and
1131	 * no other thread in the group is already handling expiry for
1132	 * thread group cputimers. These fields are read without the
1133	 * sighand lock. However, this is fine because this is meant to be
1134	 * a fastpath heuristic to determine whether we should try to
1135	 * acquire the sighand lock to handle timer expiry.
1136	 *
1137	 * In the worst case scenario, if concurrently timers_active is set
1138	 * or expiry_active is cleared, but the current thread doesn't see
1139	 * the change yet, the timer checks are delayed until the next
1140	 * thread in the group gets a scheduler interrupt to handle the
1141	 * timer. This isn't an issue in practice because these types of
1142	 * delays with signals actually getting sent are expected.
1143	 */
1144	if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
1145		u64 samples[CPUCLOCK_MAX];
1146
1147		proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
1148					   samples);
1149
1150		if (task_cputimers_expired(samples, pct))
1151			return true;
1152	}
1153
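	/*
	 * A SCHED_DEADLINE runtime overrun needs the slow path to deliver
	 * SIGXCPU as well.
	 */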
1154	if (dl_task(tsk) && tsk->dl.dl_overrun)
1155		return true;
1156
1157	return false;
1158}
1159
1160static void handle_posix_cpu_timers(struct task_struct *tsk);
1161
1162#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1163static void posix_cpu_timers_work(struct callback_head *work)
1164{
1165	handle_posix_cpu_timers(current);
1166}
1167
1168/*
1169 * Clear existing posix CPU timers task work.
1170 */
1171void clear_posix_cputimers_work(struct task_struct *p)
1172{
1173	/*
1174	 * A copied work entry from the old task is not meaningful, clear it.
1175	 * N.B. init_task_work will not do this.
1176	 */
1177	memset(&p->posix_cputimers_work.work, 0,
1178	       sizeof(p->posix_cputimers_work.work));
1179	init_task_work(&p->posix_cputimers_work.work,
1180		       posix_cpu_timers_work);
1181	p->posix_cputimers_work.scheduled = false;
1182}
1183
1184/*
1185 * Initialize posix CPU timers task work in init task. Out of line to
1186 * keep the callback static and to avoid header recursion hell.
1187 */
1188void __init posix_cputimers_init_work(void)
1189{
1190	clear_posix_cputimers_work(current);
1191}
1192
1193/*
 1194 * Note: All operations on tsk->posix_cputimers_work.scheduled happen either
 1195 * in hard interrupt context or in task context with interrupts
 1196 * disabled. Aside from that, the writer/reader interaction is always in the
 1197 * context of the current task, which means they are strictly per CPU.
1198 */
1199static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1200{
1201	return tsk->posix_cputimers_work.scheduled;
1202}
1203
1204static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1205{
1206	if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled))
1207		return;
1208
1209	/* Schedule task work to actually expire the timers */
1210	tsk->posix_cputimers_work.scheduled = true;
1211	task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);
1212}
1213
1214static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1215						unsigned long start)
1216{
1217	bool ret = true;
1218
1219	/*
1220	 * On !RT kernels interrupts are disabled while collecting expired
1221	 * timers, so no tick can happen and the fast path check can be
1222	 * reenabled without further checks.
1223	 */
1224	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
1225		tsk->posix_cputimers_work.scheduled = false;
1226		return true;
1227	}
1228
1229	/*
1230	 * On RT enabled kernels ticks can happen while the expired timers
1231	 * are collected under sighand lock. But any tick which observes
1232	 * the CPUTIMERS_WORK_SCHEDULED bit set, does not run the fastpath
 1233	 * checks. So reenabling the tick work has to be done carefully:
1234	 *
1235	 * Disable interrupts and run the fast path check if jiffies have
1236	 * advanced since the collecting of expired timers started. If
1237	 * jiffies have not advanced or the fast path check did not find
1238	 * newly expired timers, reenable the fast path check in the timer
1239	 * interrupt. If there are newly expired timers, return false and
1240	 * let the collection loop repeat.
1241	 */
1242	local_irq_disable();
1243	if (start != jiffies && fastpath_timer_check(tsk))
1244		ret = false;
1245	else
1246		tsk->posix_cputimers_work.scheduled = false;
1247	local_irq_enable();
1248
1249	return ret;
1250}
1251#else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1252static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1253{
1254	lockdep_posixtimer_enter();
1255	handle_posix_cpu_timers(tsk);
1256	lockdep_posixtimer_exit();
1257}
1258
1259static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1260{
1261	return false;
1262}
1263
1264static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1265						unsigned long start)
1266{
1267	return true;
1268}
1269#endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1270
1271static void handle_posix_cpu_timers(struct task_struct *tsk)
1272{
1273	struct k_itimer *timer, *next;
1274	unsigned long flags, start;
1275	LIST_HEAD(firing);
1276
1277	if (!lock_task_sighand(tsk, &flags))
1278		return;
1279
1280	do {
1281		/*
1282		 * On RT locking sighand lock does not disable interrupts,
1283		 * so this needs to be careful vs. ticks. Store the current
1284		 * jiffies value.
1285		 */
1286		start = READ_ONCE(jiffies);
1287		barrier();
1288
1289		/*
 1290		 * Take all firing timers off the per-thread and process
 1291		 * wide timer queues and put them on the local firing
 1292		 * list.
1293		 */
1294		check_thread_timers(tsk, &firing);
1295
1296		check_process_timers(tsk, &firing);
1297
1298		/*
1299		 * The above timer checks have updated the expiry cache and
1300		 * because nothing can have queued or modified timers after
1301		 * sighand lock was taken above it is guaranteed to be
1302		 * consistent. So the next timer interrupt fastpath check
1303		 * will find valid data.
1304		 *
1305		 * If timer expiry runs in the timer interrupt context then
1306		 * the loop is not relevant as timers will be directly
1307		 * expired in interrupt context. The stub function below
 1308		 * always returns true, which allows the compiler to
1309		 * optimize the loop out.
1310		 *
1311		 * If timer expiry is deferred to task work context then
1312		 * the following rules apply:
1313		 *
1314		 * - On !RT kernels no tick can have happened on this CPU
1315		 *   after sighand lock was acquired because interrupts are
1316		 *   disabled. So reenabling task work before dropping
1317		 *   sighand lock and reenabling interrupts is race free.
1318		 *
1319		 * - On RT kernels ticks might have happened but the tick
1320		 *   work ignored posix CPU timer handling because the
1321		 *   CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work
1322		 *   must be done very carefully including a check whether
1323		 *   ticks have happened since the start of the timer
1324		 *   expiry checks. posix_cpu_timers_enable_work() takes
1325		 *   care of that and eventually lets the expiry checks
1326		 *   run again.
1327		 */
1328	} while (!posix_cpu_timers_enable_work(tsk, start));
1329
1330	/*
1331	 * We must release sighand lock before taking any timer's lock.
1332	 * There is a potential race with timer deletion here, as the
1333	 * siglock now protects our private firing list.  We have set
1334	 * the firing flag in each timer, so that a deletion attempt
1335	 * that gets the timer lock before we do will give it up and
1336	 * spin until we've taken care of that timer below.
1337	 */
1338	unlock_task_sighand(tsk, &flags);
1339
1340	/*
1341	 * Now that all the timers on our list have the firing flag,
1342	 * no one will touch their list entries but us.  We'll take
1343	 * each timer's lock before clearing its firing flag, so no
1344	 * timer call will interfere.
1345	 */
1346	list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
1347		int cpu_firing;
1348
1349		/*
1350		 * spin_lock() is sufficient here even independent of the
1351		 * expiry context. If expiry happens in hard interrupt
1352		 * context it's obvious. For task work context it's safe
1353		 * because all other operations on timer::it_lock happen in
1354		 * task context (syscall or exit).
1355		 */
1356		spin_lock(&timer->it_lock);
1357		list_del_init(&timer->it.cpu.elist);
1358		cpu_firing = timer->it.cpu.firing;
1359		timer->it.cpu.firing = 0;
1360		/*
1361		 * The firing flag is -1 if we collided with a reset
1362		 * of the timer, which already reported this
1363		 * almost-firing as an overrun.  So don't generate an event.
1364		 */
1365		if (likely(cpu_firing >= 0))
1366			cpu_timer_fire(timer);
1367		spin_unlock(&timer->it_lock);
1368	}
1369}
1370
1371/*
1372 * This is called from the timer interrupt handler.  The irq handler has
1373 * already updated our counts.  We need to check if any timers fire now.
1374 * Interrupts are disabled.
1375 */
1376void run_posix_cpu_timers(void)
1377{
1378	struct task_struct *tsk = current;
1379
1380	lockdep_assert_irqs_disabled();
1381
1382	/*
1383	 * If the actual expiry is deferred to task work context and the
1384	 * work is already scheduled there is no point to do anything here.
1385	 */
1386	if (posix_cpu_timers_work_scheduled(tsk))
1387		return;
1388
1389	/*
1390	 * The fast path checks that there are no expired thread or thread
1391	 * group timers.  If that's so, just return.
1392	 */
1393	if (!fastpath_timer_check(tsk))
1394		return;
1395
1396	__run_posix_cpu_timers(tsk);
1397}
1398
1399/*
1400 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1401 * The tsk->sighand->siglock must be held by the caller.
1402 */
1403void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
1404			   u64 *newval, u64 *oldval)
1405{
1406	u64 now, *nextevt;
1407
1408	if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
1409		return;
1410
1411	nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
1412	now = cpu_clock_sample_group(clkid, tsk, true);
1413
1414	if (oldval) {
1415		/*
 1416		 * We are setting an itimer. *oldval is absolute and we update
 1417		 * it to be relative; the *newval argument is relative and we
 1418		 * update it to be absolute.
1419		 */
1420		if (*oldval) {
1421			if (*oldval <= now) {
1422				/* Just about to fire. */
1423				*oldval = TICK_NSEC;
1424			} else {
1425				*oldval -= now;
1426			}
1427		}
1428
1429		if (*newval)
1430			*newval += now;
1431	}
1432
1433	/*
1434	 * Update expiration cache if this is the earliest timer. CPUCLOCK_PROF
 1435	 * expiry cache is also used by RLIMIT_CPU!
1436	 */
1437	if (*newval < *nextevt)
1438		*nextevt = *newval;
1439
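	/*
	 * Keep the tick running for this process so the updated expiry is
	 * noticed, even on nohz_full CPUs.
	 */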
1440	tick_dep_set_signal(tsk, TICK_DEP_BIT_POSIX_TIMER);
1441}
1442
1443static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1444			    const struct timespec64 *rqtp)
1445{
1446	struct itimerspec64 it;
1447	struct k_itimer timer;
1448	u64 expires;
1449	int error;
1450
1451	/*
1452	 * Set up a temporary timer and then wait for it to go off.
1453	 */
1454	memset(&timer, 0, sizeof timer);
1455	spin_lock_init(&timer.it_lock);
1456	timer.it_clock = which_clock;
1457	timer.it_overrun = -1;
1458	error = posix_cpu_timer_create(&timer);
1459	timer.it_process = current;
1460
1461	if (!error) {
1462		static struct itimerspec64 zero_it;
1463		struct restart_block *restart;
1464
1465		memset(&it, 0, sizeof(it));
1466		it.it_value = *rqtp;
1467
1468		spin_lock_irq(&timer.it_lock);
1469		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
1470		if (error) {
1471			spin_unlock_irq(&timer.it_lock);
1472			return error;
1473		}
1474
1475		while (!signal_pending(current)) {
1476			if (!cpu_timer_getexpires(&timer.it.cpu)) {
1477				/*
 1478				 * Our timer fired and was reset, so the
 1479				 * deletion below cannot fail.
1480				 */
1481				posix_cpu_timer_del(&timer);
1482				spin_unlock_irq(&timer.it_lock);
1483				return 0;
1484			}
1485
1486			/*
1487			 * Block until cpu_timer_fire (or a signal) wakes us.
1488			 */
1489			__set_current_state(TASK_INTERRUPTIBLE);
1490			spin_unlock_irq(&timer.it_lock);
1491			schedule();
1492			spin_lock_irq(&timer.it_lock);
1493		}
1494
1495		/*
1496		 * We were interrupted by a signal.
1497		 */
1498		expires = cpu_timer_getexpires(&timer.it.cpu);
1499		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
1500		if (!error) {
1501			/*
 1502			 * The timer is now unarmed, so deletion cannot fail.
1503			 */
1504			posix_cpu_timer_del(&timer);
1505		}
1506		spin_unlock_irq(&timer.it_lock);
1507
1508		while (error == TIMER_RETRY) {
1509			/*
 1510			 * We need to handle the case when the timer was or is
 1511			 * in the middle of firing. In other cases we have
 1512			 * already freed the resources.
1513			 */
1514			spin_lock_irq(&timer.it_lock);
1515			error = posix_cpu_timer_del(&timer);
1516			spin_unlock_irq(&timer.it_lock);
1517		}
1518
1519		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
1520			/*
1521			 * It actually did fire already.
1522			 */
1523			return 0;
1524		}
1525
1526		error = -ERESTART_RESTARTBLOCK;
1527		/*
1528		 * Report back to the user the time still remaining.
1529		 */
1530		restart = &current->restart_block;
1531		restart->nanosleep.expires = expires;
1532		if (restart->nanosleep.type != TT_NONE)
1533			error = nanosleep_copyout(restart, &it.it_value);
1534	}
1535
1536	return error;
1537}
1538
1539static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1540
1541static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1542			    const struct timespec64 *rqtp)
1543{
1544	struct restart_block *restart_block = &current->restart_block;
1545	int error;
1546
1547	/*
1548	 * Diagnose required errors first.
1549	 */
1550	if (CPUCLOCK_PERTHREAD(which_clock) &&
1551	    (CPUCLOCK_PID(which_clock) == 0 ||
1552	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
1553		return -EINVAL;
1554
1555	error = do_cpu_nanosleep(which_clock, flags, rqtp);
1556
1557	if (error == -ERESTART_RESTARTBLOCK) {
1558
1559		if (flags & TIMER_ABSTIME)
1560			return -ERESTARTNOHAND;
1561
1562		restart_block->nanosleep.clockid = which_clock;
1563		set_restart_fn(restart_block, posix_cpu_nsleep_restart);
1564	}
1565	return error;
1566}
1567
1568static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1569{
1570	clockid_t which_clock = restart_block->nanosleep.clockid;
1571	struct timespec64 t;
1572
1573	t = ns_to_timespec64(restart_block->nanosleep.expires);
1574
1575	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
1576}
1577
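/*
 * Clock IDs used for CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID:
 * an encoded PID of 0 means the caller's own process/thread, sampled via
 * the scheduler clock.
 */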
1578#define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
1579#define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)
1580
1581static int process_cpu_clock_getres(const clockid_t which_clock,
1582				    struct timespec64 *tp)
1583{
1584	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1585}
1586static int process_cpu_clock_get(const clockid_t which_clock,
1587				 struct timespec64 *tp)
1588{
1589	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1590}
1591static int process_cpu_timer_create(struct k_itimer *timer)
1592{
1593	timer->it_clock = PROCESS_CLOCK;
1594	return posix_cpu_timer_create(timer);
1595}
1596static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1597			      const struct timespec64 *rqtp)
1598{
1599	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
1600}
1601static int thread_cpu_clock_getres(const clockid_t which_clock,
1602				   struct timespec64 *tp)
1603{
1604	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1605}
1606static int thread_cpu_clock_get(const clockid_t which_clock,
1607				struct timespec64 *tp)
1608{
1609	return posix_cpu_clock_get(THREAD_CLOCK, tp);
1610}
1611static int thread_cpu_timer_create(struct k_itimer *timer)
1612{
1613	timer->it_clock = THREAD_CLOCK;
1614	return posix_cpu_timer_create(timer);
1615}
1616
1617const struct k_clock clock_posix_cpu = {
1618	.clock_getres		= posix_cpu_clock_getres,
1619	.clock_set		= posix_cpu_clock_set,
1620	.clock_get_timespec	= posix_cpu_clock_get,
1621	.timer_create		= posix_cpu_timer_create,
1622	.nsleep			= posix_cpu_nsleep,
1623	.timer_set		= posix_cpu_timer_set,
1624	.timer_del		= posix_cpu_timer_del,
1625	.timer_get		= posix_cpu_timer_get,
1626	.timer_rearm		= posix_cpu_timer_rearm,
1627};
1628
1629const struct k_clock clock_process = {
1630	.clock_getres		= process_cpu_clock_getres,
1631	.clock_get_timespec	= process_cpu_clock_get,
1632	.timer_create		= process_cpu_timer_create,
1633	.nsleep			= process_cpu_nsleep,
1634};
1635
1636const struct k_clock clock_thread = {
1637	.clock_getres		= thread_cpu_clock_getres,
1638	.clock_get_timespec	= thread_cpu_clock_get,
1639	.timer_create		= thread_cpu_timer_create,
1640};