v5.4 (kernel/time/tick-sched.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
   4 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
   5 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
   6 *
   7 *  No idle tick implementation for low and high resolution timers
   8 *
   9 *  Started by: Thomas Gleixner and Ingo Molnar
  10 */
  11#include <linux/cpu.h>
  12#include <linux/err.h>
  13#include <linux/hrtimer.h>
  14#include <linux/interrupt.h>
  15#include <linux/kernel_stat.h>
  16#include <linux/percpu.h>
  17#include <linux/nmi.h>
  18#include <linux/profile.h>
  19#include <linux/sched/signal.h>
  20#include <linux/sched/clock.h>
  21#include <linux/sched/stat.h>
  22#include <linux/sched/nohz.h>
  23#include <linux/module.h>
  24#include <linux/irq_work.h>
  25#include <linux/posix-timers.h>
  26#include <linux/context_tracking.h>
  27#include <linux/mm.h>
  28
  29#include <asm/irq_regs.h>
  30
  31#include "tick-internal.h"
  32
  33#include <trace/events/timer.h>
  34
  35/*
  36 * Per-CPU nohz control structure
  37 */
  38static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
  39
  40struct tick_sched *tick_get_tick_sched(int cpu)
  41{
  42	return &per_cpu(tick_cpu_sched, cpu);
  43}
  44
  45#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
  46/*
  47 * The time, when the last jiffy update happened. Protected by jiffies_lock.
  48 */
  49static ktime_t last_jiffies_update;
  50
  51/*
  52 * Must be called with interrupts disabled !
  53 */
  54static void tick_do_update_jiffies64(ktime_t now)
  55{
  56	unsigned long ticks = 0;
  57	ktime_t delta;
  58
  59	/*
  60	 * Do a quick check without holding jiffies_lock:
  61	 */
  62	delta = ktime_sub(now, last_jiffies_update);
  63	if (delta < tick_period)
  64		return;
  65
  66	/* Reevaluate with jiffies_lock held */
  67	write_seqlock(&jiffies_lock);
  68
  69	delta = ktime_sub(now, last_jiffies_update);
  70	if (delta >= tick_period) {
  71
  72		delta = ktime_sub(delta, tick_period);
  73		last_jiffies_update = ktime_add(last_jiffies_update,
  74						tick_period);
  75
  76		/* Slow path for long timeouts */
  77		if (unlikely(delta >= tick_period)) {
  78			s64 incr = ktime_to_ns(tick_period);
  79
  80			ticks = ktime_divns(delta, incr);
  81
  82			last_jiffies_update = ktime_add_ns(last_jiffies_update,
  83							   incr * ticks);
  84		}
  85		do_timer(++ticks);
  86
  87		/* Keep the tick_next_period variable up to date */
  88		tick_next_period = ktime_add(last_jiffies_update, tick_period);
  89	} else {
  90		write_sequnlock(&jiffies_lock);
  91		return;
  92	}
  93	write_sequnlock(&jiffies_lock);
  94	update_wall_time();
  95}
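/*
 * Worked example of the catch-up arithmetic above (hypothetical numbers,
 * assuming HZ=250, i.e. tick_period = 4,000,000 ns):
 *
 *	now - last_jiffies_update = 10,500,000 ns
 *	first subtraction:  delta = 6,500,000 ns, last_jiffies_update += 4 ms
 *	slow path:          ticks = 6,500,000 / 4,000,000 = 1,
 *	                    last_jiffies_update += 1 * 4 ms
 *	do_timer(++ticks):  jiffies advances by 2
 *
 * last_jiffies_update ends up 8 ms past its old value and 2.5 ms behind
 * 'now', so the quick check returns early until another full tick period
 * has elapsed.
 */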
  96
  97/*
  98 * Initialize and return the jiffies update.
  99 */
 100static ktime_t tick_init_jiffy_update(void)
 101{
 102	ktime_t period;
 103
 104	write_seqlock(&jiffies_lock);
 105	/* Did we start the jiffies update yet ? */
 106	if (last_jiffies_update == 0)
 107		last_jiffies_update = tick_next_period;
 108	period = last_jiffies_update;
 109	write_sequnlock(&jiffies_lock);
 110	return period;
 111}
 112
 113static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
 114{
 115	int cpu = smp_processor_id();
 116
 117#ifdef CONFIG_NO_HZ_COMMON
 118	/*
 119	 * Check if the do_timer duty was dropped. We don't care about
 120	 * concurrency: This happens only when the CPU in charge went
 121	 * into a long sleep. If two CPUs happen to assign themselves to
 122	 * this duty, then the jiffies update is still serialized by
 123	 * jiffies_lock.
 124	 *
 125	 * If nohz_full is enabled, this should not happen because the
 126	 * tick_do_timer_cpu never relinquishes.
 127	 */
 128	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
 129#ifdef CONFIG_NO_HZ_FULL
 130		WARN_ON(tick_nohz_full_running);
 131#endif
 132		tick_do_timer_cpu = cpu;
 133	}
 134#endif
 135
 136	/* Check, if the jiffies need an update */
 137	if (tick_do_timer_cpu == cpu)
 138		tick_do_update_jiffies64(now);
 139
 140	if (ts->inidle)
 141		ts->got_idle_tick = 1;
 142}
 143
 144static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 145{
 146#ifdef CONFIG_NO_HZ_COMMON
 147	/*
 148	 * When we are idle and the tick is stopped, we have to touch
 149	 * the watchdog as we might not schedule for a really long
 150	 * time. This happens on completely idle SMP systems while
 151	 * waiting on the login prompt. We also increment the "start of
 152	 * idle" jiffy stamp so the idle accounting adjustment we do
 153	 * when we go busy again does not account too many ticks.
 154	 */
 155	if (ts->tick_stopped) {
 156		touch_softlockup_watchdog_sched();
 157		if (is_idle_task(current))
 158			ts->idle_jiffies++;
 159		/*
 160		 * In case the current tick fired too early past its expected
 161		 * expiration, make sure we don't bypass the next clock reprogramming
 162		 * to the same deadline.
 163		 */
 164		ts->next_tick = 0;
 165	}
 166#endif
 167	update_process_times(user_mode(regs));
 168	profile_tick(CPU_PROFILING);
 169}
 170#endif
 171
 172#ifdef CONFIG_NO_HZ_FULL
 173cpumask_var_t tick_nohz_full_mask;
 174bool tick_nohz_full_running;
 175static atomic_t tick_dep_mask;
 176
 177static bool check_tick_dependency(atomic_t *dep)
 178{
 179	int val = atomic_read(dep);
 180
 181	if (val & TICK_DEP_MASK_POSIX_TIMER) {
 182		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
 183		return true;
 184	}
 185
 186	if (val & TICK_DEP_MASK_PERF_EVENTS) {
 187		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
 188		return true;
 189	}
 190
 191	if (val & TICK_DEP_MASK_SCHED) {
 192		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
 193		return true;
 194	}
 195
 196	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
 197		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
 198		return true;
 199	}
 200
 201	return false;
 202}
 203
 204static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
 205{
 206	lockdep_assert_irqs_disabled();
 207
 208	if (unlikely(!cpu_online(cpu)))
 209		return false;
 210
 211	if (check_tick_dependency(&tick_dep_mask))
 212		return false;
 213
 214	if (check_tick_dependency(&ts->tick_dep_mask))
 215		return false;
 216
 217	if (check_tick_dependency(&current->tick_dep_mask))
 218		return false;
 219
 220	if (check_tick_dependency(&current->signal->tick_dep_mask))
 221		return false;
 222
 223	return true;
 224}
 225
 226static void nohz_full_kick_func(struct irq_work *work)
 227{
 228	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
 229}
 230
 231static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
 232	.func = nohz_full_kick_func,
 233};
 234
 235/*
 236 * Kick this CPU if it's full dynticks in order to force it to
 237 * re-evaluate its dependency on the tick and restart it if necessary.
 238 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 239 * is NMI safe.
 240 */
 241static void tick_nohz_full_kick(void)
 242{
 243	if (!tick_nohz_full_cpu(smp_processor_id()))
 244		return;
 245
 246	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
 247}
 248
 249/*
 250 * Kick the CPU if it's full dynticks in order to force it to
 251 * re-evaluate its dependency on the tick and restart it if necessary.
 252 */
 253void tick_nohz_full_kick_cpu(int cpu)
 254{
 255	if (!tick_nohz_full_cpu(cpu))
 256		return;
 257
 258	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
 259}
 260
 261/*
 262 * Kick all full dynticks CPUs in order to force these to re-evaluate
 263 * their dependency on the tick and restart it if necessary.
 264 */
 265static void tick_nohz_full_kick_all(void)
 266{
 267	int cpu;
 268
 269	if (!tick_nohz_full_running)
 270		return;
 271
 272	preempt_disable();
 273	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
 274		tick_nohz_full_kick_cpu(cpu);
 275	preempt_enable();
 276}
 277
 278static void tick_nohz_dep_set_all(atomic_t *dep,
 279				  enum tick_dep_bits bit)
 280{
 281	int prev;
 282
 283	prev = atomic_fetch_or(BIT(bit), dep);
 284	if (!prev)
 285		tick_nohz_full_kick_all();
 286}
 287
 288/*
 289 * Set a global tick dependency. Used by perf events that rely on freq and
 290 * by unstable clock.
 291 */
 292void tick_nohz_dep_set(enum tick_dep_bits bit)
 293{
 294	tick_nohz_dep_set_all(&tick_dep_mask, bit);
 295}
 296
 297void tick_nohz_dep_clear(enum tick_dep_bits bit)
 298{
 299	atomic_andnot(BIT(bit), &tick_dep_mask);
 300}
 301
 302/*
 303 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 304 * manage events throttling.
 305 */
 306void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
 307{
 308	int prev;
 309	struct tick_sched *ts;
 310
 311	ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 312
 313	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
 314	if (!prev) {
 315		preempt_disable();
 316		/* Perf needs local kick that is NMI safe */
 317		if (cpu == smp_processor_id()) {
 318			tick_nohz_full_kick();
 319		} else {
 320			/* Remote irq work not NMI-safe */
 321			if (!WARN_ON_ONCE(in_nmi()))
 322				tick_nohz_full_kick_cpu(cpu);
 323		}
 324		preempt_enable();
 325	}
 326}
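/*
 * Minimal usage sketch for the per-CPU dependency API above (hypothetical
 * caller; TICK_DEP_BIT_PERF_EVENTS is one of the enum tick_dep_bits values
 * from include/linux/tick.h):
 *
 *	tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 *	...			// the CPU keeps its tick even when nohz_full
 *	tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 */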
 327
 328void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
 329{
 330	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 331
 332	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
 333}
 334
 335/*
 336 * Set a per-task tick dependency. Posix CPU timers need this in order to elapse
 337 * per task timers.
 338 */
 339void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
 340{
 341	/*
 342	 * We could optimize this with just kicking the target running the task
 343	 * if that noise matters for nohz full users.
 344	 */
 345	tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
 346}
 347
 348void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
 349{
 350	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
 351}
 352
 353/*
 354 * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
 355 * per process timers.
 356 */
 357void tick_nohz_dep_set_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 358{
 359	tick_nohz_dep_set_all(&sig->tick_dep_mask, bit);
 360}
 361
 362void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 363{
 364	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
 365}
 366
 367/*
 368 * Re-evaluate the need for the tick as we switch the current task.
 369 * It might need the tick due to per task/process properties:
 370 * perf events, posix CPU timers, ...
 371 */
 372void __tick_nohz_task_switch(void)
 373{
 374	unsigned long flags;
 375	struct tick_sched *ts;
 376
 377	local_irq_save(flags);
 378
 379	if (!tick_nohz_full_cpu(smp_processor_id()))
 380		goto out;
 381
 382	ts = this_cpu_ptr(&tick_cpu_sched);
 383
 384	if (ts->tick_stopped) {
 385		if (atomic_read(&current->tick_dep_mask) ||
 386		    atomic_read(&current->signal->tick_dep_mask))
 387			tick_nohz_full_kick();
 388	}
 389out:
 390	local_irq_restore(flags);
 391}
 392
 393/* Get the boot-time nohz CPU list from the kernel parameters. */
 394void __init tick_nohz_full_setup(cpumask_var_t cpumask)
 395{
 396	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
 397	cpumask_copy(tick_nohz_full_mask, cpumask);
 398	tick_nohz_full_running = true;
 399}
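/*
 * Boot-time example (hypothetical command line): "nohz_full=1-7" makes the
 * housekeeping code hand this function a cpumask of CPUs 1-7, leaving CPU 0
 * as the housekeeping CPU that carries the timekeeping duty checked in
 * tick_nohz_cpu_down() below.
 */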
 400
 401static int tick_nohz_cpu_down(unsigned int cpu)
 402{
 403	/*
 404	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
 405	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
 406	 * CPUs. It must remain online when nohz full is enabled.
 407	 */
 408	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
 409		return -EBUSY;
 410	return 0;
 411}
 412
 413void __init tick_nohz_init(void)
 414{
 415	int cpu, ret;
 416
 417	if (!tick_nohz_full_running)
 418		return;
 419
 420	/*
 421	 * Full dynticks uses irq work to drive the tick rescheduling on safe
 422	 * locking contexts. But then we need irq work to raise its own
 423	 * interrupts to avoid circular dependency on the tick
 424	 */
 425	if (!arch_irq_work_has_interrupt()) {
 426		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support irq work self-IPIs\n");
 427		cpumask_clear(tick_nohz_full_mask);
 428		tick_nohz_full_running = false;
 429		return;
 430	}
 431
 432	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
 433			!IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
 434		cpu = smp_processor_id();
 435
 436		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
 437			pr_warn("NO_HZ: Clearing %d from nohz_full range "
 438				"for timekeeping\n", cpu);
 439			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
 440		}
 441	}
 442
 443	for_each_cpu(cpu, tick_nohz_full_mask)
 444		context_tracking_cpu_set(cpu);
 445
 446	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
 447					"kernel/nohz:predown", NULL,
 448					tick_nohz_cpu_down);
 449	WARN_ON(ret < 0);
 450	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
 451		cpumask_pr_args(tick_nohz_full_mask));
 452}
 453#endif
 454
 455/*
 456 * NOHZ - aka dynamic tick functionality
 457 */
 458#ifdef CONFIG_NO_HZ_COMMON
 459/*
 460 * NO HZ enabled ?
 461 */
 462bool tick_nohz_enabled __read_mostly  = true;
 463unsigned long tick_nohz_active  __read_mostly;
 464/*
 465 * Enable / Disable tickless mode
 466 */
 467static int __init setup_tick_nohz(char *str)
 468{
 469	return (kstrtobool(str, &tick_nohz_enabled) == 0);
 470}
 471
 472__setup("nohz=", setup_tick_nohz);
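/*
 * Example: booting with "nohz=off" on the kernel command line sets
 * tick_nohz_enabled to false and keeps the periodic tick even when
 * CONFIG_NO_HZ_IDLE is configured (illustrative usage; the parameter is
 * documented in Documentation/admin-guide/kernel-parameters.txt).
 */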
 473
 474bool tick_nohz_tick_stopped(void)
 475{
 476	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 477
 478	return ts->tick_stopped;
 479}
 480
 481bool tick_nohz_tick_stopped_cpu(int cpu)
 482{
 483	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 484
 485	return ts->tick_stopped;
 486}
 487
 488/**
 489 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 490 *
 491 * Called from interrupt entry when the CPU was idle
 492 *
 493 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 494 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 495 * value. We do this unconditionally on any CPU, as we don't know whether the
 496 * CPU, which has the update task assigned is in a long sleep.
 497 */
 498static void tick_nohz_update_jiffies(ktime_t now)
 499{
 500	unsigned long flags;
 501
 502	__this_cpu_write(tick_cpu_sched.idle_waketime, now);
 503
 504	local_irq_save(flags);
 505	tick_do_update_jiffies64(now);
 506	local_irq_restore(flags);
 507
 508	touch_softlockup_watchdog_sched();
 509}
 510
 511/*
 512 * Updates the per-CPU time idle statistics counters
 513 */
 514static void
 515update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
 516{
 517	ktime_t delta;
 518
 519	if (ts->idle_active) {
 520		delta = ktime_sub(now, ts->idle_entrytime);
 521		if (nr_iowait_cpu(cpu) > 0)
 522			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
 523		else
 524			ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
 525		ts->idle_entrytime = now;
 526	}
 527
 528	if (last_update_time)
 529		*last_update_time = ktime_to_us(now);
 530
 531}
 532
 533static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 534{
 535	update_ts_time_stats(smp_processor_id(), ts, now, NULL);
 536	ts->idle_active = 0;
 537
 538	sched_clock_idle_wakeup_event();
 539}
 540
 541static void tick_nohz_start_idle(struct tick_sched *ts)
 542{
 543	ts->idle_entrytime = ktime_get();
 544	ts->idle_active = 1;
 545	sched_clock_idle_sleep_event();
 546}
 547
 548/**
 549 * get_cpu_idle_time_us - get the total idle time of a CPU
 550 * @cpu: CPU number to query
 551 * @last_update_time: variable to store update time in. Do not update
 552 * counters if NULL.
 553 *
 554 * Return the cumulative idle time (since boot) for a given
 555 * CPU, in microseconds.
 556 *
 557 * This time is measured via accounting rather than sampling,
 558 * and is as accurate as ktime_get() is.
 559 *
 560 * This function returns -1 if NOHZ is not enabled.
 561 */
 562u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 563{
 564	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 565	ktime_t now, idle;
 566
 567	if (!tick_nohz_active)
 568		return -1;
 569
 570	now = ktime_get();
 571	if (last_update_time) {
 572		update_ts_time_stats(cpu, ts, now, last_update_time);
 573		idle = ts->idle_sleeptime;
 574	} else {
 575		if (ts->idle_active && !nr_iowait_cpu(cpu)) {
 576			ktime_t delta = ktime_sub(now, ts->idle_entrytime);
 577
 578			idle = ktime_add(ts->idle_sleeptime, delta);
 579		} else {
 580			idle = ts->idle_sleeptime;
 581		}
 582	}
 583
 584	return ktime_to_us(idle);
 585
 586}
 587EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
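/*
 * Illustrative call into the exported helper above (hypothetical snippet,
 * roughly the pattern used by the cpufreq governors; not part of this file):
 *
 *	u64 wall_us;
 *	u64 idle_us = get_cpu_idle_time_us(cpu, &wall_us);
 *
 *	if (idle_us == (u64)-1)
 *		;	// NOHZ inactive, fall back to jiffies-based stats
 */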
 588
 589/**
 590 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 591 * @cpu: CPU number to query
 592 * @last_update_time: variable to store update time in. Do not update
 593 * counters if NULL.
 594 *
 595 * Return the cumulative iowait time (since boot) for a given
 596 * CPU, in microseconds.
 597 *
 598 * This time is measured via accounting rather than sampling,
 599 * and is as accurate as ktime_get() is.
 600 *
 601 * This function returns -1 if NOHZ is not enabled.
 602 */
 603u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 604{
 605	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 606	ktime_t now, iowait;
 607
 608	if (!tick_nohz_active)
 609		return -1;
 610
 611	now = ktime_get();
 612	if (last_update_time) {
 613		update_ts_time_stats(cpu, ts, now, last_update_time);
 614		iowait = ts->iowait_sleeptime;
 615	} else {
 616		if (ts->idle_active && nr_iowait_cpu(cpu) > 0) {
 617			ktime_t delta = ktime_sub(now, ts->idle_entrytime);
 618
 619			iowait = ktime_add(ts->iowait_sleeptime, delta);
 620		} else {
 621			iowait = ts->iowait_sleeptime;
 622		}
 623	}
 624
 625	return ktime_to_us(iowait);
 626}
 627EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 628
 629static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 630{
 631	hrtimer_cancel(&ts->sched_timer);
 632	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
 633
 634	/* Forward the time to expire in the future */
 635	hrtimer_forward(&ts->sched_timer, now, tick_period);
 636
 637	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
 638		hrtimer_start_expires(&ts->sched_timer,
 639				      HRTIMER_MODE_ABS_PINNED_HARD);
 640	} else {
 641		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
 642	}
 643
 644	/*
 645	 * Reset to make sure next tick stop doesn't get fooled by past
 646	 * cached clock deadline.
 647	 */
 648	ts->next_tick = 0;
 649}
 650
 651static inline bool local_timer_softirq_pending(void)
 652{
 653	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
 654}
 655
 656static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 657{
 658	u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
 659	unsigned long basejiff;
 660	unsigned int seq;
 661
 662	/* Read jiffies and the time when jiffies were updated last */
 663	do {
 664		seq = read_seqbegin(&jiffies_lock);
 665		basemono = last_jiffies_update;
 666		basejiff = jiffies;
 667	} while (read_seqretry(&jiffies_lock, seq));
 668	ts->last_jiffies = basejiff;
 669	ts->timer_expires_base = basemono;
 670
 671	/*
 672	 * Keep the periodic tick, when RCU, architecture or irq_work
 673	 * requests it.
 674	 * Aside of that check whether the local timer softirq is
 675	 * pending. If so it's a bad idea to call get_next_timer_interrupt()
 676	 * because there is an already expired timer, so it will request
 677	 * immediate expiry, which rearms the hardware timer with a
 678	 * minimal delta which brings us back to this place
 679	 * immediately. Lather, rinse and repeat...
 680	 */
 681	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
 682	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
 683		next_tick = basemono + TICK_NSEC;
 684	} else {
 685		/*
 686		 * Get the next pending timer. If high resolution
 687		 * timers are enabled this only takes the timer wheel
 688		 * timers into account. If high resolution timers are
 689		 * disabled this also looks at the next expiring
 690		 * hrtimer.
 691		 */
 692		next_tmr = get_next_timer_interrupt(basejiff, basemono);
 693		ts->next_timer = next_tmr;
 694		/* Take the next rcu event into account */
 695		next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
 696	}
 697
 698	/*
 699	 * If the tick is due in the next period, keep it ticking or
 700	 * force prod the timer.
 701	 */
 702	delta = next_tick - basemono;
 703	if (delta <= (u64)TICK_NSEC) {
 704		/*
 705		 * Tell the timer code that the base is not idle, i.e. undo
 706		 * the effect of get_next_timer_interrupt():
 707		 */
 708		timer_clear_idle();
 709		/*
 710		 * We've not stopped the tick yet, and there's a timer in the
 711		 * next period, so no point in stopping it either, bail.
 712		 */
 713		if (!ts->tick_stopped) {
 714			ts->timer_expires = 0;
 715			goto out;
 716		}
 717	}
 718
 719	/*
 720	 * If this CPU is the one which had the do_timer() duty last, we limit
 721	 * the sleep time to the timekeeping max_deferment value.
 722	 * Otherwise we can sleep as long as we want.
 723	 */
 724	delta = timekeeping_max_deferment();
 725	if (cpu != tick_do_timer_cpu &&
 726	    (tick_do_timer_cpu != TICK_DO_TIMER_NONE || !ts->do_timer_last))
 727		delta = KTIME_MAX;
 728
 729	/* Calculate the next expiry time */
 730	if (delta < (KTIME_MAX - basemono))
 731		expires = basemono + delta;
 732	else
 733		expires = KTIME_MAX;
 734
 735	ts->timer_expires = min_t(u64, expires, next_tick);
 736
 737out:
 738	return ts->timer_expires;
 739}
 740
 741static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 742{
 743	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 744	u64 basemono = ts->timer_expires_base;
 745	u64 expires = ts->timer_expires;
 746	ktime_t tick = expires;
 747
 748	/* Make sure we won't be trying to stop it twice in a row. */
 749	ts->timer_expires_base = 0;
 750
 751	/*
 752	 * If this CPU is the one which updates jiffies, then give up
 753	 * the assignment and let it be taken by the CPU which runs
 754	 * the tick timer next, which might be this CPU as well. If we
 755	 * don't drop this here the jiffies might be stale and
 756	 * do_timer() never invoked. Keep track of the fact that it
 757	 * was the one which had the do_timer() duty last.
 758	 */
 759	if (cpu == tick_do_timer_cpu) {
 760		tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 761		ts->do_timer_last = 1;
 762	} else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
 763		ts->do_timer_last = 0;
 764	}
 765
 766	/* Skip reprogram of event if its not changed */
 767	if (ts->tick_stopped && (expires == ts->next_tick)) {
 768		/* Sanity check: make sure clockevent is actually programmed */
 769		if (tick == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
 770			return;
 771
 772		WARN_ON_ONCE(1);
 773		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
 774			    basemono, ts->next_tick, dev->next_event,
 775			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
 776	}
 777
 778	/*
 779	 * nohz_stop_sched_tick can be called several times before
 780	 * the nohz_restart_sched_tick is called. This happens when
 781	 * interrupts arrive which do not cause a reschedule. In the
 782	 * first call we save the current tick time, so we can restart
 783	 * the scheduler tick in nohz_restart_sched_tick.
 784	 */
 785	if (!ts->tick_stopped) {
 786		calc_load_nohz_start();
 787		quiet_vmstat();
 788
 789		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
 790		ts->tick_stopped = 1;
 791		trace_tick_stop(1, TICK_DEP_MASK_NONE);
 792	}
 793
 794	ts->next_tick = tick;
 795
 796	/*
 797	 * If the expiration time == KTIME_MAX, then we simply stop
 798	 * the tick timer.
 799	 */
 800	if (unlikely(expires == KTIME_MAX)) {
 801		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
 802			hrtimer_cancel(&ts->sched_timer);
 803		return;
 804	}
 805
 806	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
 807		hrtimer_start(&ts->sched_timer, tick,
 808			      HRTIMER_MODE_ABS_PINNED_HARD);
 809	} else {
 810		hrtimer_set_expires(&ts->sched_timer, tick);
 811		tick_program_event(tick, 1);
 812	}
 813}
 814
 815static void tick_nohz_retain_tick(struct tick_sched *ts)
 816{
 817	ts->timer_expires_base = 0;
 818}
 819
 820#ifdef CONFIG_NO_HZ_FULL
 821static void tick_nohz_stop_sched_tick(struct tick_sched *ts, int cpu)
 822{
 823	if (tick_nohz_next_event(ts, cpu))
 824		tick_nohz_stop_tick(ts, cpu);
 825	else
 826		tick_nohz_retain_tick(ts);
 827}
 828#endif /* CONFIG_NO_HZ_FULL */
 829
 830static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 831{
 832	/* Update jiffies first */
 833	tick_do_update_jiffies64(now);
 834
 835	/*
 836	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
 837	 * the clock forward checks in the enqueue path:
 838	 */
 839	timer_clear_idle();
 840
 841	calc_load_nohz_stop();
 842	touch_softlockup_watchdog_sched();
 843	/*
 844	 * Cancel the scheduled timer and restore the tick
 845	 */
 846	ts->tick_stopped  = 0;
 847	ts->idle_exittime = now;
 848
 849	tick_nohz_restart(ts, now);
 850}
 851
 852static void tick_nohz_full_update_tick(struct tick_sched *ts)
 853{
 854#ifdef CONFIG_NO_HZ_FULL
 855	int cpu = smp_processor_id();
 856
 857	if (!tick_nohz_full_cpu(cpu))
 858		return;
 859
 860	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
 861		return;
 862
 863	if (can_stop_full_tick(cpu, ts))
 864		tick_nohz_stop_sched_tick(ts, cpu);
 865	else if (ts->tick_stopped)
 866		tick_nohz_restart_sched_tick(ts, ktime_get());
 867#endif
 868}
 869
 870static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 871{
 872	/*
 873	 * If this CPU is offline and it is the one which updates
 874	 * jiffies, then give up the assignment and let it be taken by
 875	 * the CPU which runs the tick timer next. If we don't drop
 876	 * this here the jiffies might be stale and do_timer() never
 877	 * invoked.
 878	 */
 879	if (unlikely(!cpu_online(cpu))) {
 880		if (cpu == tick_do_timer_cpu)
 881			tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 882		/*
 883		 * Make sure the CPU doesn't get fooled by obsolete tick
 884		 * deadline if it comes back online later.
 885		 */
 886		ts->next_tick = 0;
 887		return false;
 888	}
 889
 890	if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
 891		return false;
 892
 893	if (need_resched())
 894		return false;
 895
 896	if (unlikely(local_softirq_pending())) {
 897		static int ratelimit;
 898
 899		if (ratelimit < 10 &&
 900		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
 901			pr_warn("NOHZ: local_softirq_pending %02x\n",
 902				(unsigned int) local_softirq_pending());
 903			ratelimit++;
 904		}
 905		return false;
 906	}
 907
 908	if (tick_nohz_full_enabled()) {
 909		/*
 910		 * Keep the tick alive to guarantee timekeeping progression
 911		 * if there are full dynticks CPUs around
 912		 */
 913		if (tick_do_timer_cpu == cpu)
 914			return false;
 915		/*
 916		 * Boot safety: make sure the timekeeping duty has been
 917		 * assigned before entering dyntick-idle mode,
 918		 * tick_do_timer_cpu is TICK_DO_TIMER_BOOT
 919		 */
 920		if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_BOOT))
 921			return false;
 922
 923		/* Should not happen for nohz-full */
 924		if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 925			return false;
 926	}
 927
 928	return true;
 929}
 930
 931static void __tick_nohz_idle_stop_tick(struct tick_sched *ts)
 932{
 933	ktime_t expires;
 934	int cpu = smp_processor_id();
 935
 936	/*
 937	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
 938	 * tick timer expiration time is known already.
 939	 */
 940	if (ts->timer_expires_base)
 941		expires = ts->timer_expires;
 942	else if (can_stop_idle_tick(cpu, ts))
 943		expires = tick_nohz_next_event(ts, cpu);
 944	else
 945		return;
 946
 947	ts->idle_calls++;
 948
 949	if (expires > 0LL) {
 950		int was_stopped = ts->tick_stopped;
 951
 952		tick_nohz_stop_tick(ts, cpu);
 953
 954		ts->idle_sleeps++;
 955		ts->idle_expires = expires;
 956
 957		if (!was_stopped && ts->tick_stopped) {
 958			ts->idle_jiffies = ts->last_jiffies;
 959			nohz_balance_enter_idle(cpu);
 960		}
 961	} else {
 962		tick_nohz_retain_tick(ts);
 963	}
 964}
 965
 966/**
 967 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
 968 *
 969 * When the next event is more than a tick into the future, stop the idle tick
 970 */
 971void tick_nohz_idle_stop_tick(void)
 972{
 973	__tick_nohz_idle_stop_tick(this_cpu_ptr(&tick_cpu_sched));
 974}
 975
 976void tick_nohz_idle_retain_tick(void)
 977{
 978	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
 979	/*
 980	 * Undo the effect of get_next_timer_interrupt() called from
 981	 * tick_nohz_next_event().
 982	 */
 983	timer_clear_idle();
 984}
 985
 986/**
 987 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
 988 *
 989 * Called when we start the idle loop.
 990 */
 991void tick_nohz_idle_enter(void)
 992{
 993	struct tick_sched *ts;
 994
 995	lockdep_assert_irqs_enabled();
 996
 997	local_irq_disable();
 998
 999	ts = this_cpu_ptr(&tick_cpu_sched);
1000
1001	WARN_ON_ONCE(ts->timer_expires_base);
1002
1003	ts->inidle = 1;
1004	tick_nohz_start_idle(ts);
1005
1006	local_irq_enable();
1007}
1008
1009/**
1010 * tick_nohz_irq_exit - update next tick event from interrupt exit
1011 *
1012 * When an interrupt fires while we are idle and it doesn't cause
1013 * a reschedule, it may still add, modify or delete a timer, enqueue
1014 * an RCU callback, etc...
1015 * So we need to re-calculate and reprogram the next tick event.
1016 */
1017void tick_nohz_irq_exit(void)
1018{
1019	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1020
1021	if (ts->inidle)
1022		tick_nohz_start_idle(ts);
1023	else
1024		tick_nohz_full_update_tick(ts);
1025}
1026
1027/**
1028 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
1029 */
1030bool tick_nohz_idle_got_tick(void)
1031{
1032	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1033
1034	if (ts->got_idle_tick) {
1035		ts->got_idle_tick = 0;
1036		return true;
1037	}
1038	return false;
1039}
1040
1041/**
1042 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
1043 * or the tick, whatever that expires first. Note that, if the tick has been
1044 * stopped, it returns the next hrtimer.
1045 *
1046 * Called from power state control code with interrupts disabled
1047 */
1048ktime_t tick_nohz_get_next_hrtimer(void)
1049{
1050	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
1051}
1052
1053/**
1054 * tick_nohz_get_sleep_length - return the expected length of the current sleep
1055 * @delta_next: duration until the next event if the tick cannot be stopped
1056 *
1057 * Called from power state control code with interrupts disabled
1058 */
1059ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
1060{
1061	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
1062	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1063	int cpu = smp_processor_id();
1064	/*
1065	 * The idle entry time is expected to be a sufficient approximation of
1066	 * the current time at this point.
1067	 */
1068	ktime_t now = ts->idle_entrytime;
1069	ktime_t next_event;
1070
1071	WARN_ON_ONCE(!ts->inidle);
1072
1073	*delta_next = ktime_sub(dev->next_event, now);
1074
1075	if (!can_stop_idle_tick(cpu, ts))
1076		return *delta_next;
1077
1078	next_event = tick_nohz_next_event(ts, cpu);
1079	if (!next_event)
1080		return *delta_next;
1081
1082	/*
1083	 * If the next highres timer to expire is earlier than next_event, the
1084	 * idle governor needs to know that.
1085	 */
1086	next_event = min_t(u64, next_event,
1087			   hrtimer_next_event_without(&ts->sched_timer));
1088
1089	return ktime_sub(next_event, now);
1090}
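/*
 * Expected calling pattern for the function above (hedged sketch; the real
 * callers are the cpuidle governors, which run in the idle task with
 * interrupts disabled):
 *
 *	ktime_t delta_next;
 *	ktime_t duration = tick_nohz_get_sleep_length(&delta_next);
 *
 *	// 'duration' bounds the target residency of the idle state to pick;
 *	// 'delta_next' is the tighter bound if the tick cannot be stopped.
 */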
1091
1092/**
1093 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
1094 * for a particular CPU.
1095 *
1096 * Called from the schedutil frequency scaling governor in scheduler context.
1097 */
1098unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
1099{
1100	struct tick_sched *ts = tick_get_tick_sched(cpu);
1101
1102	return ts->idle_calls;
1103}
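/*
 * Sketch of how schedutil can use the counter above to detect a busy CPU
 * (simplified and hypothetical; 'saved_idle_calls' is a caller-side field):
 *
 *	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(cpu);
 *	bool busy = (idle_calls == saved_idle_calls);	// no new idle periods
 *	saved_idle_calls = idle_calls;
 */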
1104
1105/**
1106 * tick_nohz_get_idle_calls - return the current idle calls counter value
1107 *
1108 * Called from the schedutil frequency scaling governor in scheduler context.
1109 */
1110unsigned long tick_nohz_get_idle_calls(void)
1111{
1112	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1113
1114	return ts->idle_calls;
1115}
1116
1117static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
1118{
1119#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1120	unsigned long ticks;
1121
1122	if (vtime_accounting_cpu_enabled())
1123		return;
1124	/*
1125	 * We stopped the tick in idle. Update process times would miss the
1126	 * time we slept as update_process_times does only a 1 tick
1127	 * accounting. Enforce that this is accounted to idle !
1128	 */
1129	ticks = jiffies - ts->idle_jiffies;
1130	/*
1131	 * We might be one off. Do not randomly account a huge number of ticks!
1132	 */
1133	if (ticks && ticks < LONG_MAX)
1134		account_idle_ticks(ticks);
1135#endif
1136}
1137
1138static void __tick_nohz_idle_restart_tick(struct tick_sched *ts, ktime_t now)
1139{
1140	tick_nohz_restart_sched_tick(ts, now);
1141	tick_nohz_account_idle_ticks(ts);
1142}
1143
1144void tick_nohz_idle_restart_tick(void)
1145{
1146	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1147
1148	if (ts->tick_stopped)
1149		__tick_nohz_idle_restart_tick(ts, ktime_get());
1150}
1151
1152/**
1153 * tick_nohz_idle_exit - restart the idle tick from the idle task
1154 *
 1155 * Restart the idle tick when the CPU is woken up from idle.
 1156 * This also exits the RCU extended quiescent state. The CPU
1157 * can use RCU again after this function is called.
1158 */
1159void tick_nohz_idle_exit(void)
1160{
1161	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1162	bool idle_active, tick_stopped;
1163	ktime_t now;
1164
1165	local_irq_disable();
1166
1167	WARN_ON_ONCE(!ts->inidle);
1168	WARN_ON_ONCE(ts->timer_expires_base);
1169
1170	ts->inidle = 0;
1171	idle_active = ts->idle_active;
1172	tick_stopped = ts->tick_stopped;
1173
1174	if (idle_active || tick_stopped)
1175		now = ktime_get();
1176
1177	if (idle_active)
1178		tick_nohz_stop_idle(ts, now);
1179
1180	if (tick_stopped)
1181		__tick_nohz_idle_restart_tick(ts, now);
1182
1183	local_irq_enable();
1184}
1185
1186/*
1187 * The nohz low res interrupt handler
1188 */
1189static void tick_nohz_handler(struct clock_event_device *dev)
1190{
1191	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1192	struct pt_regs *regs = get_irq_regs();
1193	ktime_t now = ktime_get();
1194
1195	dev->next_event = KTIME_MAX;
1196
1197	tick_sched_do_timer(ts, now);
1198	tick_sched_handle(ts, regs);
1199
1200	/* No need to reprogram if we are running tickless  */
1201	if (unlikely(ts->tick_stopped))
1202		return;
1203
1204	hrtimer_forward(&ts->sched_timer, now, tick_period);
1205	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1206}
1207
1208static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
1209{
1210	if (!tick_nohz_enabled)
1211		return;
1212	ts->nohz_mode = mode;
1213	/* One update is enough */
1214	if (!test_and_set_bit(0, &tick_nohz_active))
1215		timers_update_nohz();
1216}
1217
1218/**
1219 * tick_nohz_switch_to_nohz - switch to nohz mode
1220 */
1221static void tick_nohz_switch_to_nohz(void)
1222{
1223	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1224	ktime_t next;
1225
1226	if (!tick_nohz_enabled)
1227		return;
1228
1229	if (tick_switch_to_oneshot(tick_nohz_handler))
1230		return;
1231
1232	/*
1233	 * Recycle the hrtimer in ts, so we can share the
1234	 * hrtimer_forward with the highres code.
1235	 */
1236	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
1237	/* Get the next period */
1238	next = tick_init_jiffy_update();
1239
1240	hrtimer_set_expires(&ts->sched_timer, next);
1241	hrtimer_forward_now(&ts->sched_timer, tick_period);
1242	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1243	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
1244}
1245
1246static inline void tick_nohz_irq_enter(void)
1247{
1248	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1249	ktime_t now;
1250
1251	if (!ts->idle_active && !ts->tick_stopped)
1252		return;
1253	now = ktime_get();
1254	if (ts->idle_active)
1255		tick_nohz_stop_idle(ts, now);
1256	if (ts->tick_stopped)
1257		tick_nohz_update_jiffies(now);
1258}
1259
1260#else
1261
1262static inline void tick_nohz_switch_to_nohz(void) { }
1263static inline void tick_nohz_irq_enter(void) { }
1264static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }
1265
1266#endif /* CONFIG_NO_HZ_COMMON */
1267
1268/*
1269 * Called from irq_enter to notify about the possible interruption of idle()
1270 */
1271void tick_irq_enter(void)
1272{
1273	tick_check_oneshot_broadcast_this_cpu();
1274	tick_nohz_irq_enter();
1275}
1276
1277/*
1278 * High resolution timer specific code
1279 */
1280#ifdef CONFIG_HIGH_RES_TIMERS
1281/*
1282 * We rearm the timer until we get disabled by the idle code.
1283 * Called with interrupts disabled.
1284 */
1285static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
1286{
1287	struct tick_sched *ts =
1288		container_of(timer, struct tick_sched, sched_timer);
1289	struct pt_regs *regs = get_irq_regs();
1290	ktime_t now = ktime_get();
1291
1292	tick_sched_do_timer(ts, now);
1293
1294	/*
1295	 * Do not call, when we are not in irq context and have
1296	 * no valid regs pointer
1297	 */
1298	if (regs)
1299		tick_sched_handle(ts, regs);
1300	else
1301		ts->next_tick = 0;
1302
1303	/* No need to reprogram if we are in idle or full dynticks mode */
1304	if (unlikely(ts->tick_stopped))
1305		return HRTIMER_NORESTART;
1306
1307	hrtimer_forward(timer, now, tick_period);
1308
1309	return HRTIMER_RESTART;
1310}
1311
1312static int sched_skew_tick;
1313
1314static int __init skew_tick(char *str)
1315{
1316	get_option(&str, &sched_skew_tick);
1317
1318	return 0;
1319}
1320early_param("skew_tick", skew_tick);
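/*
 * Example: booting with "skew_tick=1" enables the per-CPU offset applied in
 * tick_setup_sched_timer() below, staggering the tick across CPUs to reduce
 * jiffies_lock contention on large systems (illustrative usage; documented
 * in Documentation/admin-guide/kernel-parameters.txt).
 */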
1321
1322/**
1323 * tick_setup_sched_timer - setup the tick emulation timer
1324 */
1325void tick_setup_sched_timer(void)
1326{
1327	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1328	ktime_t now = ktime_get();
1329
1330	/*
1331	 * Emulate tick processing via per-CPU hrtimers:
1332	 */
1333	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
1334	ts->sched_timer.function = tick_sched_timer;
1335
1336	/* Get the next period (per-CPU) */
1337	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
1338
1339	/* Offset the tick to avert jiffies_lock contention. */
1340	if (sched_skew_tick) {
1341		u64 offset = ktime_to_ns(tick_period) >> 1;
1342		do_div(offset, num_possible_cpus());
1343		offset *= smp_processor_id();
1344		hrtimer_add_expires_ns(&ts->sched_timer, offset);
1345	}
1346
1347	hrtimer_forward(&ts->sched_timer, now, tick_period);
1348	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
1349	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
1350}
1351#endif /* HIGH_RES_TIMERS */
1352
1353#if defined CONFIG_NO_HZ_COMMON || defined CONFIG_HIGH_RES_TIMERS
1354void tick_cancel_sched_timer(int cpu)
1355{
1356	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
1357
1358# ifdef CONFIG_HIGH_RES_TIMERS
1359	if (ts->sched_timer.base)
1360		hrtimer_cancel(&ts->sched_timer);
1361# endif
1362
1363	memset(ts, 0, sizeof(*ts));
1364}
1365#endif
1366
1367/**
1368 * Async notification about clocksource changes
1369 */
1370void tick_clock_notify(void)
1371{
1372	int cpu;
1373
1374	for_each_possible_cpu(cpu)
1375		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
1376}
1377
1378/*
1379 * Async notification about clock event changes
1380 */
1381void tick_oneshot_notify(void)
1382{
1383	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1384
1385	set_bit(0, &ts->check_clocks);
1386}
1387
1388/**
1389 * Check, if a change happened, which makes oneshot possible.
1390 *
 1391 * Called cyclically from the hrtimer softirq (driven by the timer
 1392 * softirq). 'allow_nohz' signals that we can switch into low-res nohz
 1393 * mode, because high resolution timers are disabled (either at compile
 1394 * time or at runtime). Called with interrupts disabled.
1395 */
1396int tick_check_oneshot_change(int allow_nohz)
1397{
1398	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1399
1400	if (!test_and_clear_bit(0, &ts->check_clocks))
1401		return 0;
1402
1403	if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
1404		return 0;
1405
1406	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
1407		return 0;
1408
1409	if (!allow_nohz)
1410		return 1;
1411
1412	tick_nohz_switch_to_nohz();
1413	return 0;
1414}
v6.9.4 (kernel/time/tick-sched.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
   4 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
   5 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
   6 *
   7 *  NOHZ implementation for low and high resolution timers
   8 *
   9 *  Started by: Thomas Gleixner and Ingo Molnar
  10 */
  11#include <linux/compiler.h>
  12#include <linux/cpu.h>
  13#include <linux/err.h>
  14#include <linux/hrtimer.h>
  15#include <linux/interrupt.h>
  16#include <linux/kernel_stat.h>
  17#include <linux/percpu.h>
  18#include <linux/nmi.h>
  19#include <linux/profile.h>
  20#include <linux/sched/signal.h>
  21#include <linux/sched/clock.h>
  22#include <linux/sched/stat.h>
  23#include <linux/sched/nohz.h>
  24#include <linux/sched/loadavg.h>
  25#include <linux/module.h>
  26#include <linux/irq_work.h>
  27#include <linux/posix-timers.h>
  28#include <linux/context_tracking.h>
  29#include <linux/mm.h>
  30
  31#include <asm/irq_regs.h>
  32
  33#include "tick-internal.h"
  34
  35#include <trace/events/timer.h>
  36
  37/*
  38 * Per-CPU nohz control structure
  39 */
  40static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
  41
  42struct tick_sched *tick_get_tick_sched(int cpu)
  43{
  44	return &per_cpu(tick_cpu_sched, cpu);
  45}
  46
  47/*
  48 * The time when the last jiffy update happened. Write access must hold
  49 * jiffies_lock and jiffies_seq. tick_nohz_next_event() needs to get a
  50 * consistent view of jiffies and last_jiffies_update.
  51 */
  52static ktime_t last_jiffies_update;
  53
  54/*
  55 * Must be called with interrupts disabled !
  56 */
  57static void tick_do_update_jiffies64(ktime_t now)
  58{
  59	unsigned long ticks = 1;
  60	ktime_t delta, nextp;
  61
  62	/*
  63	 * 64-bit can do a quick check without holding the jiffies lock and
  64	 * without looking at the sequence count. The smp_load_acquire()
  65	 * pairs with the update done later in this function.
  66	 *
  67	 * 32-bit cannot do that because the store of 'tick_next_period'
  68	 * consists of two 32-bit stores, and the first store could be
  69	 * moved by the CPU to a random point in the future.
  70	 */
  71	if (IS_ENABLED(CONFIG_64BIT)) {
  72		if (ktime_before(now, smp_load_acquire(&tick_next_period)))
  73			return;
  74	} else {
  75		unsigned int seq;
  76
  77		/*
  78		 * Avoid contention on 'jiffies_lock' and protect the quick
  79		 * check with the sequence count.
  80		 */
  81		do {
  82			seq = read_seqcount_begin(&jiffies_seq);
  83			nextp = tick_next_period;
  84		} while (read_seqcount_retry(&jiffies_seq, seq));
  85
  86		if (ktime_before(now, nextp))
  87			return;
  88	}
  89
  90	/* Quick check failed, i.e. update is required. */
  91	raw_spin_lock(&jiffies_lock);
  92	/*
  93	 * Re-evaluate with the lock held. Another CPU might have done the
  94	 * update already.
  95	 */
  96	if (ktime_before(now, tick_next_period)) {
  97		raw_spin_unlock(&jiffies_lock);
  98		return;
  99	}
 100
 101	write_seqcount_begin(&jiffies_seq);
 102
 103	delta = ktime_sub(now, tick_next_period);
 104	if (unlikely(delta >= TICK_NSEC)) {
 105		/* Slow path for long idle sleep times */
 106		s64 incr = TICK_NSEC;
 107
 108		ticks += ktime_divns(delta, incr);
 109
 110		last_jiffies_update = ktime_add_ns(last_jiffies_update,
 111						   incr * ticks);
 112	} else {
 113		last_jiffies_update = ktime_add_ns(last_jiffies_update,
 114						   TICK_NSEC);
 115	}
 116
 117	/* Advance jiffies to complete the 'jiffies_seq' protected job */
 118	jiffies_64 += ticks;
 119
 120	/* Keep the tick_next_period variable up to date */
 121	nextp = ktime_add_ns(last_jiffies_update, TICK_NSEC);
 122
 123	if (IS_ENABLED(CONFIG_64BIT)) {
 124		/*
 125		 * Pairs with smp_load_acquire() in the lockless quick
 126		 * check above, and ensures that the update to 'jiffies_64' is
 127		 * not reordered vs. the store to 'tick_next_period', neither
 128		 * by the compiler nor by the CPU.
 129		 */
 130		smp_store_release(&tick_next_period, nextp);
 131	} else {
 132		/*
 133		 * A plain store is good enough on 32-bit, as the quick check
 134		 * above is protected by the sequence count.
 135		 */
 136		tick_next_period = nextp;
 137	}
 138
 139	/*
 140	 * Release the sequence count. calc_global_load() below is not
 141	 * protected by it, but 'jiffies_lock' needs to be held to prevent
 142	 * concurrent invocations.
 143	 */
 144	write_seqcount_end(&jiffies_seq);
 145
 146	calc_global_load();
 147
 148	raw_spin_unlock(&jiffies_lock);
 149	update_wall_time();
 150}
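/*
 * Worked example of the reworked catch-up path above (hypothetical numbers,
 * assuming HZ=250, i.e. TICK_NSEC = 4,000,000):
 *
 *	now - tick_next_period = 6,500,000 ns
 *	slow path: ticks = 1 + 6,500,000 / 4,000,000 = 2
 *	           last_jiffies_update += 2 * TICK_NSEC, jiffies_64 += 2
 *
 * Unlike the v5.4 version earlier in this document, the initial tick is
 * folded into 'ticks' here instead of being added via do_timer(++ticks).
 */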
 151
 152/*
 153 * Initialize and return the jiffies update.
 154 */
 155static ktime_t tick_init_jiffy_update(void)
 156{
 157	ktime_t period;
 158
 159	raw_spin_lock(&jiffies_lock);
 160	write_seqcount_begin(&jiffies_seq);
 161
 162	/* Have we started the jiffies update yet ? */
 163	if (last_jiffies_update == 0) {
 164		u32 rem;
 165
 166		/*
 167		 * Ensure that the tick is aligned to a multiple of
 168		 * TICK_NSEC.
 169		 */
 170		div_u64_rem(tick_next_period, TICK_NSEC, &rem);
 171		if (rem)
 172			tick_next_period += TICK_NSEC - rem;
 173
 174		last_jiffies_update = tick_next_period;
 175	}
 176	period = last_jiffies_update;
 177
 178	write_seqcount_end(&jiffies_seq);
 179	raw_spin_unlock(&jiffies_lock);
 180
 181	return period;
 182}
 183
 184static inline int tick_sched_flag_test(struct tick_sched *ts,
 185				       unsigned long flag)
 186{
 187	return !!(ts->flags & flag);
 188}
 189
 190static inline void tick_sched_flag_set(struct tick_sched *ts,
 191				       unsigned long flag)
 192{
 193	lockdep_assert_irqs_disabled();
 194	ts->flags |= flag;
 195}
 196
 197static inline void tick_sched_flag_clear(struct tick_sched *ts,
 198					 unsigned long flag)
 199{
 200	lockdep_assert_irqs_disabled();
 201	ts->flags &= ~flag;
 202}
 203
 204#define MAX_STALLED_JIFFIES 5
 205
 206static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
 207{
 208	int tick_cpu, cpu = smp_processor_id();
 209
 210	/*
 211	 * Check if the do_timer duty was dropped. We don't care about
 212	 * concurrency: This happens only when the CPU in charge went
 213	 * into a long sleep. If two CPUs happen to assign themselves to
 214	 * this duty, then the jiffies update is still serialized by
 215	 * 'jiffies_lock'.
 216	 *
 217	 * If nohz_full is enabled, this should not happen because the
 218	 * 'tick_do_timer_cpu' CPU never relinquishes.
 219	 */
 220	tick_cpu = READ_ONCE(tick_do_timer_cpu);
 221
 222	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && unlikely(tick_cpu == TICK_DO_TIMER_NONE)) {
 223#ifdef CONFIG_NO_HZ_FULL
 224		WARN_ON_ONCE(tick_nohz_full_running);
 225#endif
 226		WRITE_ONCE(tick_do_timer_cpu, cpu);
 227		tick_cpu = cpu;
 228	}
 229
 230	/* Check if jiffies need an update */
 231	if (tick_cpu == cpu)
 232		tick_do_update_jiffies64(now);
 233
 234	/*
 235	 * If the jiffies update stalled for too long (timekeeper in stop_machine()
 236	 * or VMEXIT'ed for several msecs), force an update.
 237	 */
 238	if (ts->last_tick_jiffies != jiffies) {
 239		ts->stalled_jiffies = 0;
 240		ts->last_tick_jiffies = READ_ONCE(jiffies);
 241	} else {
 242		if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) {
 243			tick_do_update_jiffies64(now);
 244			ts->stalled_jiffies = 0;
 245			ts->last_tick_jiffies = READ_ONCE(jiffies);
 246		}
 247	}
 248
 249	if (tick_sched_flag_test(ts, TS_FLAG_INIDLE))
 250		ts->got_idle_tick = 1;
 251}
 252
 253static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 254{
 255	/*
 256	 * When we are idle and the tick is stopped, we have to touch
 257	 * the watchdog as we might not schedule for a really long
 258	 * time. This happens on completely idle SMP systems while
 259	 * waiting on the login prompt. We also increment the "start of
 260	 * idle" jiffy stamp so the idle accounting adjustment we do
 261	 * when we go busy again does not account too many ticks.
 262	 */
 263	if (IS_ENABLED(CONFIG_NO_HZ_COMMON) &&
 264	    tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 265		touch_softlockup_watchdog_sched();
 266		if (is_idle_task(current))
 267			ts->idle_jiffies++;
 268		/*
 269		 * In case the current tick fired too early past its expected
 270		 * expiration, make sure we don't bypass the next clock reprogramming
 271		 * to the same deadline.
 272		 */
 273		ts->next_tick = 0;
 274	}
 275
 276	update_process_times(user_mode(regs));
 277	profile_tick(CPU_PROFILING);
 278}
 279
 280/*
 281 * We rearm the timer until we get disabled by the idle code.
 282 * Called with interrupts disabled.
 283 */
 284static enum hrtimer_restart tick_nohz_handler(struct hrtimer *timer)
 285{
 286	struct tick_sched *ts =	container_of(timer, struct tick_sched, sched_timer);
 287	struct pt_regs *regs = get_irq_regs();
 288	ktime_t now = ktime_get();
 289
 290	tick_sched_do_timer(ts, now);
 291
 292	/*
 293	 * Do not call when we are not in IRQ context and have
 294	 * no valid 'regs' pointer
 295	 */
 296	if (regs)
 297		tick_sched_handle(ts, regs);
 298	else
 299		ts->next_tick = 0;
 300
 301	/*
 302	 * In dynticks mode, tick reprogram is deferred:
 303	 * - to the idle task if in dynticks-idle
 304	 * - to IRQ exit if in full-dynticks.
 305	 */
 306	if (unlikely(tick_sched_flag_test(ts, TS_FLAG_STOPPED)))
 307		return HRTIMER_NORESTART;
 308
 309	hrtimer_forward(timer, now, TICK_NSEC);
 310
 311	return HRTIMER_RESTART;
 312}
 313
 314static void tick_sched_timer_cancel(struct tick_sched *ts)
 315{
 316	if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES))
 317		hrtimer_cancel(&ts->sched_timer);
 318	else if (tick_sched_flag_test(ts, TS_FLAG_NOHZ))
 319		tick_program_event(KTIME_MAX, 1);
 320}
 321
 322#ifdef CONFIG_NO_HZ_FULL
 323cpumask_var_t tick_nohz_full_mask;
 324EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
 325bool tick_nohz_full_running;
 326EXPORT_SYMBOL_GPL(tick_nohz_full_running);
 327static atomic_t tick_dep_mask;
 328
 329static bool check_tick_dependency(atomic_t *dep)
 330{
 331	int val = atomic_read(dep);
 332
 333	if (val & TICK_DEP_MASK_POSIX_TIMER) {
 334		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
 335		return true;
 336	}
 337
 338	if (val & TICK_DEP_MASK_PERF_EVENTS) {
 339		trace_tick_stop(0, TICK_DEP_MASK_PERF_EVENTS);
 340		return true;
 341	}
 342
 343	if (val & TICK_DEP_MASK_SCHED) {
 344		trace_tick_stop(0, TICK_DEP_MASK_SCHED);
 345		return true;
 346	}
 347
 348	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
 349		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
 350		return true;
 351	}
 352
 353	if (val & TICK_DEP_MASK_RCU) {
 354		trace_tick_stop(0, TICK_DEP_MASK_RCU);
 355		return true;
 356	}
 357
 358	if (val & TICK_DEP_MASK_RCU_EXP) {
 359		trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
 360		return true;
 361	}
 362
 363	return false;
 364}
 365
 366static bool can_stop_full_tick(int cpu, struct tick_sched *ts)
 367{
 368	lockdep_assert_irqs_disabled();
 369
 370	if (unlikely(!cpu_online(cpu)))
 371		return false;
 372
 373	if (check_tick_dependency(&tick_dep_mask))
 374		return false;
 375
 376	if (check_tick_dependency(&ts->tick_dep_mask))
 377		return false;
 378
 379	if (check_tick_dependency(&current->tick_dep_mask))
 380		return false;
 381
 382	if (check_tick_dependency(&current->signal->tick_dep_mask))
 383		return false;
 384
 385	return true;
 386}
 387
 388static void nohz_full_kick_func(struct irq_work *work)
 389{
 390	/* Empty, the tick restart happens on tick_nohz_irq_exit() */
 391}
 392
 393static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) =
 394	IRQ_WORK_INIT_HARD(nohz_full_kick_func);
 395
 396/*
 397 * Kick this CPU if it's full dynticks in order to force it to
 398 * re-evaluate its dependency on the tick and restart it if necessary.
 399 * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(),
 400 * is NMI safe.
 401 */
 402static void tick_nohz_full_kick(void)
 403{
 404	if (!tick_nohz_full_cpu(smp_processor_id()))
 405		return;
 406
 407	irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
 408}
 409
 410/*
 411 * Kick the CPU if it's full dynticks in order to force it to
 412 * re-evaluate its dependency on the tick and restart it if necessary.
 413 */
 414void tick_nohz_full_kick_cpu(int cpu)
 415{
 416	if (!tick_nohz_full_cpu(cpu))
 417		return;
 418
 419	irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
 420}
 421
 422static void tick_nohz_kick_task(struct task_struct *tsk)
 423{
 424	int cpu;
 425
 426	/*
 427	 * If the task is not running, run_posix_cpu_timers()
 428	 * has nothing to elapse, and an IPI can then be optimized out.
 429	 *
 430	 * activate_task()                      STORE p->tick_dep_mask
 431	 *   STORE p->on_rq
 432	 * __schedule() (switch to task 'p')    smp_mb() (atomic_fetch_or())
 433	 *   LOCK rq->lock                      LOAD p->on_rq
 434	 *   smp_mb__after_spin_lock()
 435	 *   tick_nohz_task_switch()
 436	 *     LOAD p->tick_dep_mask
 437	 */
 438	if (!sched_task_on_rq(tsk))
 439		return;
 440
 441	/*
 442	 * If the task concurrently migrates to another CPU,
 443	 * we guarantee it sees the new tick dependency upon
 444	 * schedule.
 445	 *
 446	 * set_task_cpu(p, cpu);
 447	 *   STORE p->cpu = @cpu
 448	 * __schedule() (switch to task 'p')
 449	 *   LOCK rq->lock
 450	 *   smp_mb__after_spin_lock()          STORE p->tick_dep_mask
 451	 *   tick_nohz_task_switch()            smp_mb() (atomic_fetch_or())
 452	 *      LOAD p->tick_dep_mask           LOAD p->cpu
 453	 */
 454	cpu = task_cpu(tsk);
 455
 456	preempt_disable();
 457	if (cpu_online(cpu))
 458		tick_nohz_full_kick_cpu(cpu);
 459	preempt_enable();
 460}
 461
 462/*
 463 * Kick all full dynticks CPUs in order to force these to re-evaluate
 464 * their dependency on the tick and restart it if necessary.
 465 */
 466static void tick_nohz_full_kick_all(void)
 467{
 468	int cpu;
 469
 470	if (!tick_nohz_full_running)
 471		return;
 472
 473	preempt_disable();
 474	for_each_cpu_and(cpu, tick_nohz_full_mask, cpu_online_mask)
 475		tick_nohz_full_kick_cpu(cpu);
 476	preempt_enable();
 477}
 478
 479static void tick_nohz_dep_set_all(atomic_t *dep,
 480				  enum tick_dep_bits bit)
 481{
 482	int prev;
 483
 484	prev = atomic_fetch_or(BIT(bit), dep);
 485	if (!prev)
 486		tick_nohz_full_kick_all();
 487}
 488
 489/*
 490 * Set a global tick dependency. Used by perf events that rely on freq and
 491 * unstable clocks.
 492 */
 493void tick_nohz_dep_set(enum tick_dep_bits bit)
 494{
 495	tick_nohz_dep_set_all(&tick_dep_mask, bit);
 496}
 497
 498void tick_nohz_dep_clear(enum tick_dep_bits bit)
 499{
 500	atomic_andnot(BIT(bit), &tick_dep_mask);
 501}
 502
 503/*
 504 * Set per-CPU tick dependency. Used by scheduler and perf events in order to
 505 * manage event-throttling.
 506 */
 507void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)
 508{
 509	int prev;
 510	struct tick_sched *ts;
 511
 512	ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 513
 514	prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);
 515	if (!prev) {
 516		preempt_disable();
 517		/* Perf needs local kick that is NMI safe */
 518		if (cpu == smp_processor_id()) {
 519			tick_nohz_full_kick();
 520		} else {
 521			/* Remote IRQ work not NMI-safe */
 522			if (!WARN_ON_ONCE(in_nmi()))
 523				tick_nohz_full_kick_cpu(cpu);
 524		}
 525		preempt_enable();
 526	}
 527}
 528EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);
 529
 530void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
 531{
 532	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 533
 534	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
 535}
 536EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);
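/*
 * Illustrative usage sketch (not part of this file): a subsystem that needs
 * the tick to keep running on a given CPU pairs the set/clear calls above on
 * the same dependency bit, for example:
 *
 *	tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 *	...period during which the CPU must keep ticking...
 *	tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_PERF_EVENTS);
 *
 * TICK_DEP_BIT_PERF_EVENTS is just one example bit from enum tick_dep_bits;
 * each caller uses the bit matching its subsystem.
 */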
 537
 538/*
 539 * Set a per-task tick dependency. RCU needs this. Also posix CPU timers
 540 * in order to elapse per task timers.
 541 */
 542void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
 543{
 544	if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask))
 545		tick_nohz_kick_task(tsk);
 546}
 547EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);
 548
 549void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
 550{
 551	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
 552}
 553EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);
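/*
 * Illustrative usage sketch (not part of this file): posix CPU timers are a
 * typical user of the per-task dependency. The bit is set when a timer is
 * armed on a task and cleared once no per-task timer is left, e.g.:
 *
 *	tick_nohz_dep_set_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
 *	...
 *	tick_nohz_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
 *
 * The real call sites live in the posix CPU timer code; this only shows the
 * intended pairing.
 */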
 554
 555/*
 556 * Set a per-taskgroup tick dependency. Posix CPU timers need this in
 557 * order to elapse per process timers.
 558 */
 559void tick_nohz_dep_set_signal(struct task_struct *tsk,
 560			      enum tick_dep_bits bit)
 561{
 562	int prev;
 563	struct signal_struct *sig = tsk->signal;
 564
 565	prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask);
 566	if (!prev) {
 567		struct task_struct *t;
 568
 569		lockdep_assert_held(&tsk->sighand->siglock);
 570		__for_each_thread(sig, t)
 571			tick_nohz_kick_task(t);
 572	}
 573}
 574
 575void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bit)
 576{
 577	atomic_andnot(BIT(bit), &sig->tick_dep_mask);
 578}
 579
 580/*
 581 * Re-evaluate the need for the tick as we switch the current task.
 582 * It might need the tick due to per task/process properties:
 583 * perf events, posix CPU timers, ...
 584 */
 585void __tick_nohz_task_switch(void)
 586{
 
 587	struct tick_sched *ts;
 588
 
 
 589	if (!tick_nohz_full_cpu(smp_processor_id()))
 590		return;
 591
 592	ts = this_cpu_ptr(&tick_cpu_sched);
 593
 594	if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 595		if (atomic_read(&current->tick_dep_mask) ||
 596		    atomic_read(&current->signal->tick_dep_mask))
 597			tick_nohz_full_kick();
 598	}
 
 
 599}
 600
 601/* Get the boot-time nohz CPU list from the kernel parameters. */
 602void __init tick_nohz_full_setup(cpumask_var_t cpumask)
 603{
 604	alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
 605	cpumask_copy(tick_nohz_full_mask, cpumask);
 606	tick_nohz_full_running = true;
 607}
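/*
 * Example (boot command line): the cpumask passed in here normally comes
 * from the "nohz_full=" parameter parsed by the housekeeping code, e.g.
 *
 *	nohz_full=1-7
 *
 * marks CPUs 1-7 as full dynticks, leaving at least one CPU (typically the
 * boot CPU) for housekeeping and timekeeping.
 */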
 608
 609bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
 610{
 611	/*
 612	 * The 'tick_do_timer_cpu' CPU handles housekeeping duty (unbound
 613	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
 614	 * CPUs. It must remain online when nohz full is enabled.
 615	 */
 616	if (tick_nohz_full_running && READ_ONCE(tick_do_timer_cpu) == cpu)
 617		return false;
 618	return true;
 619}
 620
 621static int tick_nohz_cpu_down(unsigned int cpu)
 622{
 623	return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
 624}
 625
 626void __init tick_nohz_init(void)
 627{
 628	int cpu, ret;
 629
 630	if (!tick_nohz_full_running)
 631		return;
 632
 633	/*
 634	 * Full dynticks uses IRQ work to drive the tick rescheduling on safe
 635	 * locking contexts. But then we need IRQ work to raise its own
 636	 * interrupts to avoid circular dependency on the tick.
 637	 */
 638	if (!arch_irq_work_has_interrupt()) {
 639		pr_warn("NO_HZ: Can't run full dynticks because arch doesn't support IRQ work self-IPIs\n");
 640		cpumask_clear(tick_nohz_full_mask);
 641		tick_nohz_full_running = false;
 642		return;
 643	}
 644
 645	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
 646			!IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
 647		cpu = smp_processor_id();
 648
 649		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
 650			pr_warn("NO_HZ: Clearing %d from nohz_full range "
 651				"for timekeeping\n", cpu);
 652			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
 653		}
 654	}
 655
 656	for_each_cpu(cpu, tick_nohz_full_mask)
 657		ct_cpu_track_user(cpu);
 658
 659	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
 660					"kernel/nohz:predown", NULL,
 661					tick_nohz_cpu_down);
 662	WARN_ON(ret < 0);
 663	pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n",
 664		cpumask_pr_args(tick_nohz_full_mask));
 665}
 666#endif /* #ifdef CONFIG_NO_HZ_FULL */
 667
 668/*
 669 * NOHZ - aka dynamic tick functionality
 670 */
 671#ifdef CONFIG_NO_HZ_COMMON
 672/*
 673 * NO HZ enabled?
 674 */
 675bool tick_nohz_enabled __read_mostly  = true;
 676unsigned long tick_nohz_active  __read_mostly;
 677/*
 678 * Enable / Disable tickless mode
 679 */
 680static int __init setup_tick_nohz(char *str)
 681{
 682	return (kstrtobool(str, &tick_nohz_enabled) == 0);
 683}
 684
 685__setup("nohz=", setup_tick_nohz);
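/*
 * Example (boot command line): dynticks can be turned off entirely with
 *
 *	nohz=off
 *
 * which leaves tick_nohz_enabled false, so the periodic tick keeps running
 * on every CPU.
 */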
 686
 687bool tick_nohz_tick_stopped(void)
 688{
 689	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 690
 691	return tick_sched_flag_test(ts, TS_FLAG_STOPPED);
 692}
 693
 694bool tick_nohz_tick_stopped_cpu(int cpu)
 695{
 696	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
 697
 698	return tick_sched_flag_test(ts, TS_FLAG_STOPPED);
 699}
 700
 701/**
 702 * tick_nohz_update_jiffies - update jiffies when idle was interrupted
 703 * @now: current ktime_t
 704 *
 705 * Called from interrupt entry when the CPU was idle
 706 *
 707 * In case the sched_tick was stopped on this CPU, we have to check if jiffies
 708 * must be updated. Otherwise an interrupt handler could use a stale jiffy
 709 * value. We do this unconditionally on any CPU, as we don't know whether the
 710 * CPU which has the update task assigned is in a long sleep.
 711 */
 712static void tick_nohz_update_jiffies(ktime_t now)
 713{
 714	unsigned long flags;
 715
 716	__this_cpu_write(tick_cpu_sched.idle_waketime, now);
 717
 718	local_irq_save(flags);
 719	tick_do_update_jiffies64(now);
 720	local_irq_restore(flags);
 721
 722	touch_softlockup_watchdog_sched();
 723}
 724
 725static void tick_nohz_stop_idle(struct tick_sched *ts, ktime_t now)
 726{
 727	ktime_t delta;
 728
 729	if (WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE)))
 730		return;
 731
 732	delta = ktime_sub(now, ts->idle_entrytime);
 
 733
 734	write_seqcount_begin(&ts->idle_sleeptime_seq);
 735	if (nr_iowait_cpu(smp_processor_id()) > 0)
 736		ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
 737	else
 738		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
 739
 740	ts->idle_entrytime = now;
 741	tick_sched_flag_clear(ts, TS_FLAG_IDLE_ACTIVE);
 742	write_seqcount_end(&ts->idle_sleeptime_seq);
 
 743
 744	sched_clock_idle_wakeup_event();
 745}
 746
 747static void tick_nohz_start_idle(struct tick_sched *ts)
 748{
 749	write_seqcount_begin(&ts->idle_sleeptime_seq);
 750	ts->idle_entrytime = ktime_get();
 751	tick_sched_flag_set(ts, TS_FLAG_IDLE_ACTIVE);
 752	write_seqcount_end(&ts->idle_sleeptime_seq);
 753
 754	sched_clock_idle_sleep_event();
 755}
 756
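/*
 * Common helper for get_cpu_idle_time_us() and get_cpu_iowait_time_us():
 * return the time accumulated in @sleeptime in microseconds, adding the
 * currently elapsing idle period when @compute_delta is true, or -1 if
 * NOHZ is not active.
 */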
 757static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime,
 758				 bool compute_delta, u64 *last_update_time)
 759{
 760	ktime_t now, idle;
 761	unsigned int seq;
 762
 763	if (!tick_nohz_active)
 764		return -1;
 765
 766	now = ktime_get();
 767	if (last_update_time)
 768		*last_update_time = ktime_to_us(now);
 769
 770	do {
 771		seq = read_seqcount_begin(&ts->idle_sleeptime_seq);
 772
 773		if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE) && compute_delta) {
 774			ktime_t delta = ktime_sub(now, ts->idle_entrytime);
 775
 776			idle = ktime_add(*sleeptime, delta);
 777		} else {
 778			idle = *sleeptime;
 779		}
 780	} while (read_seqcount_retry(&ts->idle_sleeptime_seq, seq));
 781
 782	return ktime_to_us(idle);
 783
 784}
 785
 786/**
 787 * get_cpu_idle_time_us - get the total idle time of a CPU
 788 * @cpu: CPU number to query
 789 * @last_update_time: variable to store update time in. Do not update
 790 * counters if NULL.
 791 *
 792 * Return the cumulative idle time (since boot) for a given
 793 * CPU, in microseconds. Note that this is partially broken due to
 794 * the counter of iowait tasks that can be remotely updated without
 795 * any synchronization. Therefore it is possible to observe backward
 796 * values within two consecutive reads.
 797 *
 798 * This time is measured via accounting rather than sampling,
 799 * and is as accurate as ktime_get() is.
 800 *
 801 * Return: -1 if NOHZ is not enabled, else total idle time of the @cpu
 802 */
 803u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 804{
 805	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 806
 807	return get_cpu_sleep_time_us(ts, &ts->idle_sleeptime,
 808				     !nr_iowait_cpu(cpu), last_update_time);
 809}
 810EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
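/*
 * Illustrative usage sketch (not part of this file): consumers such as
 * cpufreq governors usually sample this counter twice and work with the
 * deltas, e.g. to estimate how busy a CPU was over an interval:
 *
 *	u64 wall_start, wall_end, idle_start, idle_end, busy_us;
 *
 *	idle_start = get_cpu_idle_time_us(cpu, &wall_start);
 *	...sampling interval...
 *	idle_end = get_cpu_idle_time_us(cpu, &wall_end);
 *
 *	busy_us = (wall_end - wall_start) - (idle_end - idle_start);
 *
 * Because of the iowait accounting caveat above, such consumers have to
 * tolerate the occasional backward step (negative delta).
 */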
 811
 812/**
 813 * get_cpu_iowait_time_us - get the total iowait time of a CPU
 814 * @cpu: CPU number to query
 815 * @last_update_time: variable to store update time in. Do not update
 816 * counters if NULL.
 817 *
 818 * Return the cumulative iowait time (since boot) for a given
 819 * CPU, in microseconds. Note this is partially broken due to
 820 * the counter of iowait tasks that can be remotely updated without
 821 * any synchronization. Therefore it is possible to observe backward
 822 * values within two consecutive reads.
 823 *
 824 * This time is measured via accounting rather than sampling,
 825 * and is as accurate as ktime_get() is.
 826 *
 827 * Return: -1 if NOHZ is not enabled, else total iowait time of @cpu
 828 */
 829u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 830{
 831	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 832
 833	return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime,
 834				     nr_iowait_cpu(cpu), last_update_time);
 835}
 836EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 837
 838static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 839{
 840	hrtimer_cancel(&ts->sched_timer);
 841	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
 842
 843	/* Forward the time to expire in the future */
 844	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
 845
 846	if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) {
 847		hrtimer_start_expires(&ts->sched_timer,
 848				      HRTIMER_MODE_ABS_PINNED_HARD);
 849	} else {
 850		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
 851	}
 852
 853	/*
 854	 * Reset to make sure the next tick stop doesn't get fooled by past
 855	 * cached clock deadline.
 856	 */
 857	ts->next_tick = 0;
 858}
 859
 860static inline bool local_timer_softirq_pending(void)
 861{
 862	return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
 863}
 864
 865/*
 866 * Read jiffies and the time when jiffies were updated last
 867 */
 868u64 get_jiffies_update(unsigned long *basej)
 869{
 
 870	unsigned long basejiff;
 871	unsigned int seq;
 872	u64 basemono;
 873
 
 874	do {
 875		seq = read_seqcount_begin(&jiffies_seq);
 876		basemono = last_jiffies_update;
 877		basejiff = jiffies;
 878	} while (read_seqcount_retry(&jiffies_seq, seq));
 879	*basej = basejiff;
 880	return basemono;
 881}
 882
 883/**
 884 * tick_nohz_next_event() - return the clock monotonic based next event
 885 * @ts:		pointer to tick_sched struct
 886 * @cpu:	CPU number
 887 *
 888 * Return:
 889 * *%0		- When the next event is a maximum of TICK_NSEC in the future
 890 *		  and the tick is not stopped yet
 891 * *%next_event	- Next event based on clock monotonic
 892 */
 893static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 894{
 895	u64 basemono, next_tick, delta, expires;
 896	unsigned long basejiff;
 897	int tick_cpu;
 898
 899	basemono = get_jiffies_update(&basejiff);
 900	ts->last_jiffies = basejiff;
 901	ts->timer_expires_base = basemono;
 902
 903	/*
 904	 * Keep the periodic tick, when RCU, architecture or irq_work
 905	 * requests it.
 906	 * Aside from that, check whether the local timer softirq is
 907	 * pending. If so, it's a bad idea to call get_next_timer_interrupt(),
 908	 * because there is an already expired timer, so it will request
 909	 * immediate expiry, which rearms the hardware timer with a
 910	 * minimal delta, which brings us back to this place
 911	 * immediately. Lather, rinse and repeat...
 912	 */
 913	if (rcu_needs_cpu() || arch_needs_cpu() ||
 914	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
 915		next_tick = basemono + TICK_NSEC;
 916	} else {
 917		/*
 918		 * Get the next pending timer. If high resolution
 919		 * timers are enabled this only takes the timer wheel
 920		 * timers into account. If high resolution timers are
 921		 * disabled this also looks at the next expiring
 922		 * hrtimer.
 923		 */
 924		next_tick = get_next_timer_interrupt(basejiff, basemono);
 925		ts->next_timer = next_tick;
 
 
 926	}
 927
 928	/* Make sure next_tick is never before basemono! */
 929	if (WARN_ON_ONCE(basemono > next_tick))
 930		next_tick = basemono;
 931
 932	/*
 933	 * If the tick is due in the next period, keep it ticking or
 934	 * force prod the timer.
 935	 */
 936	delta = next_tick - basemono;
 937	if (delta <= (u64)TICK_NSEC) {
 938		/*
 939		 * We've not stopped the tick yet, and there's a timer in the
 940		 * next period, so no point in stopping it either, bail.
 941		 */
 942		if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
 943			ts->timer_expires = 0;
 944			goto out;
 945		}
 946	}
 947
 948	/*
 949	 * If this CPU is the one which had the do_timer() duty last, we limit
 950	 * the sleep time to the timekeeping 'max_deferment' value.
 951	 * Otherwise we can sleep as long as we want.
 952	 */
 953	delta = timekeeping_max_deferment();
 954	tick_cpu = READ_ONCE(tick_do_timer_cpu);
 955	if (tick_cpu != cpu &&
 956	    (tick_cpu != TICK_DO_TIMER_NONE || !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST)))
 957		delta = KTIME_MAX;
 958
 959	/* Calculate the next expiry time */
 960	if (delta < (KTIME_MAX - basemono))
 961		expires = basemono + delta;
 962	else
 963		expires = KTIME_MAX;
 964
 965	ts->timer_expires = min_t(u64, expires, next_tick);
 966
 967out:
 968	return ts->timer_expires;
 969}
 970
 971static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 972{
 973	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 974	unsigned long basejiff = ts->last_jiffies;
 975	u64 basemono = ts->timer_expires_base;
 976	bool timer_idle = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
 977	int tick_cpu;
 978	u64 expires;
 979
 980	/* Make sure we won't be trying to stop it twice in a row. */
 981	ts->timer_expires_base = 0;
 982
 983	/*
 984	 * Now the tick should be stopped definitely - so the timer base needs
 985	 * to be marked idle as well to not miss a newly queued timer.
 986	 */
 987	expires = timer_base_try_to_set_idle(basejiff, basemono, &timer_idle);
 988	if (expires > ts->timer_expires) {
 989		/*
 990		 * This path could only happen when the first timer was removed
 991		 * between calculating the possible sleep length and now (when
 992		 * high resolution mode is not active, timer could also be a
 993		 * hrtimer).
 994		 *
 995		 * We have to stick to the original calculated expiry value to
 996		 * not stop the tick for too long with a shallow C-state (which
 997		 * was programmed by cpuidle because of an early next expiration
 998		 * value).
 999		 */
1000		expires = ts->timer_expires;
1001	}
1002
1003	/* If the timer base is not idle, retain the not yet stopped tick. */
1004	if (!timer_idle)
1005		return;
1006
1007	/*
1008	 * If this CPU is the one which updates jiffies, then give up
1009	 * the assignment and let it be taken by the CPU which runs
1010	 * the tick timer next, which might be this CPU as well. If we
1011	 * don't drop this here, the jiffies might be stale and
1012	 * do_timer() never gets invoked. Keep track of the fact that it
1013	 * was the one which had the do_timer() duty last.
1014	 */
1015	tick_cpu = READ_ONCE(tick_do_timer_cpu);
1016	if (tick_cpu == cpu) {
1017		WRITE_ONCE(tick_do_timer_cpu, TICK_DO_TIMER_NONE);
1018		tick_sched_flag_set(ts, TS_FLAG_DO_TIMER_LAST);
1019	} else if (tick_cpu != TICK_DO_TIMER_NONE) {
1020		tick_sched_flag_clear(ts, TS_FLAG_DO_TIMER_LAST);
1021	}
1022
1023	/* Skip reprogram of event if it's not changed */
1024	if (tick_sched_flag_test(ts, TS_FLAG_STOPPED) && (expires == ts->next_tick)) {
1025		/* Sanity check: make sure clockevent is actually programmed */
1026		if (expires == KTIME_MAX || ts->next_tick == hrtimer_get_expires(&ts->sched_timer))
1027			return;
1028
1029		WARN_ON_ONCE(1);
1030		printk_once("basemono: %llu ts->next_tick: %llu dev->next_event: %llu timer->active: %d timer->expires: %llu\n",
1031			    basemono, ts->next_tick, dev->next_event,
1032			    hrtimer_active(&ts->sched_timer), hrtimer_get_expires(&ts->sched_timer));
1033	}
1034
1035	/*
1036	 * tick_nohz_stop_tick() can be called several times before
1037	 * tick_nohz_restart_sched_tick() is called. This happens when
1038	 * interrupts arrive which do not cause a reschedule. In the first
1039	 * call we save the current tick time, so we can restart the
1040	 * scheduler tick in tick_nohz_restart_sched_tick().
1041	 */
1042	if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
1043		calc_load_nohz_start();
1044		quiet_vmstat();
1045
1046		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
1047		tick_sched_flag_set(ts, TS_FLAG_STOPPED);
1048		trace_tick_stop(1, TICK_DEP_MASK_NONE);
1049	}
1050
1051	ts->next_tick = expires;
1052
1053	/*
1054	 * If the expiration time == KTIME_MAX, then we simply stop
1055	 * the tick timer.
1056	 */
1057	if (unlikely(expires == KTIME_MAX)) {
1058		tick_sched_timer_cancel(ts);
 
1059		return;
1060	}
1061
1062	if (tick_sched_flag_test(ts, TS_FLAG_HIGHRES)) {
1063		hrtimer_start(&ts->sched_timer, expires,
1064			      HRTIMER_MODE_ABS_PINNED_HARD);
1065	} else {
1066		hrtimer_set_expires(&ts->sched_timer, expires);
1067		tick_program_event(expires, 1);
1068	}
1069}
1070
1071static void tick_nohz_retain_tick(struct tick_sched *ts)
1072{
1073	ts->timer_expires_base = 0;
1074}
1075
1076#ifdef CONFIG_NO_HZ_FULL
1077static void tick_nohz_full_stop_tick(struct tick_sched *ts, int cpu)
1078{
1079	if (tick_nohz_next_event(ts, cpu))
1080		tick_nohz_stop_tick(ts, cpu);
1081	else
1082		tick_nohz_retain_tick(ts);
1083}
1084#endif /* CONFIG_NO_HZ_FULL */
1085
1086static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
1087{
1088	/* Update jiffies first */
1089	tick_do_update_jiffies64(now);
1090
1091	/*
1092	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
1093	 * the clock forward checks in the enqueue path:
1094	 */
1095	timer_clear_idle();
1096
1097	calc_load_nohz_stop();
1098	touch_softlockup_watchdog_sched();
1099
1100	/* Cancel the scheduled timer and restore the tick: */
1101	tick_sched_flag_clear(ts, TS_FLAG_STOPPED);
1102	tick_nohz_restart(ts, now);
1103}
1104
1105static void __tick_nohz_full_update_tick(struct tick_sched *ts,
1106					 ktime_t now)
1107{
1108#ifdef CONFIG_NO_HZ_FULL
1109	int cpu = smp_processor_id();
1110
1111	if (can_stop_full_tick(cpu, ts))
1112		tick_nohz_full_stop_tick(ts, cpu);
1113	else if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
1114		tick_nohz_restart_sched_tick(ts, now);
1115#endif
1116}
1117
1118static void tick_nohz_full_update_tick(struct tick_sched *ts)
1119{
1120	if (!tick_nohz_full_cpu(smp_processor_id()))
1121		return;
1122
1123	if (!tick_sched_flag_test(ts, TS_FLAG_NOHZ))
1124		return;
1125
1126	__tick_nohz_full_update_tick(ts, ktime_get());
1127}
1128
1129/*
1130 * A pending softirq outside an IRQ (or softirq disabled section) context
1131 * should be waiting for ksoftirqd to handle it. Therefore we shouldn't
1132 * reach this code due to the need_resched() early check in can_stop_idle_tick().
1133 *
1134 * However if we are between CPUHP_AP_SMPBOOT_THREADS and CPUHP_TEARDOWN_CPU
1135 * on the cpu_down() process, softirqs can still be raised while ksoftirqd is
1136 * parked, triggering the code below, since wakeup_softirqd() is ignored.
1137 *
1138 */
1139static bool report_idle_softirq(void)
1140{
1141	static int ratelimit;
1142	unsigned int pending = local_softirq_pending();
1143
1144	if (likely(!pending))
1145		return false;
1146
1147	/* Some softirqs claim to be safe against hotplug and ksoftirqd parking */
1148	if (!cpu_active(smp_processor_id())) {
1149		pending &= ~SOFTIRQ_HOTPLUG_SAFE_MASK;
1150		if (!pending)
1151			return false;
1152	}
1153
1154	if (ratelimit >= 10)
1155		return false;
1156
1157	/* On RT, softirq handling may be waiting on some lock */
1158	if (local_bh_blocked())
1159		return false;
1160
1161	pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
1162		pending);
1163	ratelimit++;
1164
1165	return true;
1166}
1167
1168static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
1169{
1170	WARN_ON_ONCE(cpu_is_offline(cpu));
1171
1172	if (unlikely(!tick_sched_flag_test(ts, TS_FLAG_NOHZ)))
1173		return false;
1174
1175	if (need_resched())
1176		return false;
1177
1178	if (unlikely(report_idle_softirq()))
1179		return false;
 
1180
1181	if (tick_nohz_full_enabled()) {
1182		int tick_cpu = READ_ONCE(tick_do_timer_cpu);
1183
1184		/*
1185		 * Keep the tick alive to guarantee timekeeping progression
1186		 * if there are full dynticks CPUs around
1187		 */
1188		if (tick_cpu == cpu)
1189			return false;
1190
1191		/* Should not happen for nohz-full */
1192		if (WARN_ON_ONCE(tick_cpu == TICK_DO_TIMER_NONE))
1193			return false;
1194	}
1195
1196	return true;
1197}
1198
1199/**
1200 * tick_nohz_idle_stop_tick - stop the idle tick from the idle task
1201 *
1202 * When the next event is more than a tick into the future, stop the idle tick
1203 */
1204void tick_nohz_idle_stop_tick(void)
1205{
1206	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1207	int cpu = smp_processor_id();
1208	ktime_t expires;
1209
1210	/*
1211	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
1212	 * tick timer expiration time is known already.
1213	 */
1214	if (ts->timer_expires_base)
1215		expires = ts->timer_expires;
1216	else if (can_stop_idle_tick(cpu, ts))
1217		expires = tick_nohz_next_event(ts, cpu);
1218	else
1219		return;
1220
1221	ts->idle_calls++;
1222
1223	if (expires > 0LL) {
1224		int was_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
1225
1226		tick_nohz_stop_tick(ts, cpu);
1227
1228		ts->idle_sleeps++;
1229		ts->idle_expires = expires;
1230
1231		if (!was_stopped && tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
1232			ts->idle_jiffies = ts->last_jiffies;
1233			nohz_balance_enter_idle(cpu);
1234		}
1235	} else {
1236		tick_nohz_retain_tick(ts);
1237	}
1238}
1239
1240void tick_nohz_idle_retain_tick(void)
1241{
1242	tick_nohz_retain_tick(this_cpu_ptr(&tick_cpu_sched));
1243}
1244
1245/**
1246 * tick_nohz_idle_enter - prepare for entering idle on the current CPU
1247 *
1248 * Called when we start the idle loop.
1249 */
1250void tick_nohz_idle_enter(void)
1251{
1252	struct tick_sched *ts;
1253
1254	lockdep_assert_irqs_enabled();
1255
1256	local_irq_disable();
1257
1258	ts = this_cpu_ptr(&tick_cpu_sched);
1259
1260	WARN_ON_ONCE(ts->timer_expires_base);
1261
1262	tick_sched_flag_set(ts, TS_FLAG_INIDLE);
1263	tick_nohz_start_idle(ts);
1264
1265	local_irq_enable();
1266}
1267
1268/**
1269 * tick_nohz_irq_exit - Notify the tick about IRQ exit
1270 *
1271 * A timer may have been added/modified/deleted either by the current IRQ,
1272 * or by another place using this IRQ as a notification. This IRQ may have
1273 * also updated the RCU callback list. These events may require a
1274 * re-evaluation of the next tick. Depending on the context:
1275 *
1276 * 1) If the CPU is idle and no resched is pending, just proceed with idle
1277 *    time accounting. The next tick will be re-evaluated on the next idle
1278 *    loop iteration.
1279 *
1280 * 2) If the CPU is nohz_full:
1281 *
1282 *    2.1) If there is any tick dependency, restart the tick if stopped.
1283 *
1284 *    2.2) If there is no tick dependency, (re-)evaluate the next tick and
1285 *         stop/update it accordingly.
1286 */
1287void tick_nohz_irq_exit(void)
1288{
1289	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1290
1291	if (tick_sched_flag_test(ts, TS_FLAG_INIDLE))
1292		tick_nohz_start_idle(ts);
1293	else
1294		tick_nohz_full_update_tick(ts);
1295}
1296
1297/**
1298 * tick_nohz_idle_got_tick - Check whether or not the tick handler has run
1299 *
1300 * Return: %true if the tick handler has run, otherwise %false
1301 */
1302bool tick_nohz_idle_got_tick(void)
1303{
1304	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1305
1306	if (ts->got_idle_tick) {
1307		ts->got_idle_tick = 0;
1308		return true;
1309	}
1310	return false;
1311}
1312
1313/**
1314 * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
1315 * or the tick, whichever expires first. Note that, if the tick has been
1316 * stopped, it returns the next hrtimer.
1317 *
1318 * Called from power state control code with interrupts disabled
1319 *
1320 * Return: the next expiration time
1321 */
1322ktime_t tick_nohz_get_next_hrtimer(void)
1323{
1324	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
1325}
1326
1327/**
1328 * tick_nohz_get_sleep_length - return the expected length of the current sleep
1329 * @delta_next: duration until the next event if the tick cannot be stopped
1330 *
1331 * Called from power state control code with interrupts disabled.
1332 *
1333 * The return value of this function and/or the value returned by it through the
1334 * @delta_next pointer can be negative which must be taken into account by its
1335 * callers.
1336 *
1337 * Return: the expected length of the current sleep
1338 */
1339ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
1340{
1341	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
1342	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1343	int cpu = smp_processor_id();
1344	/*
1345	 * The idle entry time is expected to be a sufficient approximation of
1346	 * the current time at this point.
1347	 */
1348	ktime_t now = ts->idle_entrytime;
1349	ktime_t next_event;
1350
1351	WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE));
1352
1353	*delta_next = ktime_sub(dev->next_event, now);
1354
1355	if (!can_stop_idle_tick(cpu, ts))
1356		return *delta_next;
1357
1358	next_event = tick_nohz_next_event(ts, cpu);
1359	if (!next_event)
1360		return *delta_next;
1361
1362	/*
1363	 * If the next highres timer to expire is earlier than 'next_event', the
1364	 * idle governor needs to know that.
1365	 */
1366	next_event = min_t(u64, next_event,
1367			   hrtimer_next_event_without(&ts->sched_timer));
1368
1369	return ktime_sub(next_event, now);
1370}
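/*
 * Illustrative note: cpuidle governors call tick_nohz_get_sleep_length()
 * from the idle path, with interrupts disabled, to bound their predicted
 * sleep duration, roughly along these lines:
 *
 *	ktime_t delta_tick;
 *	s64 sleep_ns = ktime_to_ns(tick_nohz_get_sleep_length(&delta_tick));
 *
 *	if (sleep_ns <= 0)
 *		...a wakeup is already due, pick the shallowest state...
 *
 * The actual selection policy lives in the governors; this is only a sketch.
 */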
1371
1372/**
1373 * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
1374 * for a particular CPU.
1375 * @cpu: target CPU number
1376 *
1377 * Called from the schedutil frequency scaling governor in scheduler context.
1378 *
1379 * Return: the current idle calls counter value for @cpu
1380 */
1381unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
1382{
1383	struct tick_sched *ts = tick_get_tick_sched(cpu);
1384
1385	return ts->idle_calls;
1386}
1387
1388/**
1389 * tick_nohz_get_idle_calls - return the current idle calls counter value
1390 *
1391 * Called from the schedutil frequency scaling governor in scheduler context.
1392 *
1393 * Return: the current idle calls counter value for the current CPU
1394 */
1395unsigned long tick_nohz_get_idle_calls(void)
1396{
1397	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1398
1399	return ts->idle_calls;
1400}
1401
1402static void tick_nohz_account_idle_time(struct tick_sched *ts,
1403					ktime_t now)
1404{
 
1405	unsigned long ticks;
1406
1407	ts->idle_exittime = now;
1408
1409	if (vtime_accounting_enabled_this_cpu())
1410		return;
1411	/*
1412	 * We stopped the tick in idle. update_process_times() would miss the
1413	 * time we slept, as it only accounts a single tick per invocation.
1414	 * Enforce that the whole sleep time is accounted to idle!
1415	 */
1416	ticks = jiffies - ts->idle_jiffies;
1417	/*
1418	 * We might be one off. Do not randomly account a huge number of ticks!
1419	 */
1420	if (ticks && ticks < LONG_MAX)
1421		account_idle_ticks(ticks);
 
1422}
1423
1424void tick_nohz_idle_restart_tick(void)
1425{
1426	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1427
1428	if (tick_sched_flag_test(ts, TS_FLAG_STOPPED)) {
1429		ktime_t now = ktime_get();
1430		tick_nohz_restart_sched_tick(ts, now);
1431		tick_nohz_account_idle_time(ts, now);
1432	}
1433}
1434
1435static void tick_nohz_idle_update_tick(struct tick_sched *ts, ktime_t now)
1436{
1437	if (tick_nohz_full_cpu(smp_processor_id()))
1438		__tick_nohz_full_update_tick(ts, now);
1439	else
1440		tick_nohz_restart_sched_tick(ts, now);
1441
1442	tick_nohz_account_idle_time(ts, now);
 
1443}
1444
1445/**
1446 * tick_nohz_idle_exit - Update the tick upon idle task exit
1447 *
1448 * When the idle task exits, update the tick depending on the
1449 * following situations:
1450 *
1451 * 1) If the CPU is not in nohz_full mode (most cases), then
1452 *    restart the tick.
1453 *
1454 * 2) If the CPU is in nohz_full mode (corner case):
1455 *   2.1) If the tick can be kept stopped (no tick dependencies)
1456 *        then re-evaluate the next tick and try to keep it stopped
1457 *        as long as possible.
1458 *   2.2) If the tick has dependencies, restart the tick.
1459 *
1460 */
1461void tick_nohz_idle_exit(void)
1462{
1463	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1464	bool idle_active, tick_stopped;
1465	ktime_t now;
1466
1467	local_irq_disable();
1468
1469	WARN_ON_ONCE(!tick_sched_flag_test(ts, TS_FLAG_INIDLE));
1470	WARN_ON_ONCE(ts->timer_expires_base);
1471
1472	tick_sched_flag_clear(ts, TS_FLAG_INIDLE);
1473	idle_active = tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE);
1474	tick_stopped = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
1475
1476	if (idle_active || tick_stopped)
1477		now = ktime_get();
1478
1479	if (idle_active)
1480		tick_nohz_stop_idle(ts, now);
1481
1482	if (tick_stopped)
1483		tick_nohz_idle_update_tick(ts, now);
1484
1485	local_irq_enable();
1486}
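/*
 * Putting the entry points above together, the idle loop in
 * kernel/sched/idle.c is expected to use them roughly in this order
 * (simplified sketch, details and error paths omitted):
 *
 *	tick_nohz_idle_enter();
 *	while (!need_resched()) {
 *		...the cpuidle governor decides whether the tick may stop...
 *		tick_nohz_idle_stop_tick();   (or tick_nohz_idle_retain_tick())
 *		...enter a low power state, woken by an interrupt...
 *	}
 *	tick_nohz_idle_exit();
 *
 * Interrupts taken while idle go through tick_nohz_irq_exit() so the tick
 * can be re-evaluated on their way out.
 */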
1487
1488/*
1489 * In low-resolution mode, the tick handler must be implemented directly
1490 * at the clockevent level. hrtimer can't be used instead, because its
1491 * infrastructure actually relies on the tick itself as a backend in
1492 * low-resolution mode (see hrtimer_run_queues()).
1493 */
1494static void tick_nohz_lowres_handler(struct clock_event_device *dev)
1495{
1496	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 
1497
1498	dev->next_event = KTIME_MAX;
1499
1500	if (likely(tick_nohz_handler(&ts->sched_timer) == HRTIMER_RESTART))
1501		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1502}
1503
1504static inline void tick_nohz_activate(struct tick_sched *ts)
1505{
1506	if (!tick_nohz_enabled)
1507		return;
1508	tick_sched_flag_set(ts, TS_FLAG_NOHZ);
1509	/* One update is enough */
1510	if (!test_and_set_bit(0, &tick_nohz_active))
1511		timers_update_nohz();
1512}
1513
1514/**
1515 * tick_nohz_switch_to_nohz - switch to NOHZ mode
1516 */
1517static void tick_nohz_switch_to_nohz(void)
1518{
1519	if (!tick_nohz_enabled)
1520		return;
1521
1522	if (tick_switch_to_oneshot(tick_nohz_lowres_handler))
1523		return;
1524
1525	/*
1526	 * Recycle the hrtimer in 'ts', so we can share the
1527	 * highres code.
1528	 */
1529	tick_setup_sched_timer(false);
1530}
1531
1532static inline void tick_nohz_irq_enter(void)
1533{
1534	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1535	ktime_t now;
1536
1537	if (!tick_sched_flag_test(ts, TS_FLAG_STOPPED | TS_FLAG_IDLE_ACTIVE))
1538		return;
1539	now = ktime_get();
1540	if (tick_sched_flag_test(ts, TS_FLAG_IDLE_ACTIVE))
1541		tick_nohz_stop_idle(ts, now);
1542	/*
1543	 * If all CPUs are idle we may need to update a stale jiffies value.
1544	 * Note nohz_full is a special case: a timekeeper is guaranteed to stay
1545	 * alive but it might be busy looping with interrupts disabled in some
1546	 * rare case (typically stop machine). So we must make sure we have a
1547	 * last resort.
1548	 */
1549	if (tick_sched_flag_test(ts, TS_FLAG_STOPPED))
1550		tick_nohz_update_jiffies(now);
1551}
1552
1553#else
1554
1555static inline void tick_nohz_switch_to_nohz(void) { }
1556static inline void tick_nohz_irq_enter(void) { }
1557static inline void tick_nohz_activate(struct tick_sched *ts) { }
1558
1559#endif /* CONFIG_NO_HZ_COMMON */
1560
1561/*
1562 * Called from irq_enter() to notify about the possible interruption of idle()
1563 */
1564void tick_irq_enter(void)
1565{
1566	tick_check_oneshot_broadcast_this_cpu();
1567	tick_nohz_irq_enter();
1568}
1569
1570static int sched_skew_tick;
1571
1572static int __init skew_tick(char *str)
1573{
1574	get_option(&str, &sched_skew_tick);
1575
1576	return 0;
1577}
1578early_param("skew_tick", skew_tick);
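/*
 * Example (boot command line): tick skewing is enabled with
 *
 *	skew_tick=1
 *
 * which staggers the per-CPU tick expiry in tick_setup_sched_timer() below,
 * trading slightly higher power consumption for less 'jiffies_lock'
 * contention on large systems.
 */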
1579
1580/**
1581 * tick_setup_sched_timer - setup the tick emulation timer
1582 * @hrtimer: whether to use the hrtimer or not
1583 */
1584void tick_setup_sched_timer(bool hrtimer)
1585{
1586	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
1587
1588	/* Emulate tick processing via per-CPU hrtimers: */
 
 
1589	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
1590
1591	if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && hrtimer) {
1592		tick_sched_flag_set(ts, TS_FLAG_HIGHRES);
1593		ts->sched_timer.function = tick_nohz_handler;
1594	}
1595
1596	/* Get the next period (per-CPU) */
1597	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
1598
1599	/* Offset the tick to avert 'jiffies_lock' contention. */
1600	if (sched_skew_tick) {
1601		u64 offset = TICK_NSEC >> 1;
1602		do_div(offset, num_possible_cpus());
1603		offset *= smp_processor_id();
1604		hrtimer_add_expires_ns(&ts->sched_timer, offset);
1605	}
1606
1607	hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
1608	if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS) && hrtimer)
1609		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
1610	else
1611		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1612	tick_nohz_activate(ts);
1613}
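/*
 * Worked example for the skew offset above (illustrative numbers): with
 * HZ=1000 (TICK_NSEC = 1,000,000 ns) and 4 possible CPUs, the per-CPU step
 * is (TICK_NSEC >> 1) / 4 = 125,000 ns, so CPU 0 keeps its expiry while
 * CPUs 1, 2 and 3 are shifted by 125 us, 250 us and 375 us respectively
 * within the tick period.
 */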
 
1614
1615/*
1616 * Shut down the tick and make sure the CPU won't try to retake the timekeeping
1617 * duty before disabling IRQs in idle for the last time.
1618 */
1619void tick_sched_timer_dying(int cpu)
1620{
1621	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
1622	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
1623	struct clock_event_device *dev = td->evtdev;
1624	ktime_t idle_sleeptime, iowait_sleeptime;
1625	unsigned long idle_calls, idle_sleeps;
1626
1627	/* This must happen before hrtimers are migrated! */
1628	tick_sched_timer_cancel(ts);
1629
1630	/*
1631	 * If the clockevents doesn't support CLOCK_EVT_STATE_ONESHOT_STOPPED,
1632	 * make sure not to call low-res tick handler.
1633	 */
1634	if (tick_sched_flag_test(ts, TS_FLAG_NOHZ))
1635		dev->event_handler = clockevents_handle_noop;
1636
1637	idle_sleeptime = ts->idle_sleeptime;
1638	iowait_sleeptime = ts->iowait_sleeptime;
1639	idle_calls = ts->idle_calls;
1640	idle_sleeps = ts->idle_sleeps;
1641	memset(ts, 0, sizeof(*ts));
1642	ts->idle_sleeptime = idle_sleeptime;
1643	ts->iowait_sleeptime = iowait_sleeptime;
1644	ts->idle_calls = idle_calls;
1645	ts->idle_sleeps = idle_sleeps;
1646}
 
1647
1648/*
1649 * Async notification about clocksource changes
1650 */
1651void tick_clock_notify(void)
1652{
1653	int cpu;
1654
1655	for_each_possible_cpu(cpu)
1656		set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks);
1657}
1658
1659/*
1660 * Async notification about clock event changes
1661 */
1662void tick_oneshot_notify(void)
1663{
1664	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1665
1666	set_bit(0, &ts->check_clocks);
1667}
1668
1669/*
1670 * Check if a change happened, which makes oneshot possible.
1671 *
1672 * Called cyclically from the hrtimer softirq (driven by the timer
1673 * softirq). 'allow_nohz' signals that we can switch into low-res NOHZ
1674 * mode, because high resolution timers are disabled (either at compile
1675 * time or at runtime). Called with interrupts disabled.
1676 */
1677int tick_check_oneshot_change(int allow_nohz)
1678{
1679	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
1680
1681	if (!test_and_clear_bit(0, &ts->check_clocks))
1682		return 0;
1683
1684	if (tick_sched_flag_test(ts, TS_FLAG_NOHZ))
1685		return 0;
1686
1687	if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
1688		return 0;
1689
1690	if (!allow_nohz)
1691		return 1;
1692
1693	tick_nohz_switch_to_nohz();
1694	return 0;
1695}