Linux Audio

Check our new training course

Loading...
v6.13.7
   1/* CPU control.
   2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
   3 *
   4 * This code is licenced under the GPL.
   5 */
   6#include <linux/sched/mm.h>
   7#include <linux/proc_fs.h>
   8#include <linux/smp.h>
   9#include <linux/init.h>
  10#include <linux/notifier.h>
  11#include <linux/sched/signal.h>
  12#include <linux/sched/hotplug.h>
  13#include <linux/sched/isolation.h>
  14#include <linux/sched/task.h>
  15#include <linux/sched/smt.h>
  16#include <linux/unistd.h>
  17#include <linux/cpu.h>
  18#include <linux/oom.h>
  19#include <linux/rcupdate.h>
  20#include <linux/delay.h>
  21#include <linux/export.h>
  22#include <linux/bug.h>
  23#include <linux/kthread.h>
  24#include <linux/stop_machine.h>
  25#include <linux/mutex.h>
  26#include <linux/gfp.h>
  27#include <linux/suspend.h>
  28#include <linux/lockdep.h>
  29#include <linux/tick.h>
  30#include <linux/irq.h>
  31#include <linux/nmi.h>
  32#include <linux/smpboot.h>
  33#include <linux/relay.h>
  34#include <linux/slab.h>
  35#include <linux/scs.h>
  36#include <linux/percpu-rwsem.h>
  37#include <linux/cpuset.h>
  38#include <linux/random.h>
  39#include <linux/cc_platform.h>
  40
  41#include <trace/events/power.h>
  42#define CREATE_TRACE_POINTS
  43#include <trace/events/cpuhp.h>
  44
  45#include "smpboot.h"
  46
  47/**
  48 * struct cpuhp_cpu_state - Per cpu hotplug state storage
  49 * @state:	The current cpu state
  50 * @target:	The target state
  51 * @fail:	Current CPU hotplug callback state
  52 * @thread:	Pointer to the hotplug thread
  53 * @should_run:	Thread should execute
  54 * @rollback:	Perform a rollback
  55 * @single:	Single callback invocation
  56 * @bringup:	Single callback bringup or teardown selector
  57 * @node:	Remote CPU node; for multi-instance, do a
  58 *		single entry callback for install/remove
  59 * @last:	For multi-instance rollback, remember how far we got
  60 * @cb_state:	The state for a single callback (install/uninstall)
  61 * @result:	Result of the operation
  62 * @ap_sync_state:	State for AP synchronization
  63 * @done_up:	Signal completion to the issuer of the task for cpu-up
  64 * @done_down:	Signal completion to the issuer of the task for cpu-down
  65 */
  66struct cpuhp_cpu_state {
  67	enum cpuhp_state	state;
  68	enum cpuhp_state	target;
  69	enum cpuhp_state	fail;
  70#ifdef CONFIG_SMP
  71	struct task_struct	*thread;
  72	bool			should_run;
  73	bool			rollback;
  74	bool			single;
  75	bool			bringup;
 
  76	struct hlist_node	*node;
  77	struct hlist_node	*last;
  78	enum cpuhp_state	cb_state;
  79	int			result;
  80	atomic_t		ap_sync_state;
  81	struct completion	done_up;
  82	struct completion	done_down;
  83#endif
  84};
  85
  86static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
  87	.fail = CPUHP_INVALID,
  88};
  89
  90#ifdef CONFIG_SMP
  91cpumask_t cpus_booted_once_mask;
  92#endif
  93
  94#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
  95static struct lockdep_map cpuhp_state_up_map =
  96	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
  97static struct lockdep_map cpuhp_state_down_map =
  98	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
  99
 100
 101static inline void cpuhp_lock_acquire(bool bringup)
 102{
 103	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
 104}
 105
 106static inline void cpuhp_lock_release(bool bringup)
 107{
 108	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
 109}
 110#else
 111
 112static inline void cpuhp_lock_acquire(bool bringup) { }
 113static inline void cpuhp_lock_release(bool bringup) { }
 114
 115#endif
 116
 117/**
 118 * struct cpuhp_step - Hotplug state machine step
 119 * @name:	Name of the step
 120 * @startup:	Startup function of the step
 121 * @teardown:	Teardown function of the step
 122 * @cant_stop:	Bringup/teardown can't be stopped at this step
 123 * @multi_instance:	State has multiple instances which get added afterwards
 124 */
 125struct cpuhp_step {
 126	const char		*name;
 127	union {
 128		int		(*single)(unsigned int cpu);
 129		int		(*multi)(unsigned int cpu,
 130					 struct hlist_node *node);
 131	} startup;
 132	union {
 133		int		(*single)(unsigned int cpu);
 134		int		(*multi)(unsigned int cpu,
 135					 struct hlist_node *node);
 136	} teardown;
 137	/* private: */
 138	struct hlist_head	list;
 139	/* public: */
 140	bool			cant_stop;
 141	bool			multi_instance;
 142};
 143
 144static DEFINE_MUTEX(cpuhp_state_mutex);
 145static struct cpuhp_step cpuhp_hp_states[];
 146
 147static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
 148{
 149	return cpuhp_hp_states + state;
 150}
 151
 152static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
 153{
 154	return bringup ? !step->startup.single : !step->teardown.single;
 155}
 156
 157/**
 158 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 159 * @cpu:	The cpu for which the callback should be invoked
 160 * @state:	The state to do callbacks for
 161 * @bringup:	True if the bringup callback should be invoked
 162 * @node:	For multi-instance, do a single entry callback for install/remove
 163 * @lastp:	For multi-instance rollback, remember how far we got
 164 *
 165 * Called from cpu hotplug and from the state register machinery.
 166 *
 167 * Return: %0 on success or a negative errno code
 168 */
 169static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
 170				 bool bringup, struct hlist_node *node,
 171				 struct hlist_node **lastp)
 172{
 173	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 174	struct cpuhp_step *step = cpuhp_get_step(state);
 175	int (*cbm)(unsigned int cpu, struct hlist_node *node);
 176	int (*cb)(unsigned int cpu);
 177	int ret, cnt;
 178
 179	if (st->fail == state) {
 180		st->fail = CPUHP_INVALID;
 181		return -EAGAIN;
 182	}
 183
 184	if (cpuhp_step_empty(bringup, step)) {
 185		WARN_ON_ONCE(1);
 186		return 0;
 187	}
 188
 189	if (!step->multi_instance) {
 190		WARN_ON_ONCE(lastp && *lastp);
 191		cb = bringup ? step->startup.single : step->teardown.single;
 192
 193		trace_cpuhp_enter(cpu, st->target, state, cb);
 194		ret = cb(cpu);
 195		trace_cpuhp_exit(cpu, st->state, state, ret);
 196		return ret;
 197	}
 198	cbm = bringup ? step->startup.multi : step->teardown.multi;
 199
 200	/* Single invocation for instance add/remove */
 201	if (node) {
 202		WARN_ON_ONCE(lastp && *lastp);
 203		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 204		ret = cbm(cpu, node);
 205		trace_cpuhp_exit(cpu, st->state, state, ret);
 206		return ret;
 207	}
 208
 209	/* State transition. Invoke on all instances */
 210	cnt = 0;
 211	hlist_for_each(node, &step->list) {
 212		if (lastp && node == *lastp)
 213			break;
 214
 215		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 216		ret = cbm(cpu, node);
 217		trace_cpuhp_exit(cpu, st->state, state, ret);
 218		if (ret) {
 219			if (!lastp)
 220				goto err;
 221
 222			*lastp = node;
 223			return ret;
 224		}
 225		cnt++;
 226	}
 227	if (lastp)
 228		*lastp = NULL;
 229	return 0;
 230err:
 231	/* Rollback the instances if one failed */
 232	cbm = !bringup ? step->startup.multi : step->teardown.multi;
 233	if (!cbm)
 234		return ret;
 235
 236	hlist_for_each(node, &step->list) {
 237		if (!cnt--)
 238			break;
 239
 240		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 241		ret = cbm(cpu, node);
 242		trace_cpuhp_exit(cpu, st->state, state, ret);
 243		/*
 244		 * Rollback must not fail,
 245		 */
 246		WARN_ON_ONCE(ret);
 247	}
 248	return ret;
 249}
 250
 251#ifdef CONFIG_SMP
 252static bool cpuhp_is_ap_state(enum cpuhp_state state)
 253{
 254	/*
 255	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
 256	 * purposes as that state is handled explicitly in cpu_down.
 257	 */
 258	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
 259}
 260
 261static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
 262{
 263	struct completion *done = bringup ? &st->done_up : &st->done_down;
 264	wait_for_completion(done);
 265}
 266
 267static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
 268{
 269	struct completion *done = bringup ? &st->done_up : &st->done_down;
 270	complete(done);
 271}
 272
 273/*
 274 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 275 */
 276static bool cpuhp_is_atomic_state(enum cpuhp_state state)
 277{
 278	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
 279}
 280
 281/* Synchronization state management */
 282enum cpuhp_sync_state {
 283	SYNC_STATE_DEAD,
 284	SYNC_STATE_KICKED,
 285	SYNC_STATE_SHOULD_DIE,
 286	SYNC_STATE_ALIVE,
 287	SYNC_STATE_SHOULD_ONLINE,
 288	SYNC_STATE_ONLINE,
 289};
 290
 291#ifdef CONFIG_HOTPLUG_CORE_SYNC
 292/**
 293 * cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown
 294 * @state:	The synchronization state to set
 295 *
 296 * No synchronization point. Just update of the synchronization state, but implies
 297 * a full barrier so that the AP changes are visible before the control CPU proceeds.
 298 */
 299static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
 300{
 301	atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
 302
 303	(void)atomic_xchg(st, state);
 304}
 305
 306void __weak arch_cpuhp_sync_state_poll(void) { cpu_relax(); }
 307
 308static bool cpuhp_wait_for_sync_state(unsigned int cpu, enum cpuhp_sync_state state,
 309				      enum cpuhp_sync_state next_state)
 310{
 311	atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
 312	ktime_t now, end, start = ktime_get();
 313	int sync;
 314
 315	end = start + 10ULL * NSEC_PER_SEC;
 316
 317	sync = atomic_read(st);
 318	while (1) {
 319		if (sync == state) {
 320			if (!atomic_try_cmpxchg(st, &sync, next_state))
 321				continue;
 322			return true;
 323		}
 324
 325		now = ktime_get();
 326		if (now > end) {
 327			/* Timeout. Leave the state unchanged */
 328			return false;
 329		} else if (now - start < NSEC_PER_MSEC) {
 330			/* Poll for one millisecond */
 331			arch_cpuhp_sync_state_poll();
 332		} else {
 333			usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
 334		}
 335		sync = atomic_read(st);
 336	}
 337	return true;
 338}
 339#else  /* CONFIG_HOTPLUG_CORE_SYNC */
 340static inline void cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { }
 341#endif /* !CONFIG_HOTPLUG_CORE_SYNC */
 342
 343#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
 344/**
 345 * cpuhp_ap_report_dead - Update synchronization state to DEAD
 346 *
 347 * No synchronization point. Just update of the synchronization state.
 348 */
 349void cpuhp_ap_report_dead(void)
 350{
 351	cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);
 352}
 353
 354void __weak arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) { }
 355
 356/*
 357 * Late CPU shutdown synchronization point. Cannot use cpuhp_state::done_down
 358 * because the AP cannot issue complete() at this stage.
 359 */
 360static void cpuhp_bp_sync_dead(unsigned int cpu)
 361{
 362	atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
 363	int sync = atomic_read(st);
 364
 365	do {
 366		/* CPU can have reported dead already. Don't overwrite that! */
 367		if (sync == SYNC_STATE_DEAD)
 368			break;
 369	} while (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_SHOULD_DIE));
 370
 371	if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) {
 372		/* CPU reached dead state. Invoke the cleanup function */
 373		arch_cpuhp_cleanup_dead_cpu(cpu);
 374		return;
 375	}
 376
 377	/* No further action possible. Emit message and give up. */
 378	pr_err("CPU%u failed to report dead state\n", cpu);
 379}
 380#else /* CONFIG_HOTPLUG_CORE_SYNC_DEAD */
 381static inline void cpuhp_bp_sync_dead(unsigned int cpu) { }
 382#endif /* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */
 383
 384#ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL
 385/**
 386 * cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive
 387 *
 388 * Updates the AP synchronization state to SYNC_STATE_ALIVE and waits
 389 * for the BP to release it.
 390 */
 391void cpuhp_ap_sync_alive(void)
 392{
 393	atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
 394
 395	cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);
 396
 397	/* Wait for the control CPU to release it. */
 398	while (atomic_read(st) != SYNC_STATE_SHOULD_ONLINE)
 399		cpu_relax();
 400}
 401
 402static bool cpuhp_can_boot_ap(unsigned int cpu)
 403{
 404	atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu);
 405	int sync = atomic_read(st);
 406
 407again:
 408	switch (sync) {
 409	case SYNC_STATE_DEAD:
 410		/* CPU is properly dead */
 411		break;
 412	case SYNC_STATE_KICKED:
 413		/* CPU did not come up in previous attempt */
 414		break;
 415	case SYNC_STATE_ALIVE:
 416		/* CPU is stuck cpuhp_ap_sync_alive(). */
 417		break;
 418	default:
 419		/* CPU failed to report online or dead and is in limbo state. */
 420		return false;
 421	}
 422
 423	/* Prepare for booting */
 424	if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED))
 425		goto again;
 426
 427	return true;
 428}
 429
 430void __weak arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) { }
 431
 432/*
 433 * Early CPU bringup synchronization point. Cannot use cpuhp_state::done_up
 434 * because the AP cannot issue complete() so early in the bringup.
 435 */
 436static int cpuhp_bp_sync_alive(unsigned int cpu)
 437{
 438	int ret = 0;
 439
 440	if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SYNC_FULL))
 441		return 0;
 442
 443	if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {
 444		pr_err("CPU%u failed to report alive state\n", cpu);
 445		ret = -EIO;
 446	}
 447
 448	/* Let the architecture cleanup the kick alive mechanics. */
 449	arch_cpuhp_cleanup_kick_cpu(cpu);
 450	return ret;
 451}
 452#else /* CONFIG_HOTPLUG_CORE_SYNC_FULL */
 453static inline int cpuhp_bp_sync_alive(unsigned int cpu) { return 0; }
 454static inline bool cpuhp_can_boot_ap(unsigned int cpu) { return true; }
 455#endif /* !CONFIG_HOTPLUG_CORE_SYNC_FULL */
 456
 457/* Serializes the updates to cpu_online_mask, cpu_present_mask */
 458static DEFINE_MUTEX(cpu_add_remove_lock);
 459bool cpuhp_tasks_frozen;
 460EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
 461
 462/*
 463 * The following two APIs (cpu_maps_update_begin/done) must be used when
 464 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 465 */
 466void cpu_maps_update_begin(void)
 467{
 468	mutex_lock(&cpu_add_remove_lock);
 469}
 470
 471void cpu_maps_update_done(void)
 472{
 473	mutex_unlock(&cpu_add_remove_lock);
 474}
 475
 476/*
 477 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 478 * Should always be manipulated under cpu_add_remove_lock
 479 */
 480static int cpu_hotplug_disabled;
 481
 482#ifdef CONFIG_HOTPLUG_CPU
 483
 484DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
 485
 486static bool cpu_hotplug_offline_disabled __ro_after_init;
 487
 488void cpus_read_lock(void)
 489{
 490	percpu_down_read(&cpu_hotplug_lock);
 491}
 492EXPORT_SYMBOL_GPL(cpus_read_lock);
 493
 494int cpus_read_trylock(void)
 495{
 496	return percpu_down_read_trylock(&cpu_hotplug_lock);
 497}
 498EXPORT_SYMBOL_GPL(cpus_read_trylock);
 499
 500void cpus_read_unlock(void)
 501{
 502	percpu_up_read(&cpu_hotplug_lock);
 503}
 504EXPORT_SYMBOL_GPL(cpus_read_unlock);
 505
 506void cpus_write_lock(void)
 507{
 508	percpu_down_write(&cpu_hotplug_lock);
 509}
 510
 511void cpus_write_unlock(void)
 512{
 513	percpu_up_write(&cpu_hotplug_lock);
 514}
 515
 516void lockdep_assert_cpus_held(void)
 517{
 518	/*
 519	 * We can't have hotplug operations before userspace starts running,
 520	 * and some init codepaths will knowingly not take the hotplug lock.
 521	 * This is all valid, so mute lockdep until it makes sense to report
 522	 * unheld locks.
 523	 */
 524	if (system_state < SYSTEM_RUNNING)
 525		return;
 526
 527	percpu_rwsem_assert_held(&cpu_hotplug_lock);
 528}
 529
 530#ifdef CONFIG_LOCKDEP
 531int lockdep_is_cpus_held(void)
 532{
 533	return percpu_rwsem_is_held(&cpu_hotplug_lock);
 534}
 535#endif
 536
 537static void lockdep_acquire_cpus_lock(void)
 538{
 539	rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
 540}
 541
 542static void lockdep_release_cpus_lock(void)
 543{
 544	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
 545}
 546
 547/* Declare CPU offlining not supported */
 548void cpu_hotplug_disable_offlining(void)
 549{
 550	cpu_maps_update_begin();
 551	cpu_hotplug_offline_disabled = true;
 552	cpu_maps_update_done();
 553}
 554
 555/*
 556 * Wait for currently running CPU hotplug operations to complete (if any) and
 557 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 558 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 559 * hotplug path before performing hotplug operations. So acquiring that lock
 560 * guarantees mutual exclusion from any currently running hotplug operations.
 561 */
 562void cpu_hotplug_disable(void)
 563{
 564	cpu_maps_update_begin();
 565	cpu_hotplug_disabled++;
 566	cpu_maps_update_done();
 567}
 568EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
 569
 570static void __cpu_hotplug_enable(void)
 571{
 572	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
 573		return;
 574	cpu_hotplug_disabled--;
 575}
 576
 577void cpu_hotplug_enable(void)
 578{
 579	cpu_maps_update_begin();
 580	__cpu_hotplug_enable();
 581	cpu_maps_update_done();
 582}
 583EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 584
 585#else
 586
 587static void lockdep_acquire_cpus_lock(void)
 588{
 589}
 590
 591static void lockdep_release_cpus_lock(void)
 592{
 593}
 594
 595#endif	/* CONFIG_HOTPLUG_CPU */
 596
 597/*
 598 * Architectures that need SMT-specific errata handling during SMT hotplug
 599 * should override this.
 600 */
 601void __weak arch_smt_update(void) { }
 602
 603#ifdef CONFIG_HOTPLUG_SMT
 604
 605enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
 606static unsigned int cpu_smt_max_threads __ro_after_init;
 607unsigned int cpu_smt_num_threads __read_mostly = UINT_MAX;
 608
 609void __init cpu_smt_disable(bool force)
 610{
 611	if (!cpu_smt_possible())
 612		return;
 613
 614	if (force) {
 615		pr_info("SMT: Force disabled\n");
 616		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
 617	} else {
 618		pr_info("SMT: disabled\n");
 619		cpu_smt_control = CPU_SMT_DISABLED;
 620	}
 621	cpu_smt_num_threads = 1;
 622}
 623
 624/*
 625 * The decision whether SMT is supported can only be done after the full
 626 * CPU identification. Called from architecture code.
 627 */
 628void __init cpu_smt_set_num_threads(unsigned int num_threads,
 629				    unsigned int max_threads)
 630{
 631	WARN_ON(!num_threads || (num_threads > max_threads));
 632
 633	if (max_threads == 1)
 634		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
 635
 636	cpu_smt_max_threads = max_threads;
 637
 638	/*
 639	 * If SMT has been disabled via the kernel command line or SMT is
 640	 * not supported, set cpu_smt_num_threads to 1 for consistency.
 641	 * If enabled, take the architecture requested number of threads
 642	 * to bring up into account.
 643	 */
 644	if (cpu_smt_control != CPU_SMT_ENABLED)
 645		cpu_smt_num_threads = 1;
 646	else if (num_threads < cpu_smt_num_threads)
 647		cpu_smt_num_threads = num_threads;
 648}
 649
 650static int __init smt_cmdline_disable(char *str)
 651{
 652	cpu_smt_disable(str && !strcmp(str, "force"));
 653	return 0;
 654}
 655early_param("nosmt", smt_cmdline_disable);
 656
 657/*
 658 * For Archicture supporting partial SMT states check if the thread is allowed.
 659 * Otherwise this has already been checked through cpu_smt_max_threads when
 660 * setting the SMT level.
 661 */
 662static inline bool cpu_smt_thread_allowed(unsigned int cpu)
 663{
 664#ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC
 665	return topology_smt_thread_allowed(cpu);
 666#else
 667	return true;
 668#endif
 669}
 670
 671static inline bool cpu_bootable(unsigned int cpu)
 672{
 673	if (cpu_smt_control == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
 674		return true;
 675
 676	/* All CPUs are bootable if controls are not configured */
 677	if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED)
 678		return true;
 679
 680	/* All CPUs are bootable if CPU is not SMT capable */
 681	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
 682		return true;
 683
 684	if (topology_is_primary_thread(cpu))
 685		return true;
 686
 687	/*
 688	 * On x86 it's required to boot all logical CPUs at least once so
 689	 * that the init code can get a chance to set CR4.MCE on each
 690	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
 691	 * core will shutdown the machine.
 692	 */
 693	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
 694}
 695
 696/* Returns true if SMT is supported and not forcefully (irreversibly) disabled */
 697bool cpu_smt_possible(void)
 698{
 699	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
 700		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
 701}
 702EXPORT_SYMBOL_GPL(cpu_smt_possible);
 703
 704#else
 705static inline bool cpu_bootable(unsigned int cpu) { return true; }
 706#endif
 707
 708static inline enum cpuhp_state
 709cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
 710{
 711	enum cpuhp_state prev_state = st->state;
 712	bool bringup = st->state < target;
 713
 714	st->rollback = false;
 715	st->last = NULL;
 716
 717	st->target = target;
 718	st->single = false;
 719	st->bringup = bringup;
 720	if (cpu_dying(cpu) != !bringup)
 721		set_cpu_dying(cpu, !bringup);
 722
 723	return prev_state;
 724}
 725
 726static inline void
 727cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
 728		  enum cpuhp_state prev_state)
 729{
 730	bool bringup = !st->bringup;
 731
 732	st->target = prev_state;
 733
 734	/*
 735	 * Already rolling back. No need invert the bringup value or to change
 736	 * the current state.
 737	 */
 738	if (st->rollback)
 739		return;
 740
 741	st->rollback = true;
 742
 743	/*
 744	 * If we have st->last we need to undo partial multi_instance of this
 745	 * state first. Otherwise start undo at the previous state.
 746	 */
 747	if (!st->last) {
 748		if (st->bringup)
 749			st->state--;
 750		else
 751			st->state++;
 752	}
 753
 754	st->bringup = bringup;
 755	if (cpu_dying(cpu) != !bringup)
 756		set_cpu_dying(cpu, !bringup);
 757}
 758
 759/* Regular hotplug invocation of the AP hotplug thread */
 760static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
 761{
 762	if (!st->single && st->state == st->target)
 763		return;
 764
 765	st->result = 0;
 766	/*
 767	 * Make sure the above stores are visible before should_run becomes
 768	 * true. Paired with the mb() above in cpuhp_thread_fun()
 769	 */
 770	smp_mb();
 771	st->should_run = true;
 772	wake_up_process(st->thread);
 773	wait_for_ap_thread(st, st->bringup);
 774}
 775
 776static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
 777			 enum cpuhp_state target)
 778{
 779	enum cpuhp_state prev_state;
 780	int ret;
 781
 782	prev_state = cpuhp_set_state(cpu, st, target);
 783	__cpuhp_kick_ap(st);
 784	if ((ret = st->result)) {
 785		cpuhp_reset_state(cpu, st, prev_state);
 786		__cpuhp_kick_ap(st);
 787	}
 788
 789	return ret;
 790}
 791
 792static int bringup_wait_for_ap_online(unsigned int cpu)
 793{
 794	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 795
 796	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
 797	wait_for_ap_thread(st, true);
 798	if (WARN_ON_ONCE((!cpu_online(cpu))))
 799		return -ECANCELED;
 800
 801	/* Unpark the hotplug thread of the target cpu */
 802	kthread_unpark(st->thread);
 803
 804	/*
 805	 * SMT soft disabling on X86 requires to bring the CPU out of the
 806	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
 807	 * CPU marked itself as booted_once in notify_cpu_starting() so the
 808	 * cpu_bootable() check will now return false if this is not the
 809	 * primary sibling.
 810	 */
 811	if (!cpu_bootable(cpu))
 812		return -ECANCELED;
 813	return 0;
 814}
 815
 816#ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
 817static int cpuhp_kick_ap_alive(unsigned int cpu)
 818{
 819	if (!cpuhp_can_boot_ap(cpu))
 820		return -EAGAIN;
 821
 822	return arch_cpuhp_kick_ap_alive(cpu, idle_thread_get(cpu));
 823}
 824
 825static int cpuhp_bringup_ap(unsigned int cpu)
 826{
 827	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 828	int ret;
 829
 830	/*
 831	 * Some architectures have to walk the irq descriptors to
 832	 * setup the vector space for the cpu which comes online.
 833	 * Prevent irq alloc/free across the bringup.
 834	 */
 835	irq_lock_sparse();
 836
 837	ret = cpuhp_bp_sync_alive(cpu);
 838	if (ret)
 839		goto out_unlock;
 840
 841	ret = bringup_wait_for_ap_online(cpu);
 842	if (ret)
 843		goto out_unlock;
 844
 845	irq_unlock_sparse();
 846
 847	if (st->target <= CPUHP_AP_ONLINE_IDLE)
 848		return 0;
 849
 850	return cpuhp_kick_ap(cpu, st, st->target);
 851
 852out_unlock:
 853	irq_unlock_sparse();
 854	return ret;
 855}
 856#else
 857static int bringup_cpu(unsigned int cpu)
 858{
 859	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 860	struct task_struct *idle = idle_thread_get(cpu);
 861	int ret;
 862
 863	if (!cpuhp_can_boot_ap(cpu))
 864		return -EAGAIN;
 865
 866	/*
 867	 * Some architectures have to walk the irq descriptors to
 868	 * setup the vector space for the cpu which comes online.
 869	 *
 870	 * Prevent irq alloc/free across the bringup by acquiring the
 871	 * sparse irq lock. Hold it until the upcoming CPU completes the
 872	 * startup in cpuhp_online_idle() which allows to avoid
 873	 * intermediate synchronization points in the architecture code.
 874	 */
 875	irq_lock_sparse();
 876
 
 877	ret = __cpu_up(cpu, idle);
 878	if (ret)
 879		goto out_unlock;
 880
 881	ret = cpuhp_bp_sync_alive(cpu);
 882	if (ret)
 883		goto out_unlock;
 884
 885	ret = bringup_wait_for_ap_online(cpu);
 886	if (ret)
 887		goto out_unlock;
 888
 889	irq_unlock_sparse();
 890
 891	if (st->target <= CPUHP_AP_ONLINE_IDLE)
 892		return 0;
 893
 894	return cpuhp_kick_ap(cpu, st, st->target);
 895
 896out_unlock:
 897	irq_unlock_sparse();
 898	return ret;
 
 
 899}
 900#endif
 901
 902static int finish_cpu(unsigned int cpu)
 903{
 904	struct task_struct *idle = idle_thread_get(cpu);
 905	struct mm_struct *mm = idle->active_mm;
 906
 907	/*
 908	 * idle_task_exit() will have switched to &init_mm, now
 909	 * clean up any remaining active_mm state.
 910	 */
 911	if (mm != &init_mm)
 912		idle->active_mm = &init_mm;
 913	mmdrop_lazy_tlb(mm);
 914	return 0;
 915}
 916
 917/*
 918 * Hotplug state machine related functions
 919 */
 920
 921/*
 922 * Get the next state to run. Empty ones will be skipped. Returns true if a
 923 * state must be run.
 924 *
 925 * st->state will be modified ahead of time, to match state_to_run, as if it
 926 * has already ran.
 927 */
 928static bool cpuhp_next_state(bool bringup,
 929			     enum cpuhp_state *state_to_run,
 930			     struct cpuhp_cpu_state *st,
 931			     enum cpuhp_state target)
 932{
 933	do {
 934		if (bringup) {
 935			if (st->state >= target)
 936				return false;
 937
 938			*state_to_run = ++st->state;
 939		} else {
 940			if (st->state <= target)
 941				return false;
 942
 943			*state_to_run = st->state--;
 944		}
 945
 946		if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
 947			break;
 948	} while (true);
 949
 950	return true;
 951}
 952
 953static int __cpuhp_invoke_callback_range(bool bringup,
 954					 unsigned int cpu,
 955					 struct cpuhp_cpu_state *st,
 956					 enum cpuhp_state target,
 957					 bool nofail)
 958{
 959	enum cpuhp_state state;
 960	int ret = 0;
 961
 962	while (cpuhp_next_state(bringup, &state, st, target)) {
 963		int err;
 964
 965		err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
 966		if (!err)
 967			continue;
 968
 969		if (nofail) {
 970			pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
 971				cpu, bringup ? "UP" : "DOWN",
 972				cpuhp_get_step(st->state)->name,
 973				st->state, err);
 974			ret = -1;
 975		} else {
 976			ret = err;
 977			break;
 978		}
 979	}
 980
 981	return ret;
 982}
 983
 984static inline int cpuhp_invoke_callback_range(bool bringup,
 985					      unsigned int cpu,
 986					      struct cpuhp_cpu_state *st,
 987					      enum cpuhp_state target)
 988{
 989	return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
 990}
 991
 992static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
 993						      unsigned int cpu,
 994						      struct cpuhp_cpu_state *st,
 995						      enum cpuhp_state target)
 996{
 997	__cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
 998}
 999
1000static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
1001{
1002	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
1003		return true;
1004	/*
1005	 * When CPU hotplug is disabled, then taking the CPU down is not
1006	 * possible because takedown_cpu() and the architecture and
1007	 * subsystem specific mechanisms are not available. So the CPU
1008	 * which would be completely unplugged again needs to stay around
1009	 * in the current state.
1010	 */
1011	return st->state <= CPUHP_BRINGUP_CPU;
1012}
1013
1014static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1015			      enum cpuhp_state target)
1016{
1017	enum cpuhp_state prev_state = st->state;
1018	int ret = 0;
1019
1020	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
1021	if (ret) {
1022		pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
1023			 ret, cpu, cpuhp_get_step(st->state)->name,
1024			 st->state);
1025
1026		cpuhp_reset_state(cpu, st, prev_state);
1027		if (can_rollback_cpu(st))
1028			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
1029							    prev_state));
1030	}
1031	return ret;
1032}
1033
1034/*
1035 * The cpu hotplug threads manage the bringup and teardown of the cpus
1036 */
 
 
 
 
 
 
 
 
 
1037static int cpuhp_should_run(unsigned int cpu)
1038{
1039	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1040
1041	return st->should_run;
1042}
1043
1044/*
1045 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
1046 * callbacks when a state gets [un]installed at runtime.
1047 *
1048 * Each invocation of this function by the smpboot thread does a single AP
1049 * state callback.
1050 *
1051 * It has 3 modes of operation:
1052 *  - single: runs st->cb_state
1053 *  - up:     runs ++st->state, while st->state < st->target
1054 *  - down:   runs st->state--, while st->state > st->target
1055 *
1056 * When complete or on error, should_run is cleared and the completion is fired.
1057 */
1058static void cpuhp_thread_fun(unsigned int cpu)
1059{
1060	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1061	bool bringup = st->bringup;
1062	enum cpuhp_state state;
1063
1064	if (WARN_ON_ONCE(!st->should_run))
1065		return;
1066
1067	/*
1068	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
1069	 * that if we see ->should_run we also see the rest of the state.
1070	 */
1071	smp_mb();
1072
1073	/*
1074	 * The BP holds the hotplug lock, but we're now running on the AP,
1075	 * ensure that anybody asserting the lock is held, will actually find
1076	 * it so.
1077	 */
1078	lockdep_acquire_cpus_lock();
1079	cpuhp_lock_acquire(bringup);
1080
1081	if (st->single) {
1082		state = st->cb_state;
1083		st->should_run = false;
1084	} else {
1085		st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
1086		if (!st->should_run)
1087			goto end;
1088	}
1089
1090	WARN_ON_ONCE(!cpuhp_is_ap_state(state));
1091
1092	if (cpuhp_is_atomic_state(state)) {
1093		local_irq_disable();
1094		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
1095		local_irq_enable();
1096
1097		/*
1098		 * STARTING/DYING must not fail!
1099		 */
1100		WARN_ON_ONCE(st->result);
1101	} else {
1102		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
1103	}
1104
1105	if (st->result) {
1106		/*
1107		 * If we fail on a rollback, we're up a creek without no
1108		 * paddle, no way forward, no way back. We loose, thanks for
1109		 * playing.
1110		 */
1111		WARN_ON_ONCE(st->rollback);
1112		st->should_run = false;
1113	}
1114
1115end:
1116	cpuhp_lock_release(bringup);
1117	lockdep_release_cpus_lock();
1118
1119	if (!st->should_run)
1120		complete_ap_thread(st, bringup);
1121}
1122
1123/* Invoke a single callback on a remote cpu */
1124static int
1125cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
1126			 struct hlist_node *node)
1127{
1128	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1129	int ret;
1130
1131	if (!cpu_online(cpu))
1132		return 0;
1133
1134	cpuhp_lock_acquire(false);
1135	cpuhp_lock_release(false);
1136
1137	cpuhp_lock_acquire(true);
1138	cpuhp_lock_release(true);
1139
1140	/*
1141	 * If we are up and running, use the hotplug thread. For early calls
1142	 * we invoke the thread function directly.
1143	 */
1144	if (!st->thread)
1145		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1146
1147	st->rollback = false;
1148	st->last = NULL;
1149
1150	st->node = node;
1151	st->bringup = bringup;
1152	st->cb_state = state;
1153	st->single = true;
1154
1155	__cpuhp_kick_ap(st);
1156
1157	/*
1158	 * If we failed and did a partial, do a rollback.
1159	 */
1160	if ((ret = st->result) && st->last) {
1161		st->rollback = true;
1162		st->bringup = !bringup;
1163
1164		__cpuhp_kick_ap(st);
1165	}
1166
1167	/*
1168	 * Clean up the leftovers so the next hotplug operation wont use stale
1169	 * data.
1170	 */
1171	st->node = st->last = NULL;
1172	return ret;
1173}
1174
1175static int cpuhp_kick_ap_work(unsigned int cpu)
1176{
1177	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1178	enum cpuhp_state prev_state = st->state;
1179	int ret;
1180
1181	cpuhp_lock_acquire(false);
1182	cpuhp_lock_release(false);
1183
1184	cpuhp_lock_acquire(true);
1185	cpuhp_lock_release(true);
1186
1187	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
1188	ret = cpuhp_kick_ap(cpu, st, st->target);
1189	trace_cpuhp_exit(cpu, st->state, prev_state, ret);
1190
1191	return ret;
1192}
1193
1194static struct smp_hotplug_thread cpuhp_threads = {
1195	.store			= &cpuhp_state.thread,
 
1196	.thread_should_run	= cpuhp_should_run,
1197	.thread_fn		= cpuhp_thread_fun,
1198	.thread_comm		= "cpuhp/%u",
1199	.selfparking		= true,
1200};
1201
1202static __init void cpuhp_init_state(void)
1203{
1204	struct cpuhp_cpu_state *st;
1205	int cpu;
1206
1207	for_each_possible_cpu(cpu) {
1208		st = per_cpu_ptr(&cpuhp_state, cpu);
1209		init_completion(&st->done_up);
1210		init_completion(&st->done_down);
1211	}
1212}
1213
1214void __init cpuhp_threads_init(void)
1215{
1216	cpuhp_init_state();
1217	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
1218	kthread_unpark(this_cpu_read(cpuhp_state.thread));
1219}
1220
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1221#ifdef CONFIG_HOTPLUG_CPU
1222#ifndef arch_clear_mm_cpumask_cpu
1223#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
1224#endif
1225
1226/**
1227 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
1228 * @cpu: a CPU id
1229 *
1230 * This function walks all processes, finds a valid mm struct for each one and
1231 * then clears a corresponding bit in mm's cpumask.  While this all sounds
1232 * trivial, there are various non-obvious corner cases, which this function
1233 * tries to solve in a safe manner.
1234 *
1235 * Also note that the function uses a somewhat relaxed locking scheme, so it may
1236 * be called only for an already offlined CPU.
1237 */
1238void clear_tasks_mm_cpumask(int cpu)
1239{
1240	struct task_struct *p;
1241
1242	/*
1243	 * This function is called after the cpu is taken down and marked
1244	 * offline, so its not like new tasks will ever get this cpu set in
1245	 * their mm mask. -- Peter Zijlstra
1246	 * Thus, we may use rcu_read_lock() here, instead of grabbing
1247	 * full-fledged tasklist_lock.
1248	 */
1249	WARN_ON(cpu_online(cpu));
1250	rcu_read_lock();
1251	for_each_process(p) {
1252		struct task_struct *t;
1253
1254		/*
1255		 * Main thread might exit, but other threads may still have
1256		 * a valid mm. Find one.
1257		 */
1258		t = find_lock_task_mm(p);
1259		if (!t)
1260			continue;
1261		arch_clear_mm_cpumask_cpu(cpu, t->mm);
1262		task_unlock(t);
1263	}
1264	rcu_read_unlock();
1265}
1266
1267/* Take this CPU down. */
1268static int take_cpu_down(void *_param)
1269{
1270	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1271	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
1272	int err, cpu = smp_processor_id();
 
1273
1274	/* Ensure this CPU doesn't handle any more interrupts. */
1275	err = __cpu_disable();
1276	if (err < 0)
1277		return err;
1278
1279	/*
1280	 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
1281	 * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
1282	 */
1283	WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
1284
 
 
 
1285	/*
1286	 * Invoke the former CPU_DYING callbacks. DYING must not fail!
1287	 */
1288	cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
1289
 
 
 
 
1290	/* Park the stopper thread */
1291	stop_machine_park(cpu);
1292	return 0;
1293}
1294
1295static int takedown_cpu(unsigned int cpu)
1296{
1297	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1298	int err;
1299
1300	/* Park the smpboot threads */
1301	kthread_park(st->thread);
1302
1303	/*
1304	 * Prevent irq alloc/free while the dying cpu reorganizes the
1305	 * interrupt affinities.
1306	 */
1307	irq_lock_sparse();
1308
1309	/*
1310	 * So now all preempt/rcu users must observe !cpu_active().
1311	 */
1312	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
1313	if (err) {
1314		/* CPU refused to die */
1315		irq_unlock_sparse();
1316		/* Unpark the hotplug thread so we can rollback there */
1317		kthread_unpark(st->thread);
1318		return err;
1319	}
1320	BUG_ON(cpu_online(cpu));
1321
1322	/*
1323	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
1324	 * all runnable tasks from the CPU, there's only the idle task left now
1325	 * that the migration thread is done doing the stop_machine thing.
1326	 *
1327	 * Wait for the stop thread to go away.
1328	 */
1329	wait_for_ap_thread(st, false);
1330	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1331
1332	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
1333	irq_unlock_sparse();
1334
1335	hotplug_cpu__broadcast_tick_pull(cpu);
1336	/* This actually kills the CPU. */
1337	__cpu_die(cpu);
1338
1339	cpuhp_bp_sync_dead(cpu);
1340
1341	lockdep_cleanup_dead_cpu(cpu, idle_thread_get(cpu));
1342
1343	/*
1344	 * Callbacks must be re-integrated right away to the RCU state machine.
1345	 * Otherwise an RCU callback could block a further teardown function
1346	 * waiting for its completion.
1347	 */
1348	rcutree_migrate_callbacks(cpu);
1349
1350	return 0;
1351}
1352
1353static void cpuhp_complete_idle_dead(void *arg)
1354{
1355	struct cpuhp_cpu_state *st = arg;
1356
1357	complete_ap_thread(st, false);
1358}
1359
1360void cpuhp_report_idle_dead(void)
1361{
1362	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1363
1364	BUG_ON(st->state != CPUHP_AP_OFFLINE);
1365	tick_assert_timekeeping_handover();
1366	rcutree_report_cpu_dead();
1367	st->state = CPUHP_AP_IDLE_DEAD;
1368	/*
1369	 * We cannot call complete after rcutree_report_cpu_dead() so we delegate it
1370	 * to an online cpu.
1371	 */
1372	smp_call_function_single(cpumask_first(cpu_online_mask),
1373				 cpuhp_complete_idle_dead, st, 0);
1374}
1375
1376static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1377				enum cpuhp_state target)
1378{
1379	enum cpuhp_state prev_state = st->state;
1380	int ret = 0;
1381
1382	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
1383	if (ret) {
1384		pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
1385			 ret, cpu, cpuhp_get_step(st->state)->name,
1386			 st->state);
1387
1388		cpuhp_reset_state(cpu, st, prev_state);
1389
1390		if (st->state < prev_state)
1391			WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
1392							    prev_state));
1393	}
1394
1395	return ret;
1396}
1397
1398/* Requires cpu_add_remove_lock to be held */
1399static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1400			   enum cpuhp_state target)
1401{
1402	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1403	int prev_state, ret = 0;
1404
1405	if (num_online_cpus() == 1)
1406		return -EBUSY;
1407
1408	if (!cpu_present(cpu))
1409		return -EINVAL;
1410
1411	cpus_write_lock();
1412
1413	cpuhp_tasks_frozen = tasks_frozen;
1414
1415	prev_state = cpuhp_set_state(cpu, st, target);
1416	/*
1417	 * If the current CPU state is in the range of the AP hotplug thread,
1418	 * then we need to kick the thread.
1419	 */
1420	if (st->state > CPUHP_TEARDOWN_CPU) {
1421		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
1422		ret = cpuhp_kick_ap_work(cpu);
1423		/*
1424		 * The AP side has done the error rollback already. Just
1425		 * return the error code..
1426		 */
1427		if (ret)
1428			goto out;
1429
1430		/*
1431		 * We might have stopped still in the range of the AP hotplug
1432		 * thread. Nothing to do anymore.
1433		 */
1434		if (st->state > CPUHP_TEARDOWN_CPU)
1435			goto out;
1436
1437		st->target = target;
1438	}
1439	/*
1440	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1441	 * to do the further cleanups.
1442	 */
1443	ret = cpuhp_down_callbacks(cpu, st, target);
1444	if (ret && st->state < prev_state) {
1445		if (st->state == CPUHP_TEARDOWN_CPU) {
1446			cpuhp_reset_state(cpu, st, prev_state);
1447			__cpuhp_kick_ap(st);
1448		} else {
1449			WARN(1, "DEAD callback error for CPU%d", cpu);
1450		}
1451	}
1452
1453out:
1454	cpus_write_unlock();
1455	/*
1456	 * Do post unplug cleanup. This is still protected against
1457	 * concurrent CPU hotplug via cpu_add_remove_lock.
1458	 */
1459	lockup_detector_cleanup();
1460	arch_smt_update();
 
1461	return ret;
1462}
1463
1464struct cpu_down_work {
1465	unsigned int		cpu;
1466	enum cpuhp_state	target;
1467};
1468
1469static long __cpu_down_maps_locked(void *arg)
1470{
1471	struct cpu_down_work *work = arg;
1472
1473	return _cpu_down(work->cpu, 0, work->target);
1474}
1475
1476static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1477{
1478	struct cpu_down_work work = { .cpu = cpu, .target = target, };
1479
1480	/*
1481	 * If the platform does not support hotplug, report it explicitly to
1482	 * differentiate it from a transient offlining failure.
1483	 */
1484	if (cpu_hotplug_offline_disabled)
1485		return -EOPNOTSUPP;
1486	if (cpu_hotplug_disabled)
1487		return -EBUSY;
1488
1489	/*
1490	 * Ensure that the control task does not run on the to be offlined
1491	 * CPU to prevent a deadlock against cfs_b->period_timer.
1492	 * Also keep at least one housekeeping cpu onlined to avoid generating
1493	 * an empty sched_domain span.
1494	 */
1495	for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) {
1496		if (cpu != work.cpu)
1497			return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
1498	}
1499	return -EBUSY;
1500}
1501
1502static int cpu_down(unsigned int cpu, enum cpuhp_state target)
1503{
1504	int err;
1505
1506	cpu_maps_update_begin();
1507	err = cpu_down_maps_locked(cpu, target);
1508	cpu_maps_update_done();
1509	return err;
1510}
1511
1512/**
1513 * cpu_device_down - Bring down a cpu device
1514 * @dev: Pointer to the cpu device to offline
1515 *
1516 * This function is meant to be used by device core cpu subsystem only.
1517 *
1518 * Other subsystems should use remove_cpu() instead.
1519 *
1520 * Return: %0 on success or a negative errno code
1521 */
1522int cpu_device_down(struct device *dev)
1523{
1524	return cpu_down(dev->id, CPUHP_OFFLINE);
1525}
1526
1527int remove_cpu(unsigned int cpu)
1528{
1529	int ret;
1530
1531	lock_device_hotplug();
1532	ret = device_offline(get_cpu_device(cpu));
1533	unlock_device_hotplug();
1534
1535	return ret;
1536}
1537EXPORT_SYMBOL_GPL(remove_cpu);
1538
1539void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
1540{
1541	unsigned int cpu;
1542	int error;
1543
1544	cpu_maps_update_begin();
1545
1546	/*
1547	 * Make certain the cpu I'm about to reboot on is online.
1548	 *
1549	 * This is inline to what migrate_to_reboot_cpu() already do.
1550	 */
1551	if (!cpu_online(primary_cpu))
1552		primary_cpu = cpumask_first(cpu_online_mask);
1553
1554	for_each_online_cpu(cpu) {
1555		if (cpu == primary_cpu)
1556			continue;
1557
1558		error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
1559		if (error) {
1560			pr_err("Failed to offline CPU%d - error=%d",
1561				cpu, error);
1562			break;
1563		}
1564	}
1565
1566	/*
1567	 * Ensure all but the reboot CPU are offline.
1568	 */
1569	BUG_ON(num_online_cpus() > 1);
1570
1571	/*
1572	 * Make sure the CPUs won't be enabled by someone else after this
1573	 * point. Kexec will reboot to a new kernel shortly resetting
1574	 * everything along the way.
1575	 */
1576	cpu_hotplug_disabled++;
1577
1578	cpu_maps_update_done();
1579}
1580
1581#else
1582#define takedown_cpu		NULL
1583#endif /*CONFIG_HOTPLUG_CPU*/
1584
1585/**
1586 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1587 * @cpu: cpu that just started
1588 *
1589 * It must be called by the arch code on the new cpu, before the new cpu
1590 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1591 */
1592void notify_cpu_starting(unsigned int cpu)
1593{
1594	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1595	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
 
1596
1597	rcutree_report_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
1598	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
 
1599
1600	/*
1601	 * STARTING must not fail!
1602	 */
1603	cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
1604}
1605
1606/*
1607 * Called from the idle task. Wake up the controlling task which brings the
1608 * hotplug thread of the upcoming CPU up and then delegates the rest of the
1609 * online bringup to the hotplug thread.
1610 */
1611void cpuhp_online_idle(enum cpuhp_state state)
1612{
1613	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1614
1615	/* Happens for the boot cpu */
1616	if (state != CPUHP_AP_ONLINE_IDLE)
1617		return;
1618
1619	cpuhp_ap_update_sync_state(SYNC_STATE_ONLINE);
1620
1621	/*
1622	 * Unpark the stopper thread before we start the idle loop (and start
1623	 * scheduling); this ensures the stopper task is always available.
1624	 */
1625	stop_machine_unpark(smp_processor_id());
1626
1627	st->state = CPUHP_AP_ONLINE_IDLE;
1628	complete_ap_thread(st, true);
1629}
1630
1631/* Requires cpu_add_remove_lock to be held */
1632static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1633{
1634	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1635	struct task_struct *idle;
1636	int ret = 0;
1637
1638	cpus_write_lock();
1639
1640	if (!cpu_present(cpu)) {
1641		ret = -EINVAL;
1642		goto out;
1643	}
1644
1645	/*
1646	 * The caller of cpu_up() might have raced with another
1647	 * caller. Nothing to do.
1648	 */
1649	if (st->state >= target)
1650		goto out;
1651
1652	if (st->state == CPUHP_OFFLINE) {
1653		/* Let it fail before we try to bring the cpu up */
1654		idle = idle_thread_get(cpu);
1655		if (IS_ERR(idle)) {
1656			ret = PTR_ERR(idle);
1657			goto out;
1658		}
1659
1660		/*
1661		 * Reset stale stack state from the last time this CPU was online.
1662		 */
1663		scs_task_reset(idle);
1664		kasan_unpoison_task_stack(idle);
1665	}
1666
1667	cpuhp_tasks_frozen = tasks_frozen;
1668
1669	cpuhp_set_state(cpu, st, target);
1670	/*
1671	 * If the current CPU state is in the range of the AP hotplug thread,
1672	 * then we need to kick the thread once more.
1673	 */
1674	if (st->state > CPUHP_BRINGUP_CPU) {
1675		ret = cpuhp_kick_ap_work(cpu);
1676		/*
1677		 * The AP side has done the error rollback already. Just
1678		 * return the error code..
1679		 */
1680		if (ret)
1681			goto out;
1682	}
1683
1684	/*
1685	 * Try to reach the target state. We max out on the BP at
1686	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1687	 * responsible for bringing it up to the target state.
1688	 */
1689	target = min((int)target, CPUHP_BRINGUP_CPU);
1690	ret = cpuhp_up_callbacks(cpu, st, target);
1691out:
1692	cpus_write_unlock();
1693	arch_smt_update();
 
1694	return ret;
1695}
1696
1697static int cpu_up(unsigned int cpu, enum cpuhp_state target)
1698{
1699	int err = 0;
1700
1701	if (!cpu_possible(cpu)) {
1702		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1703		       cpu);
 
 
 
1704		return -EINVAL;
1705	}
1706
1707	err = try_online_node(cpu_to_node(cpu));
1708	if (err)
1709		return err;
1710
1711	cpu_maps_update_begin();
1712
1713	if (cpu_hotplug_disabled) {
1714		err = -EBUSY;
1715		goto out;
1716	}
1717	if (!cpu_bootable(cpu)) {
1718		err = -EPERM;
1719		goto out;
1720	}
1721
1722	err = _cpu_up(cpu, 0, target);
1723out:
1724	cpu_maps_update_done();
1725	return err;
1726}
1727
1728/**
1729 * cpu_device_up - Bring up a cpu device
1730 * @dev: Pointer to the cpu device to online
1731 *
1732 * This function is meant to be used by device core cpu subsystem only.
1733 *
1734 * Other subsystems should use add_cpu() instead.
1735 *
1736 * Return: %0 on success or a negative errno code
1737 */
1738int cpu_device_up(struct device *dev)
1739{
1740	return cpu_up(dev->id, CPUHP_ONLINE);
1741}
1742
1743int add_cpu(unsigned int cpu)
1744{
1745	int ret;
1746
1747	lock_device_hotplug();
1748	ret = device_online(get_cpu_device(cpu));
1749	unlock_device_hotplug();
1750
1751	return ret;
1752}
1753EXPORT_SYMBOL_GPL(add_cpu);
1754
1755/**
1756 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1757 * @sleep_cpu: The cpu we hibernated on and should be brought up.
1758 *
1759 * On some architectures like arm64, we can hibernate on any CPU, but on
1760 * wake up the CPU we hibernated on might be offline as a side effect of
1761 * using maxcpus= for example.
1762 *
1763 * Return: %0 on success or a negative errno code
1764 */
1765int bringup_hibernate_cpu(unsigned int sleep_cpu)
1766{
1767	int ret;
1768
1769	if (!cpu_online(sleep_cpu)) {
1770		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
1771		ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
1772		if (ret) {
1773			pr_err("Failed to bring hibernate-CPU up!\n");
1774			return ret;
1775		}
1776	}
1777	return 0;
1778}
1779
1780static void __init cpuhp_bringup_mask(const struct cpumask *mask, unsigned int ncpus,
1781				      enum cpuhp_state target)
1782{
1783	unsigned int cpu;
1784
1785	for_each_cpu(cpu, mask) {
1786		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1787
1788		if (cpu_up(cpu, target) && can_rollback_cpu(st)) {
1789			/*
1790			 * If this failed then cpu_up() might have only
1791			 * rolled back to CPUHP_BP_KICK_AP for the final
1792			 * online. Clean it up. NOOP if already rolled back.
1793			 */
1794			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, CPUHP_OFFLINE));
1795		}
1796
1797		if (!--ncpus)
1798			break;
 
 
1799	}
1800}
1801
1802#ifdef CONFIG_HOTPLUG_PARALLEL
1803static bool __cpuhp_parallel_bringup __ro_after_init = true;
1804
1805static int __init parallel_bringup_parse_param(char *arg)
1806{
1807	return kstrtobool(arg, &__cpuhp_parallel_bringup);
1808}
1809early_param("cpuhp.parallel", parallel_bringup_parse_param);
1810
1811#ifdef CONFIG_HOTPLUG_SMT
1812static inline bool cpuhp_smt_aware(void)
1813{
1814	return cpu_smt_max_threads > 1;
1815}
1816
1817static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
1818{
1819	return cpu_primary_thread_mask;
1820}
1821#else
1822static inline bool cpuhp_smt_aware(void)
1823{
1824	return false;
1825}
1826static inline const struct cpumask *cpuhp_get_primary_thread_mask(void)
1827{
1828	return cpu_none_mask;
1829}
1830#endif
1831
1832bool __weak arch_cpuhp_init_parallel_bringup(void)
1833{
1834	return true;
1835}
1836
1837/*
1838 * On architectures which have enabled parallel bringup this invokes all BP
1839 * prepare states for each of the to be onlined APs first. The last state
1840 * sends the startup IPI to the APs. The APs proceed through the low level
1841 * bringup code in parallel and then wait for the control CPU to release
1842 * them one by one for the final onlining procedure.
1843 *
1844 * This avoids waiting for each AP to respond to the startup IPI in
1845 * CPUHP_BRINGUP_CPU.
1846 */
1847static bool __init cpuhp_bringup_cpus_parallel(unsigned int ncpus)
1848{
1849	const struct cpumask *mask = cpu_present_mask;
1850
1851	if (__cpuhp_parallel_bringup)
1852		__cpuhp_parallel_bringup = arch_cpuhp_init_parallel_bringup();
1853	if (!__cpuhp_parallel_bringup)
1854		return false;
1855
1856	if (cpuhp_smt_aware()) {
1857		const struct cpumask *pmask = cpuhp_get_primary_thread_mask();
1858		static struct cpumask tmp_mask __initdata;
1859
1860		/*
1861		 * X86 requires to prevent that SMT siblings stopped while
1862		 * the primary thread does a microcode update for various
1863		 * reasons. Bring the primary threads up first.
1864		 */
1865		cpumask_and(&tmp_mask, mask, pmask);
1866		cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_BP_KICK_AP);
1867		cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_ONLINE);
1868		/* Account for the online CPUs */
1869		ncpus -= num_online_cpus();
1870		if (!ncpus)
1871			return true;
1872		/* Create the mask for secondary CPUs */
1873		cpumask_andnot(&tmp_mask, mask, pmask);
1874		mask = &tmp_mask;
1875	}
1876
1877	/* Bring the not-yet started CPUs up */
1878	cpuhp_bringup_mask(mask, ncpus, CPUHP_BP_KICK_AP);
1879	cpuhp_bringup_mask(mask, ncpus, CPUHP_ONLINE);
1880	return true;
1881}
1882#else
1883static inline bool cpuhp_bringup_cpus_parallel(unsigned int ncpus) { return false; }
1884#endif /* CONFIG_HOTPLUG_PARALLEL */
1885
1886void __init bringup_nonboot_cpus(unsigned int max_cpus)
1887{
1888	if (!max_cpus)
1889		return;
1890
1891	/* Try parallel bringup optimization if enabled */
1892	if (cpuhp_bringup_cpus_parallel(max_cpus))
1893		return;
1894
1895	/* Full per CPU serialized bringup */
1896	cpuhp_bringup_mask(cpu_present_mask, max_cpus, CPUHP_ONLINE);
1897}
1898
1899#ifdef CONFIG_PM_SLEEP_SMP
1900static cpumask_var_t frozen_cpus;
1901
1902int freeze_secondary_cpus(int primary)
1903{
1904	int cpu, error = 0;
1905
1906	cpu_maps_update_begin();
1907	if (primary == -1) {
1908		primary = cpumask_first(cpu_online_mask);
1909		if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
1910			primary = housekeeping_any_cpu(HK_TYPE_TIMER);
1911	} else {
1912		if (!cpu_online(primary))
1913			primary = cpumask_first(cpu_online_mask);
1914	}
1915
1916	/*
1917	 * We take down all of the non-boot CPUs in one shot to avoid races
1918	 * with the userspace trying to use the CPU hotplug at the same time
1919	 */
1920	cpumask_clear(frozen_cpus);
1921
1922	pr_info("Disabling non-boot CPUs ...\n");
1923	for (cpu = nr_cpu_ids - 1; cpu >= 0; cpu--) {
1924		if (!cpu_online(cpu) || cpu == primary)
1925			continue;
1926
1927		if (pm_wakeup_pending()) {
1928			pr_info("Wakeup pending. Abort CPU freeze\n");
1929			error = -EBUSY;
1930			break;
1931		}
1932
1933		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1934		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1935		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1936		if (!error)
1937			cpumask_set_cpu(cpu, frozen_cpus);
1938		else {
1939			pr_err("Error taking CPU%d down: %d\n", cpu, error);
1940			break;
1941		}
1942	}
1943
1944	if (!error)
1945		BUG_ON(num_online_cpus() > 1);
1946	else
1947		pr_err("Non-boot CPUs are not disabled\n");
1948
1949	/*
1950	 * Make sure the CPUs won't be enabled by someone else. We need to do
1951	 * this even in case of failure as all freeze_secondary_cpus() users are
1952	 * supposed to do thaw_secondary_cpus() on the failure path.
1953	 */
1954	cpu_hotplug_disabled++;
1955
1956	cpu_maps_update_done();
1957	return error;
1958}
1959
1960void __weak arch_thaw_secondary_cpus_begin(void)
1961{
1962}
1963
1964void __weak arch_thaw_secondary_cpus_end(void)
1965{
1966}
1967
1968void thaw_secondary_cpus(void)
1969{
1970	int cpu, error;
1971
1972	/* Allow everyone to use the CPU hotplug again */
1973	cpu_maps_update_begin();
1974	__cpu_hotplug_enable();
1975	if (cpumask_empty(frozen_cpus))
1976		goto out;
1977
1978	pr_info("Enabling non-boot CPUs ...\n");
1979
1980	arch_thaw_secondary_cpus_begin();
1981
1982	for_each_cpu(cpu, frozen_cpus) {
1983		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1984		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1985		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1986		if (!error) {
1987			pr_info("CPU%d is up\n", cpu);
1988			continue;
1989		}
1990		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1991	}
1992
1993	arch_thaw_secondary_cpus_end();
1994
1995	cpumask_clear(frozen_cpus);
1996out:
1997	cpu_maps_update_done();
1998}
1999
2000static int __init alloc_frozen_cpus(void)
2001{
2002	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
2003		return -ENOMEM;
2004	return 0;
2005}
2006core_initcall(alloc_frozen_cpus);
2007
2008/*
2009 * When callbacks for CPU hotplug notifications are being executed, we must
2010 * ensure that the state of the system with respect to the tasks being frozen
2011 * or not, as reported by the notification, remains unchanged *throughout the
2012 * duration* of the execution of the callbacks.
2013 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
2014 *
2015 * This synchronization is implemented by mutually excluding regular CPU
2016 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
2017 * Hibernate notifications.
2018 */
2019static int
2020cpu_hotplug_pm_callback(struct notifier_block *nb,
2021			unsigned long action, void *ptr)
2022{
2023	switch (action) {
2024
2025	case PM_SUSPEND_PREPARE:
2026	case PM_HIBERNATION_PREPARE:
2027		cpu_hotplug_disable();
2028		break;
2029
2030	case PM_POST_SUSPEND:
2031	case PM_POST_HIBERNATION:
2032		cpu_hotplug_enable();
2033		break;
2034
2035	default:
2036		return NOTIFY_DONE;
2037	}
2038
2039	return NOTIFY_OK;
2040}
2041
2042
2043static int __init cpu_hotplug_pm_sync_init(void)
2044{
2045	/*
2046	 * cpu_hotplug_pm_callback has higher priority than x86
2047	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
2048	 * to disable cpu hotplug to avoid cpu hotplug race.
2049	 */
2050	pm_notifier(cpu_hotplug_pm_callback, 0);
2051	return 0;
2052}
2053core_initcall(cpu_hotplug_pm_sync_init);
2054
2055#endif /* CONFIG_PM_SLEEP_SMP */
2056
2057int __boot_cpu_id;
2058
2059#endif /* CONFIG_SMP */
2060
2061/* Boot processor state steps */
2062static struct cpuhp_step cpuhp_hp_states[] = {
2063	[CPUHP_OFFLINE] = {
2064		.name			= "offline",
2065		.startup.single		= NULL,
2066		.teardown.single	= NULL,
2067	},
2068#ifdef CONFIG_SMP
2069	[CPUHP_CREATE_THREADS]= {
2070		.name			= "threads:prepare",
2071		.startup.single		= smpboot_create_threads,
2072		.teardown.single	= NULL,
2073		.cant_stop		= true,
2074	},
2075	[CPUHP_PERF_PREPARE] = {
2076		.name			= "perf:prepare",
2077		.startup.single		= perf_event_init_cpu,
2078		.teardown.single	= perf_event_exit_cpu,
2079	},
2080	[CPUHP_RANDOM_PREPARE] = {
2081		.name			= "random:prepare",
2082		.startup.single		= random_prepare_cpu,
2083		.teardown.single	= NULL,
2084	},
2085	[CPUHP_WORKQUEUE_PREP] = {
2086		.name			= "workqueue:prepare",
2087		.startup.single		= workqueue_prepare_cpu,
2088		.teardown.single	= NULL,
2089	},
2090	[CPUHP_HRTIMERS_PREPARE] = {
2091		.name			= "hrtimers:prepare",
2092		.startup.single		= hrtimers_prepare_cpu,
2093		.teardown.single	= NULL,
2094	},
2095	[CPUHP_SMPCFD_PREPARE] = {
2096		.name			= "smpcfd:prepare",
2097		.startup.single		= smpcfd_prepare_cpu,
2098		.teardown.single	= smpcfd_dead_cpu,
2099	},
2100	[CPUHP_RELAY_PREPARE] = {
2101		.name			= "relay:prepare",
2102		.startup.single		= relay_prepare_cpu,
2103		.teardown.single	= NULL,
2104	},
 
 
 
 
 
2105	[CPUHP_RCUTREE_PREP] = {
2106		.name			= "RCU/tree:prepare",
2107		.startup.single		= rcutree_prepare_cpu,
2108		.teardown.single	= rcutree_dead_cpu,
2109	},
2110	/*
2111	 * On the tear-down path, timers_dead_cpu() must be invoked
2112	 * before blk_mq_queue_reinit_notify() from notify_dead(),
2113	 * otherwise a RCU stall occurs.
2114	 */
2115	[CPUHP_TIMERS_PREPARE] = {
2116		.name			= "timers:prepare",
2117		.startup.single		= timers_prepare_cpu,
2118		.teardown.single	= timers_dead_cpu,
2119	},
2120
2121#ifdef CONFIG_HOTPLUG_SPLIT_STARTUP
2122	/*
2123	 * Kicks the AP alive. AP will wait in cpuhp_ap_sync_alive() until
2124	 * the next step will release it.
2125	 */
2126	[CPUHP_BP_KICK_AP] = {
2127		.name			= "cpu:kick_ap",
2128		.startup.single		= cpuhp_kick_ap_alive,
2129	},
2130
2131	/*
2132	 * Waits for the AP to reach cpuhp_ap_sync_alive() and then
2133	 * releases it for the complete bringup.
2134	 */
2135	[CPUHP_BRINGUP_CPU] = {
2136		.name			= "cpu:bringup",
2137		.startup.single		= cpuhp_bringup_ap,
2138		.teardown.single	= finish_cpu,
2139		.cant_stop		= true,
2140	},
2141#else
2142	/*
2143	 * All-in-one CPU bringup state which includes the kick alive.
2144	 */
2145	[CPUHP_BRINGUP_CPU] = {
2146		.name			= "cpu:bringup",
2147		.startup.single		= bringup_cpu,
2148		.teardown.single	= finish_cpu,
2149		.cant_stop		= true,
2150	},
2151#endif
2152	/* Final state before CPU kills itself */
2153	[CPUHP_AP_IDLE_DEAD] = {
2154		.name			= "idle:dead",
2155	},
2156	/*
2157	 * Last state before CPU enters the idle loop to die. Transient state
2158	 * for synchronization.
2159	 */
2160	[CPUHP_AP_OFFLINE] = {
2161		.name			= "ap:offline",
2162		.cant_stop		= true,
2163	},
2164	/* First state is scheduler control. Interrupts are disabled */
2165	[CPUHP_AP_SCHED_STARTING] = {
2166		.name			= "sched:starting",
2167		.startup.single		= sched_cpu_starting,
2168		.teardown.single	= sched_cpu_dying,
2169	},
2170	[CPUHP_AP_RCUTREE_DYING] = {
2171		.name			= "RCU/tree:dying",
2172		.startup.single		= NULL,
2173		.teardown.single	= rcutree_dying_cpu,
2174	},
2175	[CPUHP_AP_SMPCFD_DYING] = {
2176		.name			= "smpcfd:dying",
2177		.startup.single		= NULL,
2178		.teardown.single	= smpcfd_dying_cpu,
2179	},
2180	[CPUHP_AP_HRTIMERS_DYING] = {
2181		.name			= "hrtimers:dying",
2182		.startup.single		= hrtimers_cpu_starting,
2183		.teardown.single	= hrtimers_cpu_dying,
2184	},
2185	[CPUHP_AP_TICK_DYING] = {
2186		.name			= "tick:dying",
2187		.startup.single		= NULL,
2188		.teardown.single	= tick_cpu_dying,
2189	},
2190	/* Entry state on starting. Interrupts enabled from here on. Transient
2191	 * state for synchronsization */
2192	[CPUHP_AP_ONLINE] = {
2193		.name			= "ap:online",
2194	},
2195	/*
2196	 * Handled on control processor until the plugged processor manages
2197	 * this itself.
2198	 */
2199	[CPUHP_TEARDOWN_CPU] = {
2200		.name			= "cpu:teardown",
2201		.startup.single		= NULL,
2202		.teardown.single	= takedown_cpu,
2203		.cant_stop		= true,
2204	},
2205
2206	[CPUHP_AP_SCHED_WAIT_EMPTY] = {
2207		.name			= "sched:waitempty",
2208		.startup.single		= NULL,
2209		.teardown.single	= sched_cpu_wait_empty,
2210	},
2211
2212	/* Handle smpboot threads park/unpark */
2213	[CPUHP_AP_SMPBOOT_THREADS] = {
2214		.name			= "smpboot/threads:online",
2215		.startup.single		= smpboot_unpark_threads,
2216		.teardown.single	= smpboot_park_threads,
2217	},
2218	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
2219		.name			= "irq/affinity:online",
2220		.startup.single		= irq_affinity_online_cpu,
2221		.teardown.single	= NULL,
2222	},
2223	[CPUHP_AP_PERF_ONLINE] = {
2224		.name			= "perf:online",
2225		.startup.single		= perf_event_init_cpu,
2226		.teardown.single	= perf_event_exit_cpu,
2227	},
2228	[CPUHP_AP_WATCHDOG_ONLINE] = {
2229		.name			= "lockup_detector:online",
2230		.startup.single		= lockup_detector_online_cpu,
2231		.teardown.single	= lockup_detector_offline_cpu,
2232	},
2233	[CPUHP_AP_WORKQUEUE_ONLINE] = {
2234		.name			= "workqueue:online",
2235		.startup.single		= workqueue_online_cpu,
2236		.teardown.single	= workqueue_offline_cpu,
2237	},
2238	[CPUHP_AP_RANDOM_ONLINE] = {
2239		.name			= "random:online",
2240		.startup.single		= random_online_cpu,
2241		.teardown.single	= NULL,
2242	},
2243	[CPUHP_AP_RCUTREE_ONLINE] = {
2244		.name			= "RCU/tree:online",
2245		.startup.single		= rcutree_online_cpu,
2246		.teardown.single	= rcutree_offline_cpu,
2247	},
2248#endif
2249	/*
2250	 * The dynamically registered state space is here
2251	 */
2252
2253#ifdef CONFIG_SMP
2254	/* Last state is scheduler control setting the cpu active */
2255	[CPUHP_AP_ACTIVE] = {
2256		.name			= "sched:active",
2257		.startup.single		= sched_cpu_activate,
2258		.teardown.single	= sched_cpu_deactivate,
2259	},
2260#endif
2261
2262	/* CPU is fully up and running. */
2263	[CPUHP_ONLINE] = {
2264		.name			= "online",
2265		.startup.single		= NULL,
2266		.teardown.single	= NULL,
2267	},
2268};
2269
2270/* Sanity check for callbacks */
2271static int cpuhp_cb_check(enum cpuhp_state state)
2272{
2273	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
2274		return -EINVAL;
2275	return 0;
2276}
2277
2278/*
2279 * Returns a free for dynamic slot assignment of the Online state. The states
2280 * are protected by the cpuhp_slot_states mutex and an empty slot is identified
2281 * by having no name assigned.
2282 */
2283static int cpuhp_reserve_state(enum cpuhp_state state)
2284{
2285	enum cpuhp_state i, end;
2286	struct cpuhp_step *step;
2287
2288	switch (state) {
2289	case CPUHP_AP_ONLINE_DYN:
2290		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
2291		end = CPUHP_AP_ONLINE_DYN_END;
2292		break;
2293	case CPUHP_BP_PREPARE_DYN:
2294		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
2295		end = CPUHP_BP_PREPARE_DYN_END;
2296		break;
2297	default:
2298		return -EINVAL;
2299	}
2300
2301	for (i = state; i <= end; i++, step++) {
2302		if (!step->name)
2303			return i;
2304	}
2305	WARN(1, "No more dynamic states available for CPU hotplug\n");
2306	return -ENOSPC;
2307}
2308
2309static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
2310				 int (*startup)(unsigned int cpu),
2311				 int (*teardown)(unsigned int cpu),
2312				 bool multi_instance)
2313{
2314	/* (Un)Install the callbacks for further cpu hotplug operations */
2315	struct cpuhp_step *sp;
2316	int ret = 0;
2317
2318	/*
2319	 * If name is NULL, then the state gets removed.
2320	 *
2321	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
2322	 * the first allocation from these dynamic ranges, so the removal
2323	 * would trigger a new allocation and clear the wrong (already
2324	 * empty) state, leaving the callbacks of the to be cleared state
2325	 * dangling, which causes wreckage on the next hotplug operation.
2326	 */
2327	if (name && (state == CPUHP_AP_ONLINE_DYN ||
2328		     state == CPUHP_BP_PREPARE_DYN)) {
2329		ret = cpuhp_reserve_state(state);
2330		if (ret < 0)
2331			return ret;
2332		state = ret;
2333	}
2334	sp = cpuhp_get_step(state);
2335	if (name && sp->name)
2336		return -EBUSY;
2337
2338	sp->startup.single = startup;
2339	sp->teardown.single = teardown;
2340	sp->name = name;
2341	sp->multi_instance = multi_instance;
2342	INIT_HLIST_HEAD(&sp->list);
2343	return ret;
2344}
2345
2346static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
2347{
2348	return cpuhp_get_step(state)->teardown.single;
2349}
2350
2351/*
2352 * Call the startup/teardown function for a step either on the AP or
2353 * on the current CPU.
2354 */
2355static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
2356			    struct hlist_node *node)
2357{
2358	struct cpuhp_step *sp = cpuhp_get_step(state);
2359	int ret;
2360
2361	/*
2362	 * If there's nothing to do, we done.
2363	 * Relies on the union for multi_instance.
2364	 */
2365	if (cpuhp_step_empty(bringup, sp))
2366		return 0;
2367	/*
2368	 * The non AP bound callbacks can fail on bringup. On teardown
2369	 * e.g. module removal we crash for now.
2370	 */
2371#ifdef CONFIG_SMP
2372	if (cpuhp_is_ap_state(state))
2373		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
2374	else
2375		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
2376#else
2377	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
2378#endif
2379	BUG_ON(ret && !bringup);
2380	return ret;
2381}
2382
2383/*
2384 * Called from __cpuhp_setup_state on a recoverable failure.
2385 *
2386 * Note: The teardown callbacks for rollback are not allowed to fail!
2387 */
2388static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
2389				   struct hlist_node *node)
2390{
2391	int cpu;
2392
2393	/* Roll back the already executed steps on the other cpus */
2394	for_each_present_cpu(cpu) {
2395		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2396		int cpustate = st->state;
2397
2398		if (cpu >= failedcpu)
2399			break;
2400
2401		/* Did we invoke the startup call on that cpu ? */
2402		if (cpustate >= state)
2403			cpuhp_issue_call(cpu, state, false, node);
2404	}
2405}
2406
2407int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
2408					  struct hlist_node *node,
2409					  bool invoke)
2410{
2411	struct cpuhp_step *sp;
2412	int cpu;
2413	int ret;
2414
2415	lockdep_assert_cpus_held();
2416
2417	sp = cpuhp_get_step(state);
2418	if (sp->multi_instance == false)
2419		return -EINVAL;
2420
2421	mutex_lock(&cpuhp_state_mutex);
2422
2423	if (!invoke || !sp->startup.multi)
2424		goto add_node;
2425
2426	/*
2427	 * Try to call the startup callback for each present cpu
2428	 * depending on the hotplug state of the cpu.
2429	 */
2430	for_each_present_cpu(cpu) {
2431		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2432		int cpustate = st->state;
2433
2434		if (cpustate < state)
2435			continue;
2436
2437		ret = cpuhp_issue_call(cpu, state, true, node);
2438		if (ret) {
2439			if (sp->teardown.multi)
2440				cpuhp_rollback_install(cpu, state, node);
2441			goto unlock;
2442		}
2443	}
2444add_node:
2445	ret = 0;
2446	hlist_add_head(node, &sp->list);
2447unlock:
2448	mutex_unlock(&cpuhp_state_mutex);
2449	return ret;
2450}
2451
2452int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
2453			       bool invoke)
2454{
2455	int ret;
2456
2457	cpus_read_lock();
2458	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
2459	cpus_read_unlock();
2460	return ret;
2461}
2462EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
2463
2464/**
2465 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
2466 * @state:		The state to setup
2467 * @name:		Name of the step
2468 * @invoke:		If true, the startup function is invoked for cpus where
2469 *			cpu state >= @state
2470 * @startup:		startup callback function
2471 * @teardown:		teardown callback function
2472 * @multi_instance:	State is set up for multiple instances which get
2473 *			added afterwards.
2474 *
2475 * The caller needs to hold cpus read locked while calling this function.
2476 * Return:
2477 *   On success:
2478 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN;
2479 *      0 for all other states
2480 *   On failure: proper (negative) error code
2481 */
2482int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
2483				   const char *name, bool invoke,
2484				   int (*startup)(unsigned int cpu),
2485				   int (*teardown)(unsigned int cpu),
2486				   bool multi_instance)
2487{
2488	int cpu, ret = 0;
2489	bool dynstate;
2490
2491	lockdep_assert_cpus_held();
2492
2493	if (cpuhp_cb_check(state) || !name)
2494		return -EINVAL;
2495
2496	mutex_lock(&cpuhp_state_mutex);
2497
2498	ret = cpuhp_store_callbacks(state, name, startup, teardown,
2499				    multi_instance);
2500
2501	dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN;
2502	if (ret > 0 && dynstate) {
2503		state = ret;
2504		ret = 0;
2505	}
2506
2507	if (ret || !invoke || !startup)
2508		goto out;
2509
2510	/*
2511	 * Try to call the startup callback for each present cpu
2512	 * depending on the hotplug state of the cpu.
2513	 */
2514	for_each_present_cpu(cpu) {
2515		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2516		int cpustate = st->state;
2517
2518		if (cpustate < state)
2519			continue;
2520
2521		ret = cpuhp_issue_call(cpu, state, true, NULL);
2522		if (ret) {
2523			if (teardown)
2524				cpuhp_rollback_install(cpu, state, NULL);
2525			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2526			goto out;
2527		}
2528	}
2529out:
2530	mutex_unlock(&cpuhp_state_mutex);
2531	/*
2532	 * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN,
2533	 * return the dynamically allocated state in case of success.
2534	 */
2535	if (!ret && dynstate)
2536		return state;
2537	return ret;
2538}
2539EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
2540
2541int __cpuhp_setup_state(enum cpuhp_state state,
2542			const char *name, bool invoke,
2543			int (*startup)(unsigned int cpu),
2544			int (*teardown)(unsigned int cpu),
2545			bool multi_instance)
2546{
2547	int ret;
2548
2549	cpus_read_lock();
2550	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
2551					     teardown, multi_instance);
2552	cpus_read_unlock();
2553	return ret;
2554}
2555EXPORT_SYMBOL(__cpuhp_setup_state);
2556
2557int __cpuhp_state_remove_instance(enum cpuhp_state state,
2558				  struct hlist_node *node, bool invoke)
2559{
2560	struct cpuhp_step *sp = cpuhp_get_step(state);
2561	int cpu;
2562
2563	BUG_ON(cpuhp_cb_check(state));
2564
2565	if (!sp->multi_instance)
2566		return -EINVAL;
2567
2568	cpus_read_lock();
2569	mutex_lock(&cpuhp_state_mutex);
2570
2571	if (!invoke || !cpuhp_get_teardown_cb(state))
2572		goto remove;
2573	/*
2574	 * Call the teardown callback for each present cpu depending
2575	 * on the hotplug state of the cpu. This function is not
2576	 * allowed to fail currently!
2577	 */
2578	for_each_present_cpu(cpu) {
2579		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2580		int cpustate = st->state;
2581
2582		if (cpustate >= state)
2583			cpuhp_issue_call(cpu, state, false, node);
2584	}
2585
2586remove:
2587	hlist_del(node);
2588	mutex_unlock(&cpuhp_state_mutex);
2589	cpus_read_unlock();
2590
2591	return 0;
2592}
2593EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
2594
2595/**
2596 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
2597 * @state:	The state to remove
2598 * @invoke:	If true, the teardown function is invoked for cpus where
2599 *		cpu state >= @state
2600 *
2601 * The caller needs to hold cpus read locked while calling this function.
2602 * The teardown callback is currently not allowed to fail. Think
2603 * about module removal!
2604 */
2605void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2606{
2607	struct cpuhp_step *sp = cpuhp_get_step(state);
2608	int cpu;
2609
2610	BUG_ON(cpuhp_cb_check(state));
2611
2612	lockdep_assert_cpus_held();
2613
2614	mutex_lock(&cpuhp_state_mutex);
2615	if (sp->multi_instance) {
2616		WARN(!hlist_empty(&sp->list),
2617		     "Error: Removing state %d which has instances left.\n",
2618		     state);
2619		goto remove;
2620	}
2621
2622	if (!invoke || !cpuhp_get_teardown_cb(state))
2623		goto remove;
2624
2625	/*
2626	 * Call the teardown callback for each present cpu depending
2627	 * on the hotplug state of the cpu. This function is not
2628	 * allowed to fail currently!
2629	 */
2630	for_each_present_cpu(cpu) {
2631		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2632		int cpustate = st->state;
2633
2634		if (cpustate >= state)
2635			cpuhp_issue_call(cpu, state, false, NULL);
2636	}
2637remove:
2638	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2639	mutex_unlock(&cpuhp_state_mutex);
2640}
2641EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2642
2643void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2644{
2645	cpus_read_lock();
2646	__cpuhp_remove_state_cpuslocked(state, invoke);
2647	cpus_read_unlock();
2648}
2649EXPORT_SYMBOL(__cpuhp_remove_state);
2650
2651#ifdef CONFIG_HOTPLUG_SMT
2652static void cpuhp_offline_cpu_device(unsigned int cpu)
2653{
2654	struct device *dev = get_cpu_device(cpu);
2655
2656	dev->offline = true;
2657	/* Tell user space about the state change */
2658	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2659}
2660
2661static void cpuhp_online_cpu_device(unsigned int cpu)
2662{
2663	struct device *dev = get_cpu_device(cpu);
2664
2665	dev->offline = false;
2666	/* Tell user space about the state change */
2667	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2668}
2669
2670int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2671{
2672	int cpu, ret = 0;
2673
2674	cpu_maps_update_begin();
2675	for_each_online_cpu(cpu) {
2676		if (topology_is_primary_thread(cpu))
2677			continue;
2678		/*
2679		 * Disable can be called with CPU_SMT_ENABLED when changing
2680		 * from a higher to lower number of SMT threads per core.
2681		 */
2682		if (ctrlval == CPU_SMT_ENABLED && cpu_smt_thread_allowed(cpu))
2683			continue;
2684		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2685		if (ret)
2686			break;
2687		/*
2688		 * As this needs to hold the cpu maps lock it's impossible
2689		 * to call device_offline() because that ends up calling
2690		 * cpu_down() which takes cpu maps lock. cpu maps lock
2691		 * needs to be held as this might race against in kernel
2692		 * abusers of the hotplug machinery (thermal management).
2693		 *
2694		 * So nothing would update device:offline state. That would
2695		 * leave the sysfs entry stale and prevent onlining after
2696		 * smt control has been changed to 'off' again. This is
2697		 * called under the sysfs hotplug lock, so it is properly
2698		 * serialized against the regular offline usage.
2699		 */
2700		cpuhp_offline_cpu_device(cpu);
2701	}
2702	if (!ret)
2703		cpu_smt_control = ctrlval;
2704	cpu_maps_update_done();
2705	return ret;
2706}
2707
2708/* Check if the core a CPU belongs to is online */
2709#if !defined(topology_is_core_online)
2710static inline bool topology_is_core_online(unsigned int cpu)
2711{
2712	return true;
2713}
2714#endif
2715
2716int cpuhp_smt_enable(void)
2717{
2718	int cpu, ret = 0;
2719
2720	cpu_maps_update_begin();
2721	cpu_smt_control = CPU_SMT_ENABLED;
2722	for_each_present_cpu(cpu) {
2723		/* Skip online CPUs and CPUs on offline nodes */
2724		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2725			continue;
2726		if (!cpu_smt_thread_allowed(cpu) || !topology_is_core_online(cpu))
2727			continue;
2728		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2729		if (ret)
2730			break;
2731		/* See comment in cpuhp_smt_disable() */
2732		cpuhp_online_cpu_device(cpu);
2733	}
2734	cpu_maps_update_done();
2735	return ret;
2736}
2737#endif
2738
2739#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2740static ssize_t state_show(struct device *dev,
2741			  struct device_attribute *attr, char *buf)
2742{
2743	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2744
2745	return sprintf(buf, "%d\n", st->state);
2746}
2747static DEVICE_ATTR_RO(state);
2748
2749static ssize_t target_store(struct device *dev, struct device_attribute *attr,
2750			    const char *buf, size_t count)
 
2751{
2752	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2753	struct cpuhp_step *sp;
2754	int target, ret;
2755
2756	ret = kstrtoint(buf, 10, &target);
2757	if (ret)
2758		return ret;
2759
2760#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2761	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2762		return -EINVAL;
2763#else
2764	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2765		return -EINVAL;
2766#endif
2767
2768	ret = lock_device_hotplug_sysfs();
2769	if (ret)
2770		return ret;
2771
2772	mutex_lock(&cpuhp_state_mutex);
2773	sp = cpuhp_get_step(target);
2774	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2775	mutex_unlock(&cpuhp_state_mutex);
2776	if (ret)
2777		goto out;
2778
2779	if (st->state < target)
2780		ret = cpu_up(dev->id, target);
2781	else if (st->state > target)
2782		ret = cpu_down(dev->id, target);
2783	else if (WARN_ON(st->target != target))
2784		st->target = target;
2785out:
2786	unlock_device_hotplug();
2787	return ret ? ret : count;
2788}
2789
2790static ssize_t target_show(struct device *dev,
2791			   struct device_attribute *attr, char *buf)
2792{
2793	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2794
2795	return sprintf(buf, "%d\n", st->target);
2796}
2797static DEVICE_ATTR_RW(target);
 
2798
2799static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
2800			  const char *buf, size_t count)
 
2801{
2802	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2803	struct cpuhp_step *sp;
2804	int fail, ret;
2805
2806	ret = kstrtoint(buf, 10, &fail);
2807	if (ret)
2808		return ret;
2809
2810	if (fail == CPUHP_INVALID) {
2811		st->fail = fail;
2812		return count;
2813	}
2814
2815	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
2816		return -EINVAL;
2817
2818	/*
2819	 * Cannot fail STARTING/DYING callbacks.
2820	 */
2821	if (cpuhp_is_atomic_state(fail))
2822		return -EINVAL;
2823
2824	/*
2825	 * DEAD callbacks cannot fail...
2826	 * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
2827	 * triggering STARTING callbacks, a failure in this state would
2828	 * hinder rollback.
2829	 */
2830	if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
2831		return -EINVAL;
2832
2833	/*
2834	 * Cannot fail anything that doesn't have callbacks.
2835	 */
2836	mutex_lock(&cpuhp_state_mutex);
2837	sp = cpuhp_get_step(fail);
2838	if (!sp->startup.single && !sp->teardown.single)
2839		ret = -EINVAL;
2840	mutex_unlock(&cpuhp_state_mutex);
2841	if (ret)
2842		return ret;
2843
2844	st->fail = fail;
2845
2846	return count;
2847}
2848
2849static ssize_t fail_show(struct device *dev,
2850			 struct device_attribute *attr, char *buf)
2851{
2852	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2853
2854	return sprintf(buf, "%d\n", st->fail);
2855}
2856
2857static DEVICE_ATTR_RW(fail);
2858
2859static struct attribute *cpuhp_cpu_attrs[] = {
2860	&dev_attr_state.attr,
2861	&dev_attr_target.attr,
2862	&dev_attr_fail.attr,
2863	NULL
2864};
2865
2866static const struct attribute_group cpuhp_cpu_attr_group = {
2867	.attrs = cpuhp_cpu_attrs,
2868	.name = "hotplug",
 
2869};
2870
2871static ssize_t states_show(struct device *dev,
2872				 struct device_attribute *attr, char *buf)
2873{
2874	ssize_t cur, res = 0;
2875	int i;
2876
2877	mutex_lock(&cpuhp_state_mutex);
2878	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2879		struct cpuhp_step *sp = cpuhp_get_step(i);
2880
2881		if (sp->name) {
2882			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2883			buf += cur;
2884			res += cur;
2885		}
2886	}
2887	mutex_unlock(&cpuhp_state_mutex);
2888	return res;
2889}
2890static DEVICE_ATTR_RO(states);
2891
2892static struct attribute *cpuhp_cpu_root_attrs[] = {
2893	&dev_attr_states.attr,
2894	NULL
2895};
2896
2897static const struct attribute_group cpuhp_cpu_root_attr_group = {
2898	.attrs = cpuhp_cpu_root_attrs,
2899	.name = "hotplug",
 
2900};
2901
2902#ifdef CONFIG_HOTPLUG_SMT
2903
2904static bool cpu_smt_num_threads_valid(unsigned int threads)
2905{
2906	if (IS_ENABLED(CONFIG_SMT_NUM_THREADS_DYNAMIC))
2907		return threads >= 1 && threads <= cpu_smt_max_threads;
2908	return threads == 1 || threads == cpu_smt_max_threads;
2909}
2910
2911static ssize_t
2912__store_smt_control(struct device *dev, struct device_attribute *attr,
2913		    const char *buf, size_t count)
2914{
2915	int ctrlval, ret, num_threads, orig_threads;
2916	bool force_off;
 
 
 
 
 
 
 
 
2917
2918	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2919		return -EPERM;
2920
2921	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2922		return -ENODEV;
2923
2924	if (sysfs_streq(buf, "on")) {
2925		ctrlval = CPU_SMT_ENABLED;
2926		num_threads = cpu_smt_max_threads;
2927	} else if (sysfs_streq(buf, "off")) {
2928		ctrlval = CPU_SMT_DISABLED;
2929		num_threads = 1;
2930	} else if (sysfs_streq(buf, "forceoff")) {
2931		ctrlval = CPU_SMT_FORCE_DISABLED;
2932		num_threads = 1;
2933	} else if (kstrtoint(buf, 10, &num_threads) == 0) {
2934		if (num_threads == 1)
2935			ctrlval = CPU_SMT_DISABLED;
2936		else if (cpu_smt_num_threads_valid(num_threads))
2937			ctrlval = CPU_SMT_ENABLED;
2938		else
2939			return -EINVAL;
2940	} else {
2941		return -EINVAL;
2942	}
2943
2944	ret = lock_device_hotplug_sysfs();
2945	if (ret)
2946		return ret;
2947
2948	orig_threads = cpu_smt_num_threads;
2949	cpu_smt_num_threads = num_threads;
2950
2951	force_off = ctrlval != cpu_smt_control && ctrlval == CPU_SMT_FORCE_DISABLED;
2952
2953	if (num_threads > orig_threads)
2954		ret = cpuhp_smt_enable();
2955	else if (num_threads < orig_threads || force_off)
2956		ret = cpuhp_smt_disable(ctrlval);
 
 
2957
2958	unlock_device_hotplug();
2959	return ret ? ret : count;
2960}
2961
2962#else /* !CONFIG_HOTPLUG_SMT */
2963static ssize_t
2964__store_smt_control(struct device *dev, struct device_attribute *attr,
2965		    const char *buf, size_t count)
2966{
2967	return -ENODEV;
2968}
2969#endif /* CONFIG_HOTPLUG_SMT */
2970
2971static const char *smt_states[] = {
2972	[CPU_SMT_ENABLED]		= "on",
2973	[CPU_SMT_DISABLED]		= "off",
2974	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
2975	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
2976	[CPU_SMT_NOT_IMPLEMENTED]	= "notimplemented",
2977};
2978
2979static ssize_t control_show(struct device *dev,
2980			    struct device_attribute *attr, char *buf)
2981{
2982	const char *state = smt_states[cpu_smt_control];
2983
2984#ifdef CONFIG_HOTPLUG_SMT
2985	/*
2986	 * If SMT is enabled but not all threads are enabled then show the
2987	 * number of threads. If all threads are enabled show "on". Otherwise
2988	 * show the state name.
2989	 */
2990	if (cpu_smt_control == CPU_SMT_ENABLED &&
2991	    cpu_smt_num_threads != cpu_smt_max_threads)
2992		return sysfs_emit(buf, "%d\n", cpu_smt_num_threads);
2993#endif
2994
2995	return sysfs_emit(buf, "%s\n", state);
2996}
2997
2998static ssize_t control_store(struct device *dev, struct device_attribute *attr,
2999			     const char *buf, size_t count)
 
3000{
3001	return __store_smt_control(dev, attr, buf, count);
3002}
3003static DEVICE_ATTR_RW(control);
3004
3005static ssize_t active_show(struct device *dev,
3006			   struct device_attribute *attr, char *buf)
3007{
3008	return sysfs_emit(buf, "%d\n", sched_smt_active());
3009}
3010static DEVICE_ATTR_RO(active);
3011
3012static struct attribute *cpuhp_smt_attrs[] = {
3013	&dev_attr_control.attr,
3014	&dev_attr_active.attr,
3015	NULL
3016};
3017
3018static const struct attribute_group cpuhp_smt_attr_group = {
3019	.attrs = cpuhp_smt_attrs,
3020	.name = "smt",
 
3021};
3022
3023static int __init cpu_smt_sysfs_init(void)
3024{
3025	struct device *dev_root;
3026	int ret = -ENODEV;
3027
3028	dev_root = bus_get_dev_root(&cpu_subsys);
3029	if (dev_root) {
3030		ret = sysfs_create_group(&dev_root->kobj, &cpuhp_smt_attr_group);
3031		put_device(dev_root);
3032	}
3033	return ret;
3034}
3035
3036static int __init cpuhp_sysfs_init(void)
3037{
3038	struct device *dev_root;
3039	int cpu, ret;
3040
3041	ret = cpu_smt_sysfs_init();
3042	if (ret)
3043		return ret;
3044
3045	dev_root = bus_get_dev_root(&cpu_subsys);
3046	if (dev_root) {
3047		ret = sysfs_create_group(&dev_root->kobj, &cpuhp_cpu_root_attr_group);
3048		put_device(dev_root);
3049		if (ret)
3050			return ret;
3051	}
3052
3053	for_each_possible_cpu(cpu) {
3054		struct device *dev = get_cpu_device(cpu);
3055
3056		if (!dev)
3057			continue;
3058		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
3059		if (ret)
3060			return ret;
3061	}
3062	return 0;
3063}
3064device_initcall(cpuhp_sysfs_init);
3065#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
3066
3067/*
3068 * cpu_bit_bitmap[] is a special, "compressed" data structure that
3069 * represents all NR_CPUS bits binary values of 1<<nr.
3070 *
3071 * It is used by cpumask_of() to get a constant address to a CPU
3072 * mask value that has a single bit set only.
3073 */
3074
3075/* cpu_bit_bitmap[0] is empty - so we can back into it */
3076#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
3077#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
3078#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
3079#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
3080
3081const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
3082
3083	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
3084	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
3085#if BITS_PER_LONG > 32
3086	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
3087	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
3088#endif
3089};
3090EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
3091
3092const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
3093EXPORT_SYMBOL(cpu_all_bits);
3094
3095#ifdef CONFIG_INIT_ALL_POSSIBLE
3096struct cpumask __cpu_possible_mask __ro_after_init
3097	= {CPU_BITS_ALL};
3098#else
3099struct cpumask __cpu_possible_mask __ro_after_init;
3100#endif
3101EXPORT_SYMBOL(__cpu_possible_mask);
3102
3103struct cpumask __cpu_online_mask __read_mostly;
3104EXPORT_SYMBOL(__cpu_online_mask);
3105
3106struct cpumask __cpu_enabled_mask __read_mostly;
3107EXPORT_SYMBOL(__cpu_enabled_mask);
3108
3109struct cpumask __cpu_present_mask __read_mostly;
3110EXPORT_SYMBOL(__cpu_present_mask);
3111
3112struct cpumask __cpu_active_mask __read_mostly;
3113EXPORT_SYMBOL(__cpu_active_mask);
3114
3115struct cpumask __cpu_dying_mask __read_mostly;
3116EXPORT_SYMBOL(__cpu_dying_mask);
3117
3118atomic_t __num_online_cpus __read_mostly;
3119EXPORT_SYMBOL(__num_online_cpus);
3120
3121void init_cpu_present(const struct cpumask *src)
3122{
3123	cpumask_copy(&__cpu_present_mask, src);
3124}
3125
3126void init_cpu_possible(const struct cpumask *src)
3127{
3128	cpumask_copy(&__cpu_possible_mask, src);
3129}
3130
3131void init_cpu_online(const struct cpumask *src)
3132{
3133	cpumask_copy(&__cpu_online_mask, src);
3134}
3135
3136void set_cpu_online(unsigned int cpu, bool online)
3137{
3138	/*
3139	 * atomic_inc/dec() is required to handle the horrid abuse of this
3140	 * function by the reboot and kexec code which invoke it from
3141	 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
3142	 * regular CPU hotplug is properly serialized.
3143	 *
3144	 * Note, that the fact that __num_online_cpus is of type atomic_t
3145	 * does not protect readers which are not serialized against
3146	 * concurrent hotplug operations.
3147	 */
3148	if (online) {
3149		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
3150			atomic_inc(&__num_online_cpus);
3151	} else {
3152		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
3153			atomic_dec(&__num_online_cpus);
3154	}
3155}
3156
3157/*
3158 * Activate the first processor.
3159 */
3160void __init boot_cpu_init(void)
3161{
3162	int cpu = smp_processor_id();
3163
3164	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
3165	set_cpu_online(cpu, true);
3166	set_cpu_active(cpu, true);
3167	set_cpu_present(cpu, true);
3168	set_cpu_possible(cpu, true);
3169
3170#ifdef CONFIG_SMP
3171	__boot_cpu_id = cpu;
3172#endif
3173}
3174
3175/*
3176 * Must be called _AFTER_ setting up the per_cpu areas
3177 */
3178void __init boot_cpu_hotplug_init(void)
3179{
3180#ifdef CONFIG_SMP
3181	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
3182	atomic_set(this_cpu_ptr(&cpuhp_state.ap_sync_state), SYNC_STATE_ONLINE);
3183#endif
3184	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
3185	this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
3186}
3187
3188#ifdef CONFIG_CPU_MITIGATIONS
3189/*
3190 * These are used for a global "mitigations=" cmdline option for toggling
3191 * optional CPU mitigations.
3192 */
3193enum cpu_mitigations {
3194	CPU_MITIGATIONS_OFF,
3195	CPU_MITIGATIONS_AUTO,
3196	CPU_MITIGATIONS_AUTO_NOSMT,
3197};
3198
3199static enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
 
3200
3201static int __init mitigations_parse_cmdline(char *arg)
3202{
3203	if (!strcmp(arg, "off"))
3204		cpu_mitigations = CPU_MITIGATIONS_OFF;
3205	else if (!strcmp(arg, "auto"))
3206		cpu_mitigations = CPU_MITIGATIONS_AUTO;
3207	else if (!strcmp(arg, "auto,nosmt"))
3208		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
3209	else
3210		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
3211			arg);
3212
3213	return 0;
3214}
 
3215
3216/* mitigations=off */
3217bool cpu_mitigations_off(void)
3218{
3219	return cpu_mitigations == CPU_MITIGATIONS_OFF;
3220}
3221EXPORT_SYMBOL_GPL(cpu_mitigations_off);
3222
3223/* mitigations=auto,nosmt */
3224bool cpu_mitigations_auto_nosmt(void)
3225{
3226	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
3227}
3228EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
3229#else
3230static int __init mitigations_parse_cmdline(char *arg)
3231{
3232	pr_crit("Kernel compiled without mitigations, ignoring 'mitigations'; system may still be vulnerable\n");
3233	return 0;
3234}
3235#endif
3236early_param("mitigations", mitigations_parse_cmdline);
v5.14.15
   1/* CPU control.
   2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
   3 *
   4 * This code is licenced under the GPL.
   5 */
   6#include <linux/sched/mm.h>
   7#include <linux/proc_fs.h>
   8#include <linux/smp.h>
   9#include <linux/init.h>
  10#include <linux/notifier.h>
  11#include <linux/sched/signal.h>
  12#include <linux/sched/hotplug.h>
  13#include <linux/sched/isolation.h>
  14#include <linux/sched/task.h>
  15#include <linux/sched/smt.h>
  16#include <linux/unistd.h>
  17#include <linux/cpu.h>
  18#include <linux/oom.h>
  19#include <linux/rcupdate.h>
 
  20#include <linux/export.h>
  21#include <linux/bug.h>
  22#include <linux/kthread.h>
  23#include <linux/stop_machine.h>
  24#include <linux/mutex.h>
  25#include <linux/gfp.h>
  26#include <linux/suspend.h>
  27#include <linux/lockdep.h>
  28#include <linux/tick.h>
  29#include <linux/irq.h>
  30#include <linux/nmi.h>
  31#include <linux/smpboot.h>
  32#include <linux/relay.h>
  33#include <linux/slab.h>
 
  34#include <linux/percpu-rwsem.h>
  35#include <linux/cpuset.h>
 
 
  36
  37#include <trace/events/power.h>
  38#define CREATE_TRACE_POINTS
  39#include <trace/events/cpuhp.h>
  40
  41#include "smpboot.h"
  42
  43/**
  44 * cpuhp_cpu_state - Per cpu hotplug state storage
  45 * @state:	The current cpu state
  46 * @target:	The target state
 
  47 * @thread:	Pointer to the hotplug thread
  48 * @should_run:	Thread should execute
  49 * @rollback:	Perform a rollback
  50 * @single:	Single callback invocation
  51 * @bringup:	Single callback bringup or teardown selector
 
 
 
  52 * @cb_state:	The state for a single callback (install/uninstall)
  53 * @result:	Result of the operation
 
  54 * @done_up:	Signal completion to the issuer of the task for cpu-up
  55 * @done_down:	Signal completion to the issuer of the task for cpu-down
  56 */
  57struct cpuhp_cpu_state {
  58	enum cpuhp_state	state;
  59	enum cpuhp_state	target;
  60	enum cpuhp_state	fail;
  61#ifdef CONFIG_SMP
  62	struct task_struct	*thread;
  63	bool			should_run;
  64	bool			rollback;
  65	bool			single;
  66	bool			bringup;
  67	int			cpu;
  68	struct hlist_node	*node;
  69	struct hlist_node	*last;
  70	enum cpuhp_state	cb_state;
  71	int			result;
 
  72	struct completion	done_up;
  73	struct completion	done_down;
  74#endif
  75};
  76
  77static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
  78	.fail = CPUHP_INVALID,
  79};
  80
  81#ifdef CONFIG_SMP
  82cpumask_t cpus_booted_once_mask;
  83#endif
  84
  85#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
  86static struct lockdep_map cpuhp_state_up_map =
  87	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
  88static struct lockdep_map cpuhp_state_down_map =
  89	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
  90
  91
  92static inline void cpuhp_lock_acquire(bool bringup)
  93{
  94	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
  95}
  96
  97static inline void cpuhp_lock_release(bool bringup)
  98{
  99	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
 100}
 101#else
 102
 103static inline void cpuhp_lock_acquire(bool bringup) { }
 104static inline void cpuhp_lock_release(bool bringup) { }
 105
 106#endif
 107
 108/**
 109 * cpuhp_step - Hotplug state machine step
 110 * @name:	Name of the step
 111 * @startup:	Startup function of the step
 112 * @teardown:	Teardown function of the step
 113 * @cant_stop:	Bringup/teardown can't be stopped at this step
 
 114 */
 115struct cpuhp_step {
 116	const char		*name;
 117	union {
 118		int		(*single)(unsigned int cpu);
 119		int		(*multi)(unsigned int cpu,
 120					 struct hlist_node *node);
 121	} startup;
 122	union {
 123		int		(*single)(unsigned int cpu);
 124		int		(*multi)(unsigned int cpu,
 125					 struct hlist_node *node);
 126	} teardown;
 
 127	struct hlist_head	list;
 
 128	bool			cant_stop;
 129	bool			multi_instance;
 130};
 131
 132static DEFINE_MUTEX(cpuhp_state_mutex);
 133static struct cpuhp_step cpuhp_hp_states[];
 134
 135static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
 136{
 137	return cpuhp_hp_states + state;
 138}
 139
 140static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
 141{
 142	return bringup ? !step->startup.single : !step->teardown.single;
 143}
 144
 145/**
 146 * cpuhp_invoke_callback _ Invoke the callbacks for a given state
 147 * @cpu:	The cpu for which the callback should be invoked
 148 * @state:	The state to do callbacks for
 149 * @bringup:	True if the bringup callback should be invoked
 150 * @node:	For multi-instance, do a single entry callback for install/remove
 151 * @lastp:	For multi-instance rollback, remember how far we got
 152 *
 153 * Called from cpu hotplug and from the state register machinery.
 
 
 154 */
 155static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
 156				 bool bringup, struct hlist_node *node,
 157				 struct hlist_node **lastp)
 158{
 159	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 160	struct cpuhp_step *step = cpuhp_get_step(state);
 161	int (*cbm)(unsigned int cpu, struct hlist_node *node);
 162	int (*cb)(unsigned int cpu);
 163	int ret, cnt;
 164
 165	if (st->fail == state) {
 166		st->fail = CPUHP_INVALID;
 167		return -EAGAIN;
 168	}
 169
 170	if (cpuhp_step_empty(bringup, step)) {
 171		WARN_ON_ONCE(1);
 172		return 0;
 173	}
 174
 175	if (!step->multi_instance) {
 176		WARN_ON_ONCE(lastp && *lastp);
 177		cb = bringup ? step->startup.single : step->teardown.single;
 178
 179		trace_cpuhp_enter(cpu, st->target, state, cb);
 180		ret = cb(cpu);
 181		trace_cpuhp_exit(cpu, st->state, state, ret);
 182		return ret;
 183	}
 184	cbm = bringup ? step->startup.multi : step->teardown.multi;
 185
 186	/* Single invocation for instance add/remove */
 187	if (node) {
 188		WARN_ON_ONCE(lastp && *lastp);
 189		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 190		ret = cbm(cpu, node);
 191		trace_cpuhp_exit(cpu, st->state, state, ret);
 192		return ret;
 193	}
 194
 195	/* State transition. Invoke on all instances */
 196	cnt = 0;
 197	hlist_for_each(node, &step->list) {
 198		if (lastp && node == *lastp)
 199			break;
 200
 201		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 202		ret = cbm(cpu, node);
 203		trace_cpuhp_exit(cpu, st->state, state, ret);
 204		if (ret) {
 205			if (!lastp)
 206				goto err;
 207
 208			*lastp = node;
 209			return ret;
 210		}
 211		cnt++;
 212	}
 213	if (lastp)
 214		*lastp = NULL;
 215	return 0;
 216err:
 217	/* Rollback the instances if one failed */
 218	cbm = !bringup ? step->startup.multi : step->teardown.multi;
 219	if (!cbm)
 220		return ret;
 221
 222	hlist_for_each(node, &step->list) {
 223		if (!cnt--)
 224			break;
 225
 226		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 227		ret = cbm(cpu, node);
 228		trace_cpuhp_exit(cpu, st->state, state, ret);
 229		/*
 230		 * Rollback must not fail,
 231		 */
 232		WARN_ON_ONCE(ret);
 233	}
 234	return ret;
 235}
 236
 237#ifdef CONFIG_SMP
 238static bool cpuhp_is_ap_state(enum cpuhp_state state)
 239{
 240	/*
 241	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
 242	 * purposes as that state is handled explicitly in cpu_down.
 243	 */
 244	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
 245}
 246
 247static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
 248{
 249	struct completion *done = bringup ? &st->done_up : &st->done_down;
 250	wait_for_completion(done);
 251}
 252
 253static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
 254{
 255	struct completion *done = bringup ? &st->done_up : &st->done_down;
 256	complete(done);
 257}
 258
 259/*
 260 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 261 */
 262static bool cpuhp_is_atomic_state(enum cpuhp_state state)
 263{
 264	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
 265}
 266
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 267/* Serializes the updates to cpu_online_mask, cpu_present_mask */
 268static DEFINE_MUTEX(cpu_add_remove_lock);
 269bool cpuhp_tasks_frozen;
 270EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
 271
 272/*
 273 * The following two APIs (cpu_maps_update_begin/done) must be used when
 274 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 275 */
 276void cpu_maps_update_begin(void)
 277{
 278	mutex_lock(&cpu_add_remove_lock);
 279}
 280
 281void cpu_maps_update_done(void)
 282{
 283	mutex_unlock(&cpu_add_remove_lock);
 284}
 285
 286/*
 287 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 288 * Should always be manipulated under cpu_add_remove_lock
 289 */
 290static int cpu_hotplug_disabled;
 291
 292#ifdef CONFIG_HOTPLUG_CPU
 293
 294DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
 295
 
 
 296void cpus_read_lock(void)
 297{
 298	percpu_down_read(&cpu_hotplug_lock);
 299}
 300EXPORT_SYMBOL_GPL(cpus_read_lock);
 301
 302int cpus_read_trylock(void)
 303{
 304	return percpu_down_read_trylock(&cpu_hotplug_lock);
 305}
 306EXPORT_SYMBOL_GPL(cpus_read_trylock);
 307
 308void cpus_read_unlock(void)
 309{
 310	percpu_up_read(&cpu_hotplug_lock);
 311}
 312EXPORT_SYMBOL_GPL(cpus_read_unlock);
 313
 314void cpus_write_lock(void)
 315{
 316	percpu_down_write(&cpu_hotplug_lock);
 317}
 318
 319void cpus_write_unlock(void)
 320{
 321	percpu_up_write(&cpu_hotplug_lock);
 322}
 323
 324void lockdep_assert_cpus_held(void)
 325{
 326	/*
 327	 * We can't have hotplug operations before userspace starts running,
 328	 * and some init codepaths will knowingly not take the hotplug lock.
 329	 * This is all valid, so mute lockdep until it makes sense to report
 330	 * unheld locks.
 331	 */
 332	if (system_state < SYSTEM_RUNNING)
 333		return;
 334
 335	percpu_rwsem_assert_held(&cpu_hotplug_lock);
 336}
 337
 338#ifdef CONFIG_LOCKDEP
 339int lockdep_is_cpus_held(void)
 340{
 341	return percpu_rwsem_is_held(&cpu_hotplug_lock);
 342}
 343#endif
 344
 345static void lockdep_acquire_cpus_lock(void)
 346{
 347	rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
 348}
 349
 350static void lockdep_release_cpus_lock(void)
 351{
 352	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
 353}
 354
 
 
 
 
 
 
 
 
 355/*
 356 * Wait for currently running CPU hotplug operations to complete (if any) and
 357 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 358 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 359 * hotplug path before performing hotplug operations. So acquiring that lock
 360 * guarantees mutual exclusion from any currently running hotplug operations.
 361 */
 362void cpu_hotplug_disable(void)
 363{
 364	cpu_maps_update_begin();
 365	cpu_hotplug_disabled++;
 366	cpu_maps_update_done();
 367}
 368EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
 369
 370static void __cpu_hotplug_enable(void)
 371{
 372	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
 373		return;
 374	cpu_hotplug_disabled--;
 375}
 376
 377void cpu_hotplug_enable(void)
 378{
 379	cpu_maps_update_begin();
 380	__cpu_hotplug_enable();
 381	cpu_maps_update_done();
 382}
 383EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 384
 385#else
 386
 387static void lockdep_acquire_cpus_lock(void)
 388{
 389}
 390
 391static void lockdep_release_cpus_lock(void)
 392{
 393}
 394
 395#endif	/* CONFIG_HOTPLUG_CPU */
 396
 397/*
 398 * Architectures that need SMT-specific errata handling during SMT hotplug
 399 * should override this.
 400 */
 401void __weak arch_smt_update(void) { }
 402
 403#ifdef CONFIG_HOTPLUG_SMT
 
 404enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
 
 
 405
 406void __init cpu_smt_disable(bool force)
 407{
 408	if (!cpu_smt_possible())
 409		return;
 410
 411	if (force) {
 412		pr_info("SMT: Force disabled\n");
 413		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
 414	} else {
 415		pr_info("SMT: disabled\n");
 416		cpu_smt_control = CPU_SMT_DISABLED;
 417	}
 
 418}
 419
 420/*
 421 * The decision whether SMT is supported can only be done after the full
 422 * CPU identification. Called from architecture code.
 423 */
 424void __init cpu_smt_check_topology(void)
 
 425{
 426	if (!topology_smt_supported())
 
 
 427		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
 
 
 
 
 
 
 
 
 
 
 
 
 
 428}
 429
 430static int __init smt_cmdline_disable(char *str)
 431{
 432	cpu_smt_disable(str && !strcmp(str, "force"));
 433	return 0;
 434}
 435early_param("nosmt", smt_cmdline_disable);
 436
 437static inline bool cpu_smt_allowed(unsigned int cpu)
 
 
 
 
 
 438{
 439	if (cpu_smt_control == CPU_SMT_ENABLED)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 440		return true;
 441
 442	if (topology_is_primary_thread(cpu))
 443		return true;
 444
 445	/*
 446	 * On x86 it's required to boot all logical CPUs at least once so
 447	 * that the init code can get a chance to set CR4.MCE on each
 448	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
 449	 * core will shutdown the machine.
 450	 */
 451	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
 452}
 453
 454/* Returns true if SMT is not supported of forcefully (irreversibly) disabled */
 455bool cpu_smt_possible(void)
 456{
 457	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
 458		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
 459}
 460EXPORT_SYMBOL_GPL(cpu_smt_possible);
 
 461#else
 462static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
 463#endif
 464
 465static inline enum cpuhp_state
 466cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
 467{
 468	enum cpuhp_state prev_state = st->state;
 469	bool bringup = st->state < target;
 470
 471	st->rollback = false;
 472	st->last = NULL;
 473
 474	st->target = target;
 475	st->single = false;
 476	st->bringup = bringup;
 477	if (cpu_dying(st->cpu) != !bringup)
 478		set_cpu_dying(st->cpu, !bringup);
 479
 480	return prev_state;
 481}
 482
 483static inline void
 484cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
 
 485{
 486	bool bringup = !st->bringup;
 487
 488	st->target = prev_state;
 489
 490	/*
 491	 * Already rolling back. No need invert the bringup value or to change
 492	 * the current state.
 493	 */
 494	if (st->rollback)
 495		return;
 496
 497	st->rollback = true;
 498
 499	/*
 500	 * If we have st->last we need to undo partial multi_instance of this
 501	 * state first. Otherwise start undo at the previous state.
 502	 */
 503	if (!st->last) {
 504		if (st->bringup)
 505			st->state--;
 506		else
 507			st->state++;
 508	}
 509
 510	st->bringup = bringup;
 511	if (cpu_dying(st->cpu) != !bringup)
 512		set_cpu_dying(st->cpu, !bringup);
 513}
 514
 515/* Regular hotplug invocation of the AP hotplug thread */
 516static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
 517{
 518	if (!st->single && st->state == st->target)
 519		return;
 520
 521	st->result = 0;
 522	/*
 523	 * Make sure the above stores are visible before should_run becomes
 524	 * true. Paired with the mb() above in cpuhp_thread_fun()
 525	 */
 526	smp_mb();
 527	st->should_run = true;
 528	wake_up_process(st->thread);
 529	wait_for_ap_thread(st, st->bringup);
 530}
 531
 532static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
 
 533{
 534	enum cpuhp_state prev_state;
 535	int ret;
 536
 537	prev_state = cpuhp_set_state(st, target);
 538	__cpuhp_kick_ap(st);
 539	if ((ret = st->result)) {
 540		cpuhp_reset_state(st, prev_state);
 541		__cpuhp_kick_ap(st);
 542	}
 543
 544	return ret;
 545}
 546
 547static int bringup_wait_for_ap(unsigned int cpu)
 548{
 549	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 550
 551	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
 552	wait_for_ap_thread(st, true);
 553	if (WARN_ON_ONCE((!cpu_online(cpu))))
 554		return -ECANCELED;
 555
 556	/* Unpark the hotplug thread of the target cpu */
 557	kthread_unpark(st->thread);
 558
 559	/*
 560	 * SMT soft disabling on X86 requires to bring the CPU out of the
 561	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
 562	 * CPU marked itself as booted_once in notify_cpu_starting() so the
 563	 * cpu_smt_allowed() check will now return false if this is not the
 564	 * primary sibling.
 565	 */
 566	if (!cpu_smt_allowed(cpu))
 567		return -ECANCELED;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 568
 569	if (st->target <= CPUHP_AP_ONLINE_IDLE)
 570		return 0;
 571
 572	return cpuhp_kick_ap(st, st->target);
 
 
 
 
 573}
 574
 575static int bringup_cpu(unsigned int cpu)
 576{
 
 577	struct task_struct *idle = idle_thread_get(cpu);
 578	int ret;
 579
 
 
 
 580	/*
 581	 * Some architectures have to walk the irq descriptors to
 582	 * setup the vector space for the cpu which comes online.
 583	 * Prevent irq alloc/free across the bringup.
 
 
 
 
 584	 */
 585	irq_lock_sparse();
 586
 587	/* Arch-specific enabling code. */
 588	ret = __cpu_up(cpu, idle);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 589	irq_unlock_sparse();
 590	if (ret)
 591		return ret;
 592	return bringup_wait_for_ap(cpu);
 593}
 
 594
 595static int finish_cpu(unsigned int cpu)
 596{
 597	struct task_struct *idle = idle_thread_get(cpu);
 598	struct mm_struct *mm = idle->active_mm;
 599
 600	/*
 601	 * idle_task_exit() will have switched to &init_mm, now
 602	 * clean up any remaining active_mm state.
 603	 */
 604	if (mm != &init_mm)
 605		idle->active_mm = &init_mm;
 606	mmdrop(mm);
 607	return 0;
 608}
 609
 610/*
 611 * Hotplug state machine related functions
 612 */
 613
 614/*
 615 * Get the next state to run. Empty ones will be skipped. Returns true if a
 616 * state must be run.
 617 *
 618 * st->state will be modified ahead of time, to match state_to_run, as if it
 619 * has already ran.
 620 */
 621static bool cpuhp_next_state(bool bringup,
 622			     enum cpuhp_state *state_to_run,
 623			     struct cpuhp_cpu_state *st,
 624			     enum cpuhp_state target)
 625{
 626	do {
 627		if (bringup) {
 628			if (st->state >= target)
 629				return false;
 630
 631			*state_to_run = ++st->state;
 632		} else {
 633			if (st->state <= target)
 634				return false;
 635
 636			*state_to_run = st->state--;
 637		}
 638
 639		if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
 640			break;
 641	} while (true);
 642
 643	return true;
 644}
 645
 646static int cpuhp_invoke_callback_range(bool bringup,
 647				       unsigned int cpu,
 648				       struct cpuhp_cpu_state *st,
 649				       enum cpuhp_state target)
 
 650{
 651	enum cpuhp_state state;
 652	int err = 0;
 653
 654	while (cpuhp_next_state(bringup, &state, st, target)) {
 
 
 655		err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
 656		if (err)
 
 
 
 
 
 
 
 
 
 
 657			break;
 
 658	}
 659
 660	return err;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 661}
 662
 663static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
 664{
 665	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
 666		return true;
 667	/*
 668	 * When CPU hotplug is disabled, then taking the CPU down is not
 669	 * possible because takedown_cpu() and the architecture and
 670	 * subsystem specific mechanisms are not available. So the CPU
 671	 * which would be completely unplugged again needs to stay around
 672	 * in the current state.
 673	 */
 674	return st->state <= CPUHP_BRINGUP_CPU;
 675}
 676
 677static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 678			      enum cpuhp_state target)
 679{
 680	enum cpuhp_state prev_state = st->state;
 681	int ret = 0;
 682
 683	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
 684	if (ret) {
 685		cpuhp_reset_state(st, prev_state);
 
 
 
 
 686		if (can_rollback_cpu(st))
 687			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
 688							    prev_state));
 689	}
 690	return ret;
 691}
 692
 693/*
 694 * The cpu hotplug threads manage the bringup and teardown of the cpus
 695 */
 696static void cpuhp_create(unsigned int cpu)
 697{
 698	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 699
 700	init_completion(&st->done_up);
 701	init_completion(&st->done_down);
 702	st->cpu = cpu;
 703}
 704
 705static int cpuhp_should_run(unsigned int cpu)
 706{
 707	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
 708
 709	return st->should_run;
 710}
 711
 712/*
 713 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 714 * callbacks when a state gets [un]installed at runtime.
 715 *
 716 * Each invocation of this function by the smpboot thread does a single AP
 717 * state callback.
 718 *
 719 * It has 3 modes of operation:
 720 *  - single: runs st->cb_state
 721 *  - up:     runs ++st->state, while st->state < st->target
 722 *  - down:   runs st->state--, while st->state > st->target
 723 *
 724 * When complete or on error, should_run is cleared and the completion is fired.
 725 */
 726static void cpuhp_thread_fun(unsigned int cpu)
 727{
 728	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
 729	bool bringup = st->bringup;
 730	enum cpuhp_state state;
 731
 732	if (WARN_ON_ONCE(!st->should_run))
 733		return;
 734
 735	/*
 736	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
 737	 * that if we see ->should_run we also see the rest of the state.
 738	 */
 739	smp_mb();
 740
 741	/*
 742	 * The BP holds the hotplug lock, but we're now running on the AP,
 743	 * ensure that anybody asserting the lock is held, will actually find
 744	 * it so.
 745	 */
 746	lockdep_acquire_cpus_lock();
 747	cpuhp_lock_acquire(bringup);
 748
 749	if (st->single) {
 750		state = st->cb_state;
 751		st->should_run = false;
 752	} else {
 753		st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
 754		if (!st->should_run)
 755			goto end;
 756	}
 757
 758	WARN_ON_ONCE(!cpuhp_is_ap_state(state));
 759
 760	if (cpuhp_is_atomic_state(state)) {
 761		local_irq_disable();
 762		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
 763		local_irq_enable();
 764
 765		/*
 766		 * STARTING/DYING must not fail!
 767		 */
 768		WARN_ON_ONCE(st->result);
 769	} else {
 770		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
 771	}
 772
 773	if (st->result) {
 774		/*
 775		 * If we fail on a rollback, we're up a creek without no
 776		 * paddle, no way forward, no way back. We loose, thanks for
 777		 * playing.
 778		 */
 779		WARN_ON_ONCE(st->rollback);
 780		st->should_run = false;
 781	}
 782
 783end:
 784	cpuhp_lock_release(bringup);
 785	lockdep_release_cpus_lock();
 786
 787	if (!st->should_run)
 788		complete_ap_thread(st, bringup);
 789}
 790
 791/* Invoke a single callback on a remote cpu */
 792static int
 793cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
 794			 struct hlist_node *node)
 795{
 796	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 797	int ret;
 798
 799	if (!cpu_online(cpu))
 800		return 0;
 801
 802	cpuhp_lock_acquire(false);
 803	cpuhp_lock_release(false);
 804
 805	cpuhp_lock_acquire(true);
 806	cpuhp_lock_release(true);
 807
 808	/*
 809	 * If we are up and running, use the hotplug thread. For early calls
 810	 * we invoke the thread function directly.
 811	 */
 812	if (!st->thread)
 813		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 814
 815	st->rollback = false;
 816	st->last = NULL;
 817
 818	st->node = node;
 819	st->bringup = bringup;
 820	st->cb_state = state;
 821	st->single = true;
 822
 823	__cpuhp_kick_ap(st);
 824
 825	/*
 826	 * If we failed and did a partial, do a rollback.
 827	 */
 828	if ((ret = st->result) && st->last) {
 829		st->rollback = true;
 830		st->bringup = !bringup;
 831
 832		__cpuhp_kick_ap(st);
 833	}
 834
 835	/*
 836	 * Clean up the leftovers so the next hotplug operation wont use stale
 837	 * data.
 838	 */
 839	st->node = st->last = NULL;
 840	return ret;
 841}
 842
 843static int cpuhp_kick_ap_work(unsigned int cpu)
 844{
 845	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 846	enum cpuhp_state prev_state = st->state;
 847	int ret;
 848
 849	cpuhp_lock_acquire(false);
 850	cpuhp_lock_release(false);
 851
 852	cpuhp_lock_acquire(true);
 853	cpuhp_lock_release(true);
 854
 855	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
 856	ret = cpuhp_kick_ap(st, st->target);
 857	trace_cpuhp_exit(cpu, st->state, prev_state, ret);
 858
 859	return ret;
 860}
 861
 862static struct smp_hotplug_thread cpuhp_threads = {
 863	.store			= &cpuhp_state.thread,
 864	.create			= &cpuhp_create,
 865	.thread_should_run	= cpuhp_should_run,
 866	.thread_fn		= cpuhp_thread_fun,
 867	.thread_comm		= "cpuhp/%u",
 868	.selfparking		= true,
 869};
 870
 
 
 
 
 
 
 
 
 
 
 
 
 871void __init cpuhp_threads_init(void)
 872{
 
 873	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
 874	kthread_unpark(this_cpu_read(cpuhp_state.thread));
 875}
 876
 877/*
 878 *
 879 * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
 880 * protected region.
 881 *
 882 * The operation is still serialized against concurrent CPU hotplug via
 883 * cpu_add_remove_lock, i.e. CPU map protection.  But it is _not_
 884 * serialized against other hotplug related activity like adding or
 885 * removing of state callbacks and state instances, which invoke either the
 886 * startup or the teardown callback of the affected state.
 887 *
 888 * This is required for subsystems which are unfixable vs. CPU hotplug and
 889 * evade lock inversion problems by scheduling work which has to be
 890 * completed _before_ cpu_up()/_cpu_down() returns.
 891 *
 892 * Don't even think about adding anything to this for any new code or even
 893 * drivers. It's only purpose is to keep existing lock order trainwrecks
 894 * working.
 895 *
 896 * For cpu_down() there might be valid reasons to finish cleanups which are
 897 * not required to be done under cpu_hotplug_lock, but that's a different
 898 * story and would be not invoked via this.
 899 */
 900static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
 901{
 902	/*
 903	 * cpusets delegate hotplug operations to a worker to "solve" the
 904	 * lock order problems. Wait for the worker, but only if tasks are
 905	 * _not_ frozen (suspend, hibernate) as that would wait forever.
 906	 *
 907	 * The wait is required because otherwise the hotplug operation
 908	 * returns with inconsistent state, which could even be observed in
 909	 * user space when a new CPU is brought up. The CPU plug uevent
 910	 * would be delivered and user space reacting on it would fail to
 911	 * move tasks to the newly plugged CPU up to the point where the
 912	 * work has finished because up to that point the newly plugged CPU
 913	 * is not assignable in cpusets/cgroups. On unplug that's not
 914	 * necessarily a visible issue, but it is still inconsistent state,
 915	 * which is the real problem which needs to be "fixed". This can't
 916	 * prevent the transient state between scheduling the work and
 917	 * returning from waiting for it.
 918	 */
 919	if (!tasks_frozen)
 920		cpuset_wait_for_hotplug();
 921}
 922
 923#ifdef CONFIG_HOTPLUG_CPU
 924#ifndef arch_clear_mm_cpumask_cpu
 925#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
 926#endif
 927
 928/**
 929 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 930 * @cpu: a CPU id
 931 *
 932 * This function walks all processes, finds a valid mm struct for each one and
 933 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 934 * trivial, there are various non-obvious corner cases, which this function
 935 * tries to solve in a safe manner.
 936 *
 937 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 938 * be called only for an already offlined CPU.
 939 */
 940void clear_tasks_mm_cpumask(int cpu)
 941{
 942	struct task_struct *p;
 943
 944	/*
 945	 * This function is called after the cpu is taken down and marked
 946	 * offline, so its not like new tasks will ever get this cpu set in
 947	 * their mm mask. -- Peter Zijlstra
 948	 * Thus, we may use rcu_read_lock() here, instead of grabbing
 949	 * full-fledged tasklist_lock.
 950	 */
 951	WARN_ON(cpu_online(cpu));
 952	rcu_read_lock();
 953	for_each_process(p) {
 954		struct task_struct *t;
 955
 956		/*
 957		 * Main thread might exit, but other threads may still have
 958		 * a valid mm. Find one.
 959		 */
 960		t = find_lock_task_mm(p);
 961		if (!t)
 962			continue;
 963		arch_clear_mm_cpumask_cpu(cpu, t->mm);
 964		task_unlock(t);
 965	}
 966	rcu_read_unlock();
 967}
 968
 969/* Take this CPU down. */
 970static int take_cpu_down(void *_param)
 971{
 972	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
 973	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
 974	int err, cpu = smp_processor_id();
 975	int ret;
 976
 977	/* Ensure this CPU doesn't handle any more interrupts. */
 978	err = __cpu_disable();
 979	if (err < 0)
 980		return err;
 981
 982	/*
 983	 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
 984	 * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
 985	 */
 986	WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
 987
 988	/* Invoke the former CPU_DYING callbacks */
 989	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
 990
 991	/*
 992	 * DYING must not fail!
 993	 */
 994	WARN_ON_ONCE(ret);
 995
 996	/* Give up timekeeping duties */
 997	tick_handover_do_timer();
 998	/* Remove CPU from timer broadcasting */
 999	tick_offline_cpu(cpu);
1000	/* Park the stopper thread */
1001	stop_machine_park(cpu);
1002	return 0;
1003}
1004
1005static int takedown_cpu(unsigned int cpu)
1006{
1007	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1008	int err;
1009
1010	/* Park the smpboot threads */
1011	kthread_park(st->thread);
1012
1013	/*
1014	 * Prevent irq alloc/free while the dying cpu reorganizes the
1015	 * interrupt affinities.
1016	 */
1017	irq_lock_sparse();
1018
1019	/*
1020	 * So now all preempt/rcu users must observe !cpu_active().
1021	 */
1022	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
1023	if (err) {
1024		/* CPU refused to die */
1025		irq_unlock_sparse();
1026		/* Unpark the hotplug thread so we can rollback there */
1027		kthread_unpark(st->thread);
1028		return err;
1029	}
1030	BUG_ON(cpu_online(cpu));
1031
1032	/*
1033	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
1034	 * all runnable tasks from the CPU, there's only the idle task left now
1035	 * that the migration thread is done doing the stop_machine thing.
1036	 *
1037	 * Wait for the stop thread to go away.
1038	 */
1039	wait_for_ap_thread(st, false);
1040	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1041
1042	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
1043	irq_unlock_sparse();
1044
1045	hotplug_cpu__broadcast_tick_pull(cpu);
1046	/* This actually kills the CPU. */
1047	__cpu_die(cpu);
1048
1049	tick_cleanup_dead_cpu(cpu);
 
 
 
 
 
 
 
 
1050	rcutree_migrate_callbacks(cpu);
 
1051	return 0;
1052}
1053
1054static void cpuhp_complete_idle_dead(void *arg)
1055{
1056	struct cpuhp_cpu_state *st = arg;
1057
1058	complete_ap_thread(st, false);
1059}
1060
1061void cpuhp_report_idle_dead(void)
1062{
1063	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1064
1065	BUG_ON(st->state != CPUHP_AP_OFFLINE);
1066	rcu_report_dead(smp_processor_id());
 
1067	st->state = CPUHP_AP_IDLE_DEAD;
1068	/*
1069	 * We cannot call complete after rcu_report_dead() so we delegate it
1070	 * to an online cpu.
1071	 */
1072	smp_call_function_single(cpumask_first(cpu_online_mask),
1073				 cpuhp_complete_idle_dead, st, 0);
1074}
1075
1076static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1077				enum cpuhp_state target)
1078{
1079	enum cpuhp_state prev_state = st->state;
1080	int ret = 0;
1081
1082	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
1083	if (ret) {
 
 
 
1084
1085		cpuhp_reset_state(st, prev_state);
1086
1087		if (st->state < prev_state)
1088			WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
1089							    prev_state));
1090	}
1091
1092	return ret;
1093}
1094
1095/* Requires cpu_add_remove_lock to be held */
1096static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1097			   enum cpuhp_state target)
1098{
1099	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1100	int prev_state, ret = 0;
1101
1102	if (num_online_cpus() == 1)
1103		return -EBUSY;
1104
1105	if (!cpu_present(cpu))
1106		return -EINVAL;
1107
1108	cpus_write_lock();
1109
1110	cpuhp_tasks_frozen = tasks_frozen;
1111
1112	prev_state = cpuhp_set_state(st, target);
1113	/*
1114	 * If the current CPU state is in the range of the AP hotplug thread,
1115	 * then we need to kick the thread.
1116	 */
1117	if (st->state > CPUHP_TEARDOWN_CPU) {
1118		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
1119		ret = cpuhp_kick_ap_work(cpu);
1120		/*
1121		 * The AP side has done the error rollback already. Just
1122		 * return the error code..
1123		 */
1124		if (ret)
1125			goto out;
1126
1127		/*
1128		 * We might have stopped still in the range of the AP hotplug
1129		 * thread. Nothing to do anymore.
1130		 */
1131		if (st->state > CPUHP_TEARDOWN_CPU)
1132			goto out;
1133
1134		st->target = target;
1135	}
1136	/*
1137	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1138	 * to do the further cleanups.
1139	 */
1140	ret = cpuhp_down_callbacks(cpu, st, target);
1141	if (ret && st->state < prev_state) {
1142		if (st->state == CPUHP_TEARDOWN_CPU) {
1143			cpuhp_reset_state(st, prev_state);
1144			__cpuhp_kick_ap(st);
1145		} else {
1146			WARN(1, "DEAD callback error for CPU%d", cpu);
1147		}
1148	}
1149
1150out:
1151	cpus_write_unlock();
1152	/*
1153	 * Do post unplug cleanup. This is still protected against
1154	 * concurrent CPU hotplug via cpu_add_remove_lock.
1155	 */
1156	lockup_detector_cleanup();
1157	arch_smt_update();
1158	cpu_up_down_serialize_trainwrecks(tasks_frozen);
1159	return ret;
1160}
1161
 
 
 
 
 
 
 
 
 
 
 
 
1162static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1163{
 
 
 
 
 
 
 
 
1164	if (cpu_hotplug_disabled)
1165		return -EBUSY;
1166	return _cpu_down(cpu, 0, target);
 
 
 
 
 
 
 
 
 
 
 
1167}
1168
1169static int cpu_down(unsigned int cpu, enum cpuhp_state target)
1170{
1171	int err;
1172
1173	cpu_maps_update_begin();
1174	err = cpu_down_maps_locked(cpu, target);
1175	cpu_maps_update_done();
1176	return err;
1177}
1178
1179/**
1180 * cpu_device_down - Bring down a cpu device
1181 * @dev: Pointer to the cpu device to offline
1182 *
1183 * This function is meant to be used by device core cpu subsystem only.
1184 *
1185 * Other subsystems should use remove_cpu() instead.
 
 
1186 */
1187int cpu_device_down(struct device *dev)
1188{
1189	return cpu_down(dev->id, CPUHP_OFFLINE);
1190}
1191
1192int remove_cpu(unsigned int cpu)
1193{
1194	int ret;
1195
1196	lock_device_hotplug();
1197	ret = device_offline(get_cpu_device(cpu));
1198	unlock_device_hotplug();
1199
1200	return ret;
1201}
1202EXPORT_SYMBOL_GPL(remove_cpu);
1203
1204void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
1205{
1206	unsigned int cpu;
1207	int error;
1208
1209	cpu_maps_update_begin();
1210
1211	/*
1212	 * Make certain the cpu I'm about to reboot on is online.
1213	 *
1214	 * This is inline to what migrate_to_reboot_cpu() already do.
1215	 */
1216	if (!cpu_online(primary_cpu))
1217		primary_cpu = cpumask_first(cpu_online_mask);
1218
1219	for_each_online_cpu(cpu) {
1220		if (cpu == primary_cpu)
1221			continue;
1222
1223		error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
1224		if (error) {
1225			pr_err("Failed to offline CPU%d - error=%d",
1226				cpu, error);
1227			break;
1228		}
1229	}
1230
1231	/*
1232	 * Ensure all but the reboot CPU are offline.
1233	 */
1234	BUG_ON(num_online_cpus() > 1);
1235
1236	/*
1237	 * Make sure the CPUs won't be enabled by someone else after this
1238	 * point. Kexec will reboot to a new kernel shortly resetting
1239	 * everything along the way.
1240	 */
1241	cpu_hotplug_disabled++;
1242
1243	cpu_maps_update_done();
1244}
1245
1246#else
1247#define takedown_cpu		NULL
1248#endif /*CONFIG_HOTPLUG_CPU*/
1249
1250/**
1251 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1252 * @cpu: cpu that just started
1253 *
1254 * It must be called by the arch code on the new cpu, before the new cpu
1255 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1256 */
1257void notify_cpu_starting(unsigned int cpu)
1258{
1259	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1260	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1261	int ret;
1262
1263	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
1264	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
1265	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
1266
1267	/*
1268	 * STARTING must not fail!
1269	 */
1270	WARN_ON_ONCE(ret);
1271}
1272
1273/*
1274 * Called from the idle task. Wake up the controlling task which brings the
1275 * hotplug thread of the upcoming CPU up and then delegates the rest of the
1276 * online bringup to the hotplug thread.
1277 */
1278void cpuhp_online_idle(enum cpuhp_state state)
1279{
1280	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1281
1282	/* Happens for the boot cpu */
1283	if (state != CPUHP_AP_ONLINE_IDLE)
1284		return;
1285
 
 
1286	/*
1287	 * Unpart the stopper thread before we start the idle loop (and start
1288	 * scheduling); this ensures the stopper task is always available.
1289	 */
1290	stop_machine_unpark(smp_processor_id());
1291
1292	st->state = CPUHP_AP_ONLINE_IDLE;
1293	complete_ap_thread(st, true);
1294}
1295
1296/* Requires cpu_add_remove_lock to be held */
1297static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1298{
1299	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1300	struct task_struct *idle;
1301	int ret = 0;
1302
1303	cpus_write_lock();
1304
1305	if (!cpu_present(cpu)) {
1306		ret = -EINVAL;
1307		goto out;
1308	}
1309
1310	/*
1311	 * The caller of cpu_up() might have raced with another
1312	 * caller. Nothing to do.
1313	 */
1314	if (st->state >= target)
1315		goto out;
1316
1317	if (st->state == CPUHP_OFFLINE) {
1318		/* Let it fail before we try to bring the cpu up */
1319		idle = idle_thread_get(cpu);
1320		if (IS_ERR(idle)) {
1321			ret = PTR_ERR(idle);
1322			goto out;
1323		}
 
 
 
 
 
 
1324	}
1325
1326	cpuhp_tasks_frozen = tasks_frozen;
1327
1328	cpuhp_set_state(st, target);
1329	/*
1330	 * If the current CPU state is in the range of the AP hotplug thread,
1331	 * then we need to kick the thread once more.
1332	 */
1333	if (st->state > CPUHP_BRINGUP_CPU) {
1334		ret = cpuhp_kick_ap_work(cpu);
1335		/*
1336		 * The AP side has done the error rollback already. Just
1337		 * return the error code..
1338		 */
1339		if (ret)
1340			goto out;
1341	}
1342
1343	/*
1344	 * Try to reach the target state. We max out on the BP at
1345	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1346	 * responsible for bringing it up to the target state.
1347	 */
1348	target = min((int)target, CPUHP_BRINGUP_CPU);
1349	ret = cpuhp_up_callbacks(cpu, st, target);
1350out:
1351	cpus_write_unlock();
1352	arch_smt_update();
1353	cpu_up_down_serialize_trainwrecks(tasks_frozen);
1354	return ret;
1355}
1356
1357static int cpu_up(unsigned int cpu, enum cpuhp_state target)
1358{
1359	int err = 0;
1360
1361	if (!cpu_possible(cpu)) {
1362		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1363		       cpu);
1364#if defined(CONFIG_IA64)
1365		pr_err("please check additional_cpus= boot parameter\n");
1366#endif
1367		return -EINVAL;
1368	}
1369
1370	err = try_online_node(cpu_to_node(cpu));
1371	if (err)
1372		return err;
1373
1374	cpu_maps_update_begin();
1375
1376	if (cpu_hotplug_disabled) {
1377		err = -EBUSY;
1378		goto out;
1379	}
1380	if (!cpu_smt_allowed(cpu)) {
1381		err = -EPERM;
1382		goto out;
1383	}
1384
1385	err = _cpu_up(cpu, 0, target);
1386out:
1387	cpu_maps_update_done();
1388	return err;
1389}
1390
1391/**
1392 * cpu_device_up - Bring up a cpu device
1393 * @dev: Pointer to the cpu device to online
1394 *
1395 * This function is meant to be used by device core cpu subsystem only.
1396 *
1397 * Other subsystems should use add_cpu() instead.
 
 
1398 */
1399int cpu_device_up(struct device *dev)
1400{
1401	return cpu_up(dev->id, CPUHP_ONLINE);
1402}
1403
1404int add_cpu(unsigned int cpu)
1405{
1406	int ret;
1407
1408	lock_device_hotplug();
1409	ret = device_online(get_cpu_device(cpu));
1410	unlock_device_hotplug();
1411
1412	return ret;
1413}
1414EXPORT_SYMBOL_GPL(add_cpu);
1415
1416/**
1417 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1418 * @sleep_cpu: The cpu we hibernated on and should be brought up.
1419 *
1420 * On some architectures like arm64, we can hibernate on any CPU, but on
1421 * wake up the CPU we hibernated on might be offline as a side effect of
1422 * using maxcpus= for example.
 
 
1423 */
1424int bringup_hibernate_cpu(unsigned int sleep_cpu)
1425{
1426	int ret;
1427
1428	if (!cpu_online(sleep_cpu)) {
1429		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
1430		ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
1431		if (ret) {
1432			pr_err("Failed to bring hibernate-CPU up!\n");
1433			return ret;
1434		}
1435	}
1436	return 0;
1437}
1438
1439void bringup_nonboot_cpus(unsigned int setup_max_cpus)
 
1440{
1441	unsigned int cpu;
1442
1443	for_each_present_cpu(cpu) {
1444		if (num_online_cpus() >= setup_max_cpus)
 
 
 
 
 
 
 
 
 
 
 
1445			break;
1446		if (!cpu_online(cpu))
1447			cpu_up(cpu, CPUHP_ONLINE);
1448	}
1449}
1450
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1451#ifdef CONFIG_PM_SLEEP_SMP
1452static cpumask_var_t frozen_cpus;
1453
1454int freeze_secondary_cpus(int primary)
1455{
1456	int cpu, error = 0;
1457
1458	cpu_maps_update_begin();
1459	if (primary == -1) {
1460		primary = cpumask_first(cpu_online_mask);
1461		if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
1462			primary = housekeeping_any_cpu(HK_FLAG_TIMER);
1463	} else {
1464		if (!cpu_online(primary))
1465			primary = cpumask_first(cpu_online_mask);
1466	}
1467
1468	/*
1469	 * We take down all of the non-boot CPUs in one shot to avoid races
1470	 * with the userspace trying to use the CPU hotplug at the same time
1471	 */
1472	cpumask_clear(frozen_cpus);
1473
1474	pr_info("Disabling non-boot CPUs ...\n");
1475	for_each_online_cpu(cpu) {
1476		if (cpu == primary)
1477			continue;
1478
1479		if (pm_wakeup_pending()) {
1480			pr_info("Wakeup pending. Abort CPU freeze\n");
1481			error = -EBUSY;
1482			break;
1483		}
1484
1485		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1486		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1487		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1488		if (!error)
1489			cpumask_set_cpu(cpu, frozen_cpus);
1490		else {
1491			pr_err("Error taking CPU%d down: %d\n", cpu, error);
1492			break;
1493		}
1494	}
1495
1496	if (!error)
1497		BUG_ON(num_online_cpus() > 1);
1498	else
1499		pr_err("Non-boot CPUs are not disabled\n");
1500
1501	/*
1502	 * Make sure the CPUs won't be enabled by someone else. We need to do
1503	 * this even in case of failure as all freeze_secondary_cpus() users are
1504	 * supposed to do thaw_secondary_cpus() on the failure path.
1505	 */
1506	cpu_hotplug_disabled++;
1507
1508	cpu_maps_update_done();
1509	return error;
1510}
1511
1512void __weak arch_thaw_secondary_cpus_begin(void)
1513{
1514}
1515
1516void __weak arch_thaw_secondary_cpus_end(void)
1517{
1518}
1519
1520void thaw_secondary_cpus(void)
1521{
1522	int cpu, error;
1523
1524	/* Allow everyone to use the CPU hotplug again */
1525	cpu_maps_update_begin();
1526	__cpu_hotplug_enable();
1527	if (cpumask_empty(frozen_cpus))
1528		goto out;
1529
1530	pr_info("Enabling non-boot CPUs ...\n");
1531
1532	arch_thaw_secondary_cpus_begin();
1533
1534	for_each_cpu(cpu, frozen_cpus) {
1535		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1536		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1537		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1538		if (!error) {
1539			pr_info("CPU%d is up\n", cpu);
1540			continue;
1541		}
1542		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1543	}
1544
1545	arch_thaw_secondary_cpus_end();
1546
1547	cpumask_clear(frozen_cpus);
1548out:
1549	cpu_maps_update_done();
1550}
1551
1552static int __init alloc_frozen_cpus(void)
1553{
1554	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1555		return -ENOMEM;
1556	return 0;
1557}
1558core_initcall(alloc_frozen_cpus);
1559
1560/*
1561 * When callbacks for CPU hotplug notifications are being executed, we must
1562 * ensure that the state of the system with respect to the tasks being frozen
1563 * or not, as reported by the notification, remains unchanged *throughout the
1564 * duration* of the execution of the callbacks.
1565 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1566 *
1567 * This synchronization is implemented by mutually excluding regular CPU
1568 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1569 * Hibernate notifications.
1570 */
1571static int
1572cpu_hotplug_pm_callback(struct notifier_block *nb,
1573			unsigned long action, void *ptr)
1574{
1575	switch (action) {
1576
1577	case PM_SUSPEND_PREPARE:
1578	case PM_HIBERNATION_PREPARE:
1579		cpu_hotplug_disable();
1580		break;
1581
1582	case PM_POST_SUSPEND:
1583	case PM_POST_HIBERNATION:
1584		cpu_hotplug_enable();
1585		break;
1586
1587	default:
1588		return NOTIFY_DONE;
1589	}
1590
1591	return NOTIFY_OK;
1592}
1593
1594
1595static int __init cpu_hotplug_pm_sync_init(void)
1596{
1597	/*
1598	 * cpu_hotplug_pm_callback has higher priority than x86
1599	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1600	 * to disable cpu hotplug to avoid cpu hotplug race.
1601	 */
1602	pm_notifier(cpu_hotplug_pm_callback, 0);
1603	return 0;
1604}
1605core_initcall(cpu_hotplug_pm_sync_init);
1606
1607#endif /* CONFIG_PM_SLEEP_SMP */
1608
1609int __boot_cpu_id;
1610
1611#endif /* CONFIG_SMP */
1612
1613/* Boot processor state steps */
1614static struct cpuhp_step cpuhp_hp_states[] = {
1615	[CPUHP_OFFLINE] = {
1616		.name			= "offline",
1617		.startup.single		= NULL,
1618		.teardown.single	= NULL,
1619	},
1620#ifdef CONFIG_SMP
1621	[CPUHP_CREATE_THREADS]= {
1622		.name			= "threads:prepare",
1623		.startup.single		= smpboot_create_threads,
1624		.teardown.single	= NULL,
1625		.cant_stop		= true,
1626	},
1627	[CPUHP_PERF_PREPARE] = {
1628		.name			= "perf:prepare",
1629		.startup.single		= perf_event_init_cpu,
1630		.teardown.single	= perf_event_exit_cpu,
1631	},
 
 
 
 
 
1632	[CPUHP_WORKQUEUE_PREP] = {
1633		.name			= "workqueue:prepare",
1634		.startup.single		= workqueue_prepare_cpu,
1635		.teardown.single	= NULL,
1636	},
1637	[CPUHP_HRTIMERS_PREPARE] = {
1638		.name			= "hrtimers:prepare",
1639		.startup.single		= hrtimers_prepare_cpu,
1640		.teardown.single	= hrtimers_dead_cpu,
1641	},
1642	[CPUHP_SMPCFD_PREPARE] = {
1643		.name			= "smpcfd:prepare",
1644		.startup.single		= smpcfd_prepare_cpu,
1645		.teardown.single	= smpcfd_dead_cpu,
1646	},
1647	[CPUHP_RELAY_PREPARE] = {
1648		.name			= "relay:prepare",
1649		.startup.single		= relay_prepare_cpu,
1650		.teardown.single	= NULL,
1651	},
1652	[CPUHP_SLAB_PREPARE] = {
1653		.name			= "slab:prepare",
1654		.startup.single		= slab_prepare_cpu,
1655		.teardown.single	= slab_dead_cpu,
1656	},
1657	[CPUHP_RCUTREE_PREP] = {
1658		.name			= "RCU/tree:prepare",
1659		.startup.single		= rcutree_prepare_cpu,
1660		.teardown.single	= rcutree_dead_cpu,
1661	},
1662	/*
1663	 * On the tear-down path, timers_dead_cpu() must be invoked
1664	 * before blk_mq_queue_reinit_notify() from notify_dead(),
1665	 * otherwise a RCU stall occurs.
1666	 */
1667	[CPUHP_TIMERS_PREPARE] = {
1668		.name			= "timers:prepare",
1669		.startup.single		= timers_prepare_cpu,
1670		.teardown.single	= timers_dead_cpu,
1671	},
1672	/* Kicks the plugged cpu into life */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1673	[CPUHP_BRINGUP_CPU] = {
1674		.name			= "cpu:bringup",
1675		.startup.single		= bringup_cpu,
1676		.teardown.single	= finish_cpu,
1677		.cant_stop		= true,
1678	},
 
1679	/* Final state before CPU kills itself */
1680	[CPUHP_AP_IDLE_DEAD] = {
1681		.name			= "idle:dead",
1682	},
1683	/*
1684	 * Last state before CPU enters the idle loop to die. Transient state
1685	 * for synchronization.
1686	 */
1687	[CPUHP_AP_OFFLINE] = {
1688		.name			= "ap:offline",
1689		.cant_stop		= true,
1690	},
1691	/* First state is scheduler control. Interrupts are disabled */
1692	[CPUHP_AP_SCHED_STARTING] = {
1693		.name			= "sched:starting",
1694		.startup.single		= sched_cpu_starting,
1695		.teardown.single	= sched_cpu_dying,
1696	},
1697	[CPUHP_AP_RCUTREE_DYING] = {
1698		.name			= "RCU/tree:dying",
1699		.startup.single		= NULL,
1700		.teardown.single	= rcutree_dying_cpu,
1701	},
1702	[CPUHP_AP_SMPCFD_DYING] = {
1703		.name			= "smpcfd:dying",
1704		.startup.single		= NULL,
1705		.teardown.single	= smpcfd_dying_cpu,
1706	},
 
 
 
 
 
 
 
 
 
 
1707	/* Entry state on starting. Interrupts enabled from here on. Transient
1708	 * state for synchronsization */
1709	[CPUHP_AP_ONLINE] = {
1710		.name			= "ap:online",
1711	},
1712	/*
1713	 * Handled on control processor until the plugged processor manages
1714	 * this itself.
1715	 */
1716	[CPUHP_TEARDOWN_CPU] = {
1717		.name			= "cpu:teardown",
1718		.startup.single		= NULL,
1719		.teardown.single	= takedown_cpu,
1720		.cant_stop		= true,
1721	},
1722
1723	[CPUHP_AP_SCHED_WAIT_EMPTY] = {
1724		.name			= "sched:waitempty",
1725		.startup.single		= NULL,
1726		.teardown.single	= sched_cpu_wait_empty,
1727	},
1728
1729	/* Handle smpboot threads park/unpark */
1730	[CPUHP_AP_SMPBOOT_THREADS] = {
1731		.name			= "smpboot/threads:online",
1732		.startup.single		= smpboot_unpark_threads,
1733		.teardown.single	= smpboot_park_threads,
1734	},
1735	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1736		.name			= "irq/affinity:online",
1737		.startup.single		= irq_affinity_online_cpu,
1738		.teardown.single	= NULL,
1739	},
1740	[CPUHP_AP_PERF_ONLINE] = {
1741		.name			= "perf:online",
1742		.startup.single		= perf_event_init_cpu,
1743		.teardown.single	= perf_event_exit_cpu,
1744	},
1745	[CPUHP_AP_WATCHDOG_ONLINE] = {
1746		.name			= "lockup_detector:online",
1747		.startup.single		= lockup_detector_online_cpu,
1748		.teardown.single	= lockup_detector_offline_cpu,
1749	},
1750	[CPUHP_AP_WORKQUEUE_ONLINE] = {
1751		.name			= "workqueue:online",
1752		.startup.single		= workqueue_online_cpu,
1753		.teardown.single	= workqueue_offline_cpu,
1754	},
 
 
 
 
 
1755	[CPUHP_AP_RCUTREE_ONLINE] = {
1756		.name			= "RCU/tree:online",
1757		.startup.single		= rcutree_online_cpu,
1758		.teardown.single	= rcutree_offline_cpu,
1759	},
1760#endif
1761	/*
1762	 * The dynamically registered state space is here
1763	 */
1764
1765#ifdef CONFIG_SMP
1766	/* Last state is scheduler control setting the cpu active */
1767	[CPUHP_AP_ACTIVE] = {
1768		.name			= "sched:active",
1769		.startup.single		= sched_cpu_activate,
1770		.teardown.single	= sched_cpu_deactivate,
1771	},
1772#endif
1773
1774	/* CPU is fully up and running. */
1775	[CPUHP_ONLINE] = {
1776		.name			= "online",
1777		.startup.single		= NULL,
1778		.teardown.single	= NULL,
1779	},
1780};
1781
1782/* Sanity check for callbacks */
1783static int cpuhp_cb_check(enum cpuhp_state state)
1784{
1785	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1786		return -EINVAL;
1787	return 0;
1788}
1789
1790/*
1791 * Returns a free for dynamic slot assignment of the Online state. The states
1792 * are protected by the cpuhp_slot_states mutex and an empty slot is identified
1793 * by having no name assigned.
1794 */
1795static int cpuhp_reserve_state(enum cpuhp_state state)
1796{
1797	enum cpuhp_state i, end;
1798	struct cpuhp_step *step;
1799
1800	switch (state) {
1801	case CPUHP_AP_ONLINE_DYN:
1802		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
1803		end = CPUHP_AP_ONLINE_DYN_END;
1804		break;
1805	case CPUHP_BP_PREPARE_DYN:
1806		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
1807		end = CPUHP_BP_PREPARE_DYN_END;
1808		break;
1809	default:
1810		return -EINVAL;
1811	}
1812
1813	for (i = state; i <= end; i++, step++) {
1814		if (!step->name)
1815			return i;
1816	}
1817	WARN(1, "No more dynamic states available for CPU hotplug\n");
1818	return -ENOSPC;
1819}
1820
1821static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1822				 int (*startup)(unsigned int cpu),
1823				 int (*teardown)(unsigned int cpu),
1824				 bool multi_instance)
1825{
1826	/* (Un)Install the callbacks for further cpu hotplug operations */
1827	struct cpuhp_step *sp;
1828	int ret = 0;
1829
1830	/*
1831	 * If name is NULL, then the state gets removed.
1832	 *
1833	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
1834	 * the first allocation from these dynamic ranges, so the removal
1835	 * would trigger a new allocation and clear the wrong (already
1836	 * empty) state, leaving the callbacks of the to be cleared state
1837	 * dangling, which causes wreckage on the next hotplug operation.
1838	 */
1839	if (name && (state == CPUHP_AP_ONLINE_DYN ||
1840		     state == CPUHP_BP_PREPARE_DYN)) {
1841		ret = cpuhp_reserve_state(state);
1842		if (ret < 0)
1843			return ret;
1844		state = ret;
1845	}
1846	sp = cpuhp_get_step(state);
1847	if (name && sp->name)
1848		return -EBUSY;
1849
1850	sp->startup.single = startup;
1851	sp->teardown.single = teardown;
1852	sp->name = name;
1853	sp->multi_instance = multi_instance;
1854	INIT_HLIST_HEAD(&sp->list);
1855	return ret;
1856}
1857
1858static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1859{
1860	return cpuhp_get_step(state)->teardown.single;
1861}
1862
1863/*
1864 * Call the startup/teardown function for a step either on the AP or
1865 * on the current CPU.
1866 */
1867static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1868			    struct hlist_node *node)
1869{
1870	struct cpuhp_step *sp = cpuhp_get_step(state);
1871	int ret;
1872
1873	/*
1874	 * If there's nothing to do, we done.
1875	 * Relies on the union for multi_instance.
1876	 */
1877	if (cpuhp_step_empty(bringup, sp))
1878		return 0;
1879	/*
1880	 * The non AP bound callbacks can fail on bringup. On teardown
1881	 * e.g. module removal we crash for now.
1882	 */
1883#ifdef CONFIG_SMP
1884	if (cpuhp_is_ap_state(state))
1885		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1886	else
1887		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1888#else
1889	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1890#endif
1891	BUG_ON(ret && !bringup);
1892	return ret;
1893}
1894
1895/*
1896 * Called from __cpuhp_setup_state on a recoverable failure.
1897 *
1898 * Note: The teardown callbacks for rollback are not allowed to fail!
1899 */
1900static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1901				   struct hlist_node *node)
1902{
1903	int cpu;
1904
1905	/* Roll back the already executed steps on the other cpus */
1906	for_each_present_cpu(cpu) {
1907		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1908		int cpustate = st->state;
1909
1910		if (cpu >= failedcpu)
1911			break;
1912
1913		/* Did we invoke the startup call on that cpu ? */
1914		if (cpustate >= state)
1915			cpuhp_issue_call(cpu, state, false, node);
1916	}
1917}
1918
1919int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1920					  struct hlist_node *node,
1921					  bool invoke)
1922{
1923	struct cpuhp_step *sp;
1924	int cpu;
1925	int ret;
1926
1927	lockdep_assert_cpus_held();
1928
1929	sp = cpuhp_get_step(state);
1930	if (sp->multi_instance == false)
1931		return -EINVAL;
1932
1933	mutex_lock(&cpuhp_state_mutex);
1934
1935	if (!invoke || !sp->startup.multi)
1936		goto add_node;
1937
1938	/*
1939	 * Try to call the startup callback for each present cpu
1940	 * depending on the hotplug state of the cpu.
1941	 */
1942	for_each_present_cpu(cpu) {
1943		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1944		int cpustate = st->state;
1945
1946		if (cpustate < state)
1947			continue;
1948
1949		ret = cpuhp_issue_call(cpu, state, true, node);
1950		if (ret) {
1951			if (sp->teardown.multi)
1952				cpuhp_rollback_install(cpu, state, node);
1953			goto unlock;
1954		}
1955	}
1956add_node:
1957	ret = 0;
1958	hlist_add_head(node, &sp->list);
1959unlock:
1960	mutex_unlock(&cpuhp_state_mutex);
1961	return ret;
1962}
1963
1964int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1965			       bool invoke)
1966{
1967	int ret;
1968
1969	cpus_read_lock();
1970	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
1971	cpus_read_unlock();
1972	return ret;
1973}
1974EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
1975
1976/**
1977 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
1978 * @state:		The state to setup
 
1979 * @invoke:		If true, the startup function is invoked for cpus where
1980 *			cpu state >= @state
1981 * @startup:		startup callback function
1982 * @teardown:		teardown callback function
1983 * @multi_instance:	State is set up for multiple instances which get
1984 *			added afterwards.
1985 *
1986 * The caller needs to hold cpus read locked while calling this function.
1987 * Returns:
1988 *   On success:
1989 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
1990 *      0 for all other states
1991 *   On failure: proper (negative) error code
1992 */
1993int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1994				   const char *name, bool invoke,
1995				   int (*startup)(unsigned int cpu),
1996				   int (*teardown)(unsigned int cpu),
1997				   bool multi_instance)
1998{
1999	int cpu, ret = 0;
2000	bool dynstate;
2001
2002	lockdep_assert_cpus_held();
2003
2004	if (cpuhp_cb_check(state) || !name)
2005		return -EINVAL;
2006
2007	mutex_lock(&cpuhp_state_mutex);
2008
2009	ret = cpuhp_store_callbacks(state, name, startup, teardown,
2010				    multi_instance);
2011
2012	dynstate = state == CPUHP_AP_ONLINE_DYN;
2013	if (ret > 0 && dynstate) {
2014		state = ret;
2015		ret = 0;
2016	}
2017
2018	if (ret || !invoke || !startup)
2019		goto out;
2020
2021	/*
2022	 * Try to call the startup callback for each present cpu
2023	 * depending on the hotplug state of the cpu.
2024	 */
2025	for_each_present_cpu(cpu) {
2026		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2027		int cpustate = st->state;
2028
2029		if (cpustate < state)
2030			continue;
2031
2032		ret = cpuhp_issue_call(cpu, state, true, NULL);
2033		if (ret) {
2034			if (teardown)
2035				cpuhp_rollback_install(cpu, state, NULL);
2036			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2037			goto out;
2038		}
2039	}
2040out:
2041	mutex_unlock(&cpuhp_state_mutex);
2042	/*
2043	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
2044	 * dynamically allocated state in case of success.
2045	 */
2046	if (!ret && dynstate)
2047		return state;
2048	return ret;
2049}
2050EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
2051
2052int __cpuhp_setup_state(enum cpuhp_state state,
2053			const char *name, bool invoke,
2054			int (*startup)(unsigned int cpu),
2055			int (*teardown)(unsigned int cpu),
2056			bool multi_instance)
2057{
2058	int ret;
2059
2060	cpus_read_lock();
2061	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
2062					     teardown, multi_instance);
2063	cpus_read_unlock();
2064	return ret;
2065}
2066EXPORT_SYMBOL(__cpuhp_setup_state);
2067
2068int __cpuhp_state_remove_instance(enum cpuhp_state state,
2069				  struct hlist_node *node, bool invoke)
2070{
2071	struct cpuhp_step *sp = cpuhp_get_step(state);
2072	int cpu;
2073
2074	BUG_ON(cpuhp_cb_check(state));
2075
2076	if (!sp->multi_instance)
2077		return -EINVAL;
2078
2079	cpus_read_lock();
2080	mutex_lock(&cpuhp_state_mutex);
2081
2082	if (!invoke || !cpuhp_get_teardown_cb(state))
2083		goto remove;
2084	/*
2085	 * Call the teardown callback for each present cpu depending
2086	 * on the hotplug state of the cpu. This function is not
2087	 * allowed to fail currently!
2088	 */
2089	for_each_present_cpu(cpu) {
2090		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2091		int cpustate = st->state;
2092
2093		if (cpustate >= state)
2094			cpuhp_issue_call(cpu, state, false, node);
2095	}
2096
2097remove:
2098	hlist_del(node);
2099	mutex_unlock(&cpuhp_state_mutex);
2100	cpus_read_unlock();
2101
2102	return 0;
2103}
2104EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
2105
2106/**
2107 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
2108 * @state:	The state to remove
2109 * @invoke:	If true, the teardown function is invoked for cpus where
2110 *		cpu state >= @state
2111 *
2112 * The caller needs to hold cpus read locked while calling this function.
2113 * The teardown callback is currently not allowed to fail. Think
2114 * about module removal!
2115 */
2116void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2117{
2118	struct cpuhp_step *sp = cpuhp_get_step(state);
2119	int cpu;
2120
2121	BUG_ON(cpuhp_cb_check(state));
2122
2123	lockdep_assert_cpus_held();
2124
2125	mutex_lock(&cpuhp_state_mutex);
2126	if (sp->multi_instance) {
2127		WARN(!hlist_empty(&sp->list),
2128		     "Error: Removing state %d which has instances left.\n",
2129		     state);
2130		goto remove;
2131	}
2132
2133	if (!invoke || !cpuhp_get_teardown_cb(state))
2134		goto remove;
2135
2136	/*
2137	 * Call the teardown callback for each present cpu depending
2138	 * on the hotplug state of the cpu. This function is not
2139	 * allowed to fail currently!
2140	 */
2141	for_each_present_cpu(cpu) {
2142		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2143		int cpustate = st->state;
2144
2145		if (cpustate >= state)
2146			cpuhp_issue_call(cpu, state, false, NULL);
2147	}
2148remove:
2149	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2150	mutex_unlock(&cpuhp_state_mutex);
2151}
2152EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2153
2154void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2155{
2156	cpus_read_lock();
2157	__cpuhp_remove_state_cpuslocked(state, invoke);
2158	cpus_read_unlock();
2159}
2160EXPORT_SYMBOL(__cpuhp_remove_state);
2161
2162#ifdef CONFIG_HOTPLUG_SMT
2163static void cpuhp_offline_cpu_device(unsigned int cpu)
2164{
2165	struct device *dev = get_cpu_device(cpu);
2166
2167	dev->offline = true;
2168	/* Tell user space about the state change */
2169	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2170}
2171
2172static void cpuhp_online_cpu_device(unsigned int cpu)
2173{
2174	struct device *dev = get_cpu_device(cpu);
2175
2176	dev->offline = false;
2177	/* Tell user space about the state change */
2178	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2179}
2180
2181int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2182{
2183	int cpu, ret = 0;
2184
2185	cpu_maps_update_begin();
2186	for_each_online_cpu(cpu) {
2187		if (topology_is_primary_thread(cpu))
2188			continue;
 
 
 
 
 
 
2189		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2190		if (ret)
2191			break;
2192		/*
2193		 * As this needs to hold the cpu maps lock it's impossible
2194		 * to call device_offline() because that ends up calling
2195		 * cpu_down() which takes cpu maps lock. cpu maps lock
2196		 * needs to be held as this might race against in kernel
2197		 * abusers of the hotplug machinery (thermal management).
2198		 *
2199		 * So nothing would update device:offline state. That would
2200		 * leave the sysfs entry stale and prevent onlining after
2201		 * smt control has been changed to 'off' again. This is
2202		 * called under the sysfs hotplug lock, so it is properly
2203		 * serialized against the regular offline usage.
2204		 */
2205		cpuhp_offline_cpu_device(cpu);
2206	}
2207	if (!ret)
2208		cpu_smt_control = ctrlval;
2209	cpu_maps_update_done();
2210	return ret;
2211}
2212
 
 
 
 
 
 
 
 
2213int cpuhp_smt_enable(void)
2214{
2215	int cpu, ret = 0;
2216
2217	cpu_maps_update_begin();
2218	cpu_smt_control = CPU_SMT_ENABLED;
2219	for_each_present_cpu(cpu) {
2220		/* Skip online CPUs and CPUs on offline nodes */
2221		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2222			continue;
 
 
2223		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2224		if (ret)
2225			break;
2226		/* See comment in cpuhp_smt_disable() */
2227		cpuhp_online_cpu_device(cpu);
2228	}
2229	cpu_maps_update_done();
2230	return ret;
2231}
2232#endif
2233
2234#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2235static ssize_t show_cpuhp_state(struct device *dev,
2236				struct device_attribute *attr, char *buf)
2237{
2238	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2239
2240	return sprintf(buf, "%d\n", st->state);
2241}
2242static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
2243
2244static ssize_t write_cpuhp_target(struct device *dev,
2245				  struct device_attribute *attr,
2246				  const char *buf, size_t count)
2247{
2248	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2249	struct cpuhp_step *sp;
2250	int target, ret;
2251
2252	ret = kstrtoint(buf, 10, &target);
2253	if (ret)
2254		return ret;
2255
2256#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2257	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2258		return -EINVAL;
2259#else
2260	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2261		return -EINVAL;
2262#endif
2263
2264	ret = lock_device_hotplug_sysfs();
2265	if (ret)
2266		return ret;
2267
2268	mutex_lock(&cpuhp_state_mutex);
2269	sp = cpuhp_get_step(target);
2270	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2271	mutex_unlock(&cpuhp_state_mutex);
2272	if (ret)
2273		goto out;
2274
2275	if (st->state < target)
2276		ret = cpu_up(dev->id, target);
2277	else
2278		ret = cpu_down(dev->id, target);
 
 
2279out:
2280	unlock_device_hotplug();
2281	return ret ? ret : count;
2282}
2283
2284static ssize_t show_cpuhp_target(struct device *dev,
2285				 struct device_attribute *attr, char *buf)
2286{
2287	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2288
2289	return sprintf(buf, "%d\n", st->target);
2290}
2291static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
2292
2293
2294static ssize_t write_cpuhp_fail(struct device *dev,
2295				struct device_attribute *attr,
2296				const char *buf, size_t count)
2297{
2298	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2299	struct cpuhp_step *sp;
2300	int fail, ret;
2301
2302	ret = kstrtoint(buf, 10, &fail);
2303	if (ret)
2304		return ret;
2305
2306	if (fail == CPUHP_INVALID) {
2307		st->fail = fail;
2308		return count;
2309	}
2310
2311	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
2312		return -EINVAL;
2313
2314	/*
2315	 * Cannot fail STARTING/DYING callbacks.
2316	 */
2317	if (cpuhp_is_atomic_state(fail))
2318		return -EINVAL;
2319
2320	/*
2321	 * DEAD callbacks cannot fail...
2322	 * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
2323	 * triggering STARTING callbacks, a failure in this state would
2324	 * hinder rollback.
2325	 */
2326	if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
2327		return -EINVAL;
2328
2329	/*
2330	 * Cannot fail anything that doesn't have callbacks.
2331	 */
2332	mutex_lock(&cpuhp_state_mutex);
2333	sp = cpuhp_get_step(fail);
2334	if (!sp->startup.single && !sp->teardown.single)
2335		ret = -EINVAL;
2336	mutex_unlock(&cpuhp_state_mutex);
2337	if (ret)
2338		return ret;
2339
2340	st->fail = fail;
2341
2342	return count;
2343}
2344
2345static ssize_t show_cpuhp_fail(struct device *dev,
2346			       struct device_attribute *attr, char *buf)
2347{
2348	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2349
2350	return sprintf(buf, "%d\n", st->fail);
2351}
2352
2353static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
2354
2355static struct attribute *cpuhp_cpu_attrs[] = {
2356	&dev_attr_state.attr,
2357	&dev_attr_target.attr,
2358	&dev_attr_fail.attr,
2359	NULL
2360};
2361
2362static const struct attribute_group cpuhp_cpu_attr_group = {
2363	.attrs = cpuhp_cpu_attrs,
2364	.name = "hotplug",
2365	NULL
2366};
2367
2368static ssize_t show_cpuhp_states(struct device *dev,
2369				 struct device_attribute *attr, char *buf)
2370{
2371	ssize_t cur, res = 0;
2372	int i;
2373
2374	mutex_lock(&cpuhp_state_mutex);
2375	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2376		struct cpuhp_step *sp = cpuhp_get_step(i);
2377
2378		if (sp->name) {
2379			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2380			buf += cur;
2381			res += cur;
2382		}
2383	}
2384	mutex_unlock(&cpuhp_state_mutex);
2385	return res;
2386}
2387static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
2388
2389static struct attribute *cpuhp_cpu_root_attrs[] = {
2390	&dev_attr_states.attr,
2391	NULL
2392};
2393
2394static const struct attribute_group cpuhp_cpu_root_attr_group = {
2395	.attrs = cpuhp_cpu_root_attrs,
2396	.name = "hotplug",
2397	NULL
2398};
2399
2400#ifdef CONFIG_HOTPLUG_SMT
2401
 
 
 
 
 
 
 
2402static ssize_t
2403__store_smt_control(struct device *dev, struct device_attribute *attr,
2404		    const char *buf, size_t count)
2405{
2406	int ctrlval, ret;
2407
2408	if (sysfs_streq(buf, "on"))
2409		ctrlval = CPU_SMT_ENABLED;
2410	else if (sysfs_streq(buf, "off"))
2411		ctrlval = CPU_SMT_DISABLED;
2412	else if (sysfs_streq(buf, "forceoff"))
2413		ctrlval = CPU_SMT_FORCE_DISABLED;
2414	else
2415		return -EINVAL;
2416
2417	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2418		return -EPERM;
2419
2420	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2421		return -ENODEV;
2422
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2423	ret = lock_device_hotplug_sysfs();
2424	if (ret)
2425		return ret;
2426
2427	if (ctrlval != cpu_smt_control) {
2428		switch (ctrlval) {
2429		case CPU_SMT_ENABLED:
2430			ret = cpuhp_smt_enable();
2431			break;
2432		case CPU_SMT_DISABLED:
2433		case CPU_SMT_FORCE_DISABLED:
2434			ret = cpuhp_smt_disable(ctrlval);
2435			break;
2436		}
2437	}
2438
2439	unlock_device_hotplug();
2440	return ret ? ret : count;
2441}
2442
2443#else /* !CONFIG_HOTPLUG_SMT */
2444static ssize_t
2445__store_smt_control(struct device *dev, struct device_attribute *attr,
2446		    const char *buf, size_t count)
2447{
2448	return -ENODEV;
2449}
2450#endif /* CONFIG_HOTPLUG_SMT */
2451
2452static const char *smt_states[] = {
2453	[CPU_SMT_ENABLED]		= "on",
2454	[CPU_SMT_DISABLED]		= "off",
2455	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
2456	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
2457	[CPU_SMT_NOT_IMPLEMENTED]	= "notimplemented",
2458};
2459
2460static ssize_t
2461show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
2462{
2463	const char *state = smt_states[cpu_smt_control];
2464
2465	return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
 
 
 
 
 
 
 
 
 
 
 
2466}
2467
2468static ssize_t
2469store_smt_control(struct device *dev, struct device_attribute *attr,
2470		  const char *buf, size_t count)
2471{
2472	return __store_smt_control(dev, attr, buf, count);
2473}
2474static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2475
2476static ssize_t
2477show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2478{
2479	return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
2480}
2481static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2482
2483static struct attribute *cpuhp_smt_attrs[] = {
2484	&dev_attr_control.attr,
2485	&dev_attr_active.attr,
2486	NULL
2487};
2488
2489static const struct attribute_group cpuhp_smt_attr_group = {
2490	.attrs = cpuhp_smt_attrs,
2491	.name = "smt",
2492	NULL
2493};
2494
2495static int __init cpu_smt_sysfs_init(void)
2496{
2497	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2498				  &cpuhp_smt_attr_group);
 
 
 
 
 
 
 
2499}
2500
2501static int __init cpuhp_sysfs_init(void)
2502{
 
2503	int cpu, ret;
2504
2505	ret = cpu_smt_sysfs_init();
2506	if (ret)
2507		return ret;
2508
2509	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2510				 &cpuhp_cpu_root_attr_group);
2511	if (ret)
2512		return ret;
 
 
 
2513
2514	for_each_possible_cpu(cpu) {
2515		struct device *dev = get_cpu_device(cpu);
2516
2517		if (!dev)
2518			continue;
2519		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2520		if (ret)
2521			return ret;
2522	}
2523	return 0;
2524}
2525device_initcall(cpuhp_sysfs_init);
2526#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
2527
2528/*
2529 * cpu_bit_bitmap[] is a special, "compressed" data structure that
2530 * represents all NR_CPUS bits binary values of 1<<nr.
2531 *
2532 * It is used by cpumask_of() to get a constant address to a CPU
2533 * mask value that has a single bit set only.
2534 */
2535
2536/* cpu_bit_bitmap[0] is empty - so we can back into it */
2537#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
2538#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2539#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2540#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2541
2542const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2543
2544	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
2545	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
2546#if BITS_PER_LONG > 32
2547	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
2548	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
2549#endif
2550};
2551EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
2552
2553const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2554EXPORT_SYMBOL(cpu_all_bits);
2555
2556#ifdef CONFIG_INIT_ALL_POSSIBLE
2557struct cpumask __cpu_possible_mask __read_mostly
2558	= {CPU_BITS_ALL};
2559#else
2560struct cpumask __cpu_possible_mask __read_mostly;
2561#endif
2562EXPORT_SYMBOL(__cpu_possible_mask);
2563
2564struct cpumask __cpu_online_mask __read_mostly;
2565EXPORT_SYMBOL(__cpu_online_mask);
2566
 
 
 
2567struct cpumask __cpu_present_mask __read_mostly;
2568EXPORT_SYMBOL(__cpu_present_mask);
2569
2570struct cpumask __cpu_active_mask __read_mostly;
2571EXPORT_SYMBOL(__cpu_active_mask);
2572
2573struct cpumask __cpu_dying_mask __read_mostly;
2574EXPORT_SYMBOL(__cpu_dying_mask);
2575
2576atomic_t __num_online_cpus __read_mostly;
2577EXPORT_SYMBOL(__num_online_cpus);
2578
2579void init_cpu_present(const struct cpumask *src)
2580{
2581	cpumask_copy(&__cpu_present_mask, src);
2582}
2583
2584void init_cpu_possible(const struct cpumask *src)
2585{
2586	cpumask_copy(&__cpu_possible_mask, src);
2587}
2588
2589void init_cpu_online(const struct cpumask *src)
2590{
2591	cpumask_copy(&__cpu_online_mask, src);
2592}
2593
2594void set_cpu_online(unsigned int cpu, bool online)
2595{
2596	/*
2597	 * atomic_inc/dec() is required to handle the horrid abuse of this
2598	 * function by the reboot and kexec code which invoke it from
2599	 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
2600	 * regular CPU hotplug is properly serialized.
2601	 *
2602	 * Note, that the fact that __num_online_cpus is of type atomic_t
2603	 * does not protect readers which are not serialized against
2604	 * concurrent hotplug operations.
2605	 */
2606	if (online) {
2607		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
2608			atomic_inc(&__num_online_cpus);
2609	} else {
2610		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
2611			atomic_dec(&__num_online_cpus);
2612	}
2613}
2614
2615/*
2616 * Activate the first processor.
2617 */
2618void __init boot_cpu_init(void)
2619{
2620	int cpu = smp_processor_id();
2621
2622	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
2623	set_cpu_online(cpu, true);
2624	set_cpu_active(cpu, true);
2625	set_cpu_present(cpu, true);
2626	set_cpu_possible(cpu, true);
2627
2628#ifdef CONFIG_SMP
2629	__boot_cpu_id = cpu;
2630#endif
2631}
2632
2633/*
2634 * Must be called _AFTER_ setting up the per_cpu areas
2635 */
2636void __init boot_cpu_hotplug_init(void)
2637{
2638#ifdef CONFIG_SMP
2639	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
 
2640#endif
2641	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
 
2642}
2643
 
2644/*
2645 * These are used for a global "mitigations=" cmdline option for toggling
2646 * optional CPU mitigations.
2647 */
2648enum cpu_mitigations {
2649	CPU_MITIGATIONS_OFF,
2650	CPU_MITIGATIONS_AUTO,
2651	CPU_MITIGATIONS_AUTO_NOSMT,
2652};
2653
2654static enum cpu_mitigations cpu_mitigations __ro_after_init =
2655	CPU_MITIGATIONS_AUTO;
2656
2657static int __init mitigations_parse_cmdline(char *arg)
2658{
2659	if (!strcmp(arg, "off"))
2660		cpu_mitigations = CPU_MITIGATIONS_OFF;
2661	else if (!strcmp(arg, "auto"))
2662		cpu_mitigations = CPU_MITIGATIONS_AUTO;
2663	else if (!strcmp(arg, "auto,nosmt"))
2664		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2665	else
2666		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2667			arg);
2668
2669	return 0;
2670}
2671early_param("mitigations", mitigations_parse_cmdline);
2672
2673/* mitigations=off */
2674bool cpu_mitigations_off(void)
2675{
2676	return cpu_mitigations == CPU_MITIGATIONS_OFF;
2677}
2678EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2679
2680/* mitigations=auto,nosmt */
2681bool cpu_mitigations_auto_nosmt(void)
2682{
2683	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2684}
2685EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);