Linux kernel v6.2 (kernel/cpu.c)
   1/* CPU control.
   2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
   3 *
   4 * This code is licensed under the GPL.
   5 */
   6#include <linux/sched/mm.h>
   7#include <linux/proc_fs.h>
   8#include <linux/smp.h>
   9#include <linux/init.h>
  10#include <linux/notifier.h>
  11#include <linux/sched/signal.h>
  12#include <linux/sched/hotplug.h>
  13#include <linux/sched/isolation.h>
  14#include <linux/sched/task.h>
  15#include <linux/sched/smt.h>
  16#include <linux/unistd.h>
  17#include <linux/cpu.h>
  18#include <linux/oom.h>
  19#include <linux/rcupdate.h>
  20#include <linux/export.h>
  21#include <linux/bug.h>
  22#include <linux/kthread.h>
  23#include <linux/stop_machine.h>
  24#include <linux/mutex.h>
  25#include <linux/gfp.h>
  26#include <linux/suspend.h>
  27#include <linux/lockdep.h>
  28#include <linux/tick.h>
  29#include <linux/irq.h>
  30#include <linux/nmi.h>
  31#include <linux/smpboot.h>
  32#include <linux/relay.h>
  33#include <linux/slab.h>
  34#include <linux/scs.h>
  35#include <linux/percpu-rwsem.h>
  36#include <linux/cpuset.h>
  37#include <linux/random.h>
  38#include <linux/cc_platform.h>
  39
  40#include <trace/events/power.h>
  41#define CREATE_TRACE_POINTS
  42#include <trace/events/cpuhp.h>
  43
  44#include "smpboot.h"
  45
  46/**
  47 * struct cpuhp_cpu_state - Per cpu hotplug state storage
  48 * @state:	The current cpu state
  49 * @target:	The target state
  50 * @fail:	Current CPU hotplug callback state
  51 * @thread:	Pointer to the hotplug thread
  52 * @should_run:	Thread should execute
  53 * @rollback:	Perform a rollback
  54 * @single:	Single callback invocation
  55 * @bringup:	Single callback bringup or teardown selector
  56 * @cpu:	CPU number
  57 * @node:	Remote CPU node; for multi-instance, do a
  58 *		single entry callback for install/remove
  59 * @last:	For multi-instance rollback, remember how far we got
  60 * @cb_state:	The state for a single callback (install/uninstall)
  61 * @result:	Result of the operation
  62 * @done_up:	Signal completion to the issuer of the task for cpu-up
  63 * @done_down:	Signal completion to the issuer of the task for cpu-down
  64 */
  65struct cpuhp_cpu_state {
  66	enum cpuhp_state	state;
  67	enum cpuhp_state	target;
  68	enum cpuhp_state	fail;
  69#ifdef CONFIG_SMP
  70	struct task_struct	*thread;
  71	bool			should_run;
  72	bool			rollback;
  73	bool			single;
  74	bool			bringup;
  75	struct hlist_node	*node;
  76	struct hlist_node	*last;
  77	enum cpuhp_state	cb_state;
  78	int			result;
  79	struct completion	done_up;
  80	struct completion	done_down;
  81#endif
  82};
  83
  84static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
  85	.fail = CPUHP_INVALID,
  86};
  87
  88#ifdef CONFIG_SMP
  89cpumask_t cpus_booted_once_mask;
  90#endif
  91
  92#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
  93static struct lockdep_map cpuhp_state_up_map =
  94	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
  95static struct lockdep_map cpuhp_state_down_map =
  96	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
  97
  98
  99static inline void cpuhp_lock_acquire(bool bringup)
 100{
 101	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
 102}
 103
 104static inline void cpuhp_lock_release(bool bringup)
 105{
 106	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
 107}
 108#else
 109
 110static inline void cpuhp_lock_acquire(bool bringup) { }
 111static inline void cpuhp_lock_release(bool bringup) { }
 112
 113#endif
 114
 115/**
 116 * struct cpuhp_step - Hotplug state machine step
 117 * @name:	Name of the step
 118 * @startup:	Startup function of the step
 119 * @teardown:	Teardown function of the step
 120 * @cant_stop:	Bringup/teardown can't be stopped at this step
 121 * @multi_instance:	State has multiple instances which get added afterwards
 122 */
 123struct cpuhp_step {
 124	const char		*name;
 125	union {
 126		int		(*single)(unsigned int cpu);
 127		int		(*multi)(unsigned int cpu,
 128					 struct hlist_node *node);
 129	} startup;
 130	union {
 131		int		(*single)(unsigned int cpu);
 132		int		(*multi)(unsigned int cpu,
 133					 struct hlist_node *node);
 134	} teardown;
 135	/* private: */
 136	struct hlist_head	list;
 137	/* public: */
 138	bool			cant_stop;
 139	bool			multi_instance;
 140};
 141
 142static DEFINE_MUTEX(cpuhp_state_mutex);
 143static struct cpuhp_step cpuhp_hp_states[];
 144
 145static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
 146{
 147	return cpuhp_hp_states + state;
 148}
 149
 150static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
 151{
 152	return bringup ? !step->startup.single : !step->teardown.single;
 153}
 154
 155/**
 156 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 157 * @cpu:	The cpu for which the callback should be invoked
 158 * @state:	The state to do callbacks for
 159 * @bringup:	True if the bringup callback should be invoked
 160 * @node:	For multi-instance, do a single entry callback for install/remove
 161 * @lastp:	For multi-instance rollback, remember how far we got
 162 *
 163 * Called from cpu hotplug and from the state register machinery.
 164 *
 165 * Return: %0 on success or a negative errno code
 166 */
 167static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
 168				 bool bringup, struct hlist_node *node,
 169				 struct hlist_node **lastp)
 170{
 171	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 172	struct cpuhp_step *step = cpuhp_get_step(state);
 173	int (*cbm)(unsigned int cpu, struct hlist_node *node);
 174	int (*cb)(unsigned int cpu);
 175	int ret, cnt;
 176
 177	if (st->fail == state) {
 178		st->fail = CPUHP_INVALID;
 179		return -EAGAIN;
 180	}
 181
 182	if (cpuhp_step_empty(bringup, step)) {
 183		WARN_ON_ONCE(1);
 184		return 0;
 185	}
 186
 187	if (!step->multi_instance) {
 188		WARN_ON_ONCE(lastp && *lastp);
 189		cb = bringup ? step->startup.single : step->teardown.single;
 190
 191		trace_cpuhp_enter(cpu, st->target, state, cb);
 192		ret = cb(cpu);
 193		trace_cpuhp_exit(cpu, st->state, state, ret);
 194		return ret;
 195	}
 196	cbm = bringup ? step->startup.multi : step->teardown.multi;
 197
 198	/* Single invocation for instance add/remove */
 199	if (node) {
 200		WARN_ON_ONCE(lastp && *lastp);
 201		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 202		ret = cbm(cpu, node);
 203		trace_cpuhp_exit(cpu, st->state, state, ret);
 204		return ret;
 205	}
 206
 207	/* State transition. Invoke on all instances */
 208	cnt = 0;
 209	hlist_for_each(node, &step->list) {
 210		if (lastp && node == *lastp)
 211			break;
 212
 213		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 214		ret = cbm(cpu, node);
 215		trace_cpuhp_exit(cpu, st->state, state, ret);
 216		if (ret) {
 217			if (!lastp)
 218				goto err;
 219
 220			*lastp = node;
 221			return ret;
 222		}
 223		cnt++;
 224	}
 225	if (lastp)
 226		*lastp = NULL;
 227	return 0;
 228err:
 229	/* Rollback the instances if one failed */
 230	cbm = !bringup ? step->startup.multi : step->teardown.multi;
 231	if (!cbm)
 232		return ret;
 233
 234	hlist_for_each(node, &step->list) {
 235		if (!cnt--)
 236			break;
 237
 238		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 239		ret = cbm(cpu, node);
 240		trace_cpuhp_exit(cpu, st->state, state, ret);
 241		/*
  242		 * Rollback must not fail.
 243		 */
 244		WARN_ON_ONCE(ret);
 245	}
 246	return ret;
 247}
 248
 249#ifdef CONFIG_SMP
 250static bool cpuhp_is_ap_state(enum cpuhp_state state)
 251{
 252	/*
 253	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
 254	 * purposes as that state is handled explicitly in cpu_down.
 255	 */
 256	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
 257}
 258
 259static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
 260{
 261	struct completion *done = bringup ? &st->done_up : &st->done_down;
 262	wait_for_completion(done);
 263}
 264
 265static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
 266{
 267	struct completion *done = bringup ? &st->done_up : &st->done_down;
 268	complete(done);
 269}
 270
 271/*
 272 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 273 */
 274static bool cpuhp_is_atomic_state(enum cpuhp_state state)
 275{
 276	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
 277}
 278
 279/* Serializes the updates to cpu_online_mask, cpu_present_mask */
 280static DEFINE_MUTEX(cpu_add_remove_lock);
 281bool cpuhp_tasks_frozen;
 282EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
 283
 284/*
 285 * The following two APIs (cpu_maps_update_begin/done) must be used when
 286 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 287 */
 288void cpu_maps_update_begin(void)
 289{
 290	mutex_lock(&cpu_add_remove_lock);
 291}
 292
 293void cpu_maps_update_done(void)
 294{
 295	mutex_unlock(&cpu_add_remove_lock);
 296}
 297
 298/*
 299 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 300 * Should always be manipulated under cpu_add_remove_lock
 301 */
 302static int cpu_hotplug_disabled;
 303
 304#ifdef CONFIG_HOTPLUG_CPU
 305
 306DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
 307
 308void cpus_read_lock(void)
 309{
 310	percpu_down_read(&cpu_hotplug_lock);
 311}
 312EXPORT_SYMBOL_GPL(cpus_read_lock);
 313
 314int cpus_read_trylock(void)
 315{
 316	return percpu_down_read_trylock(&cpu_hotplug_lock);
 317}
 318EXPORT_SYMBOL_GPL(cpus_read_trylock);
 319
 320void cpus_read_unlock(void)
 321{
 322	percpu_up_read(&cpu_hotplug_lock);
 323}
 324EXPORT_SYMBOL_GPL(cpus_read_unlock);
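
/*
 * Editor's illustrative sketch, not part of the original file: a typical
 * read-side user of the hotplug lock. Holding cpus_read_lock() keeps the
 * set of online CPUs stable for the duration of the walk, since any
 * cpu_up()/cpu_down() must take the lock for writing. The function name is
 * hypothetical; only APIs from <linux/cpu.h>/<linux/cpumask.h> are used.
 */
static unsigned int example_count_online_cpus(void)
{
	unsigned int cpu, cnt = 0;

	cpus_read_lock();
	for_each_online_cpu(cpu)
		cnt++;
	cpus_read_unlock();

	return cnt;
}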
 325
 326void cpus_write_lock(void)
 327{
 328	percpu_down_write(&cpu_hotplug_lock);
 329}
 330
 331void cpus_write_unlock(void)
 332{
 333	percpu_up_write(&cpu_hotplug_lock);
 334}
 335
 336void lockdep_assert_cpus_held(void)
 337{
 338	/*
 339	 * We can't have hotplug operations before userspace starts running,
 340	 * and some init codepaths will knowingly not take the hotplug lock.
 341	 * This is all valid, so mute lockdep until it makes sense to report
 342	 * unheld locks.
 343	 */
 344	if (system_state < SYSTEM_RUNNING)
 345		return;
 346
 347	percpu_rwsem_assert_held(&cpu_hotplug_lock);
 348}
 349
 350#ifdef CONFIG_LOCKDEP
 351int lockdep_is_cpus_held(void)
 352{
 353	return percpu_rwsem_is_held(&cpu_hotplug_lock);
 354}
 355#endif
 356
 357static void lockdep_acquire_cpus_lock(void)
 358{
 359	rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
 360}
 361
 362static void lockdep_release_cpus_lock(void)
 363{
 364	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
 365}
 366
 367/*
 368 * Wait for currently running CPU hotplug operations to complete (if any) and
 369 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 370 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 371 * hotplug path before performing hotplug operations. So acquiring that lock
 372 * guarantees mutual exclusion from any currently running hotplug operations.
 373 */
 374void cpu_hotplug_disable(void)
 375{
 376	cpu_maps_update_begin();
 377	cpu_hotplug_disabled++;
 378	cpu_maps_update_done();
 379}
 380EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
 381
 382static void __cpu_hotplug_enable(void)
 383{
 384	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
 385		return;
 386	cpu_hotplug_disabled--;
 387}
 388
 389void cpu_hotplug_enable(void)
 390{
 391	cpu_maps_update_begin();
 392	__cpu_hotplug_enable();
 393	cpu_maps_update_done();
 394}
 395EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
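
/*
 * Editor's illustrative sketch, not part of the original file: pairing
 * cpu_hotplug_disable() with cpu_hotplug_enable() around a sequence that
 * must not race with CPU hotplug but cannot sensibly hold cpus_read_lock()
 * for its whole duration. While the counter is elevated, cpu_up()/cpu_down()
 * attempts fail with -EBUSY. The function names are hypothetical.
 */
static int example_topology_sensitive_sequence(void)
{
	int ret;

	cpu_hotplug_disable();
	ret = example_do_work();	/* hypothetical helper */
	cpu_hotplug_enable();

	return ret;
}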
 396
 397#else
 398
 399static void lockdep_acquire_cpus_lock(void)
 400{
 401}
 402
 403static void lockdep_release_cpus_lock(void)
 404{
 405}
 406
 407#endif	/* CONFIG_HOTPLUG_CPU */
 408
 409/*
 410 * Architectures that need SMT-specific errata handling during SMT hotplug
 411 * should override this.
 412 */
 413void __weak arch_smt_update(void) { }
 414
 415#ifdef CONFIG_HOTPLUG_SMT
 416enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
 417
 418void __init cpu_smt_disable(bool force)
 419{
 420	if (!cpu_smt_possible())
 421		return;
 422
 423	if (force) {
 424		pr_info("SMT: Force disabled\n");
 425		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
 426	} else {
 427		pr_info("SMT: disabled\n");
 428		cpu_smt_control = CPU_SMT_DISABLED;
 429	}
 430}
 431
 432/*
 433 * The decision whether SMT is supported can only be done after the full
 434 * CPU identification. Called from architecture code.
 435 */
 436void __init cpu_smt_check_topology(void)
 437{
 438	if (!topology_smt_supported())
 439		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
 440}
 441
 442static int __init smt_cmdline_disable(char *str)
 443{
 444	cpu_smt_disable(str && !strcmp(str, "force"));
 445	return 0;
 446}
 447early_param("nosmt", smt_cmdline_disable);
 448
 449static inline bool cpu_smt_allowed(unsigned int cpu)
 450{
 451	if (cpu_smt_control == CPU_SMT_ENABLED)
 452		return true;
 453
 454	if (topology_is_primary_thread(cpu))
 455		return true;
 456
 457	/*
 458	 * On x86 it's required to boot all logical CPUs at least once so
 459	 * that the init code can get a chance to set CR4.MCE on each
 460	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
 461	 * core will shutdown the machine.
 462	 */
 463	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
 464}
 465
  466/* Returns true if SMT is not supported or forcefully (irreversibly) disabled */
 467bool cpu_smt_possible(void)
 468{
 469	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
 470		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
 471}
 472EXPORT_SYMBOL_GPL(cpu_smt_possible);
 473#else
 474static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
 475#endif
 476
 477static inline enum cpuhp_state
 478cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
 479{
 480	enum cpuhp_state prev_state = st->state;
 481	bool bringup = st->state < target;
 482
 483	st->rollback = false;
 484	st->last = NULL;
 485
 486	st->target = target;
 487	st->single = false;
 488	st->bringup = bringup;
 489	if (cpu_dying(cpu) != !bringup)
 490		set_cpu_dying(cpu, !bringup);
 491
 492	return prev_state;
 493}
 494
 495static inline void
 496cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
 497		  enum cpuhp_state prev_state)
 498{
 499	bool bringup = !st->bringup;
 500
 501	st->target = prev_state;
 502
 503	/*
  504	 * Already rolling back. No need to invert the bringup value or to change
 505	 * the current state.
 506	 */
 507	if (st->rollback)
 508		return;
 509
 510	st->rollback = true;
 511
 512	/*
 513	 * If we have st->last we need to undo partial multi_instance of this
 514	 * state first. Otherwise start undo at the previous state.
 515	 */
 516	if (!st->last) {
 517		if (st->bringup)
 518			st->state--;
 519		else
 520			st->state++;
 521	}
 522
 523	st->bringup = bringup;
 524	if (cpu_dying(cpu) != !bringup)
 525		set_cpu_dying(cpu, !bringup);
 526}
 527
 528/* Regular hotplug invocation of the AP hotplug thread */
 529static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
 530{
 531	if (!st->single && st->state == st->target)
 532		return;
 533
 534	st->result = 0;
 535	/*
 536	 * Make sure the above stores are visible before should_run becomes
 537	 * true. Paired with the mb() above in cpuhp_thread_fun()
 538	 */
 539	smp_mb();
 540	st->should_run = true;
 541	wake_up_process(st->thread);
 542	wait_for_ap_thread(st, st->bringup);
 543}
 544
 545static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
 546			 enum cpuhp_state target)
 547{
 548	enum cpuhp_state prev_state;
 549	int ret;
 550
 551	prev_state = cpuhp_set_state(cpu, st, target);
 552	__cpuhp_kick_ap(st);
 553	if ((ret = st->result)) {
 554		cpuhp_reset_state(cpu, st, prev_state);
 555		__cpuhp_kick_ap(st);
 556	}
 557
 558	return ret;
 559}
 560
 561static int bringup_wait_for_ap(unsigned int cpu)
 562{
 563	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 564
 565	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
 566	wait_for_ap_thread(st, true);
 567	if (WARN_ON_ONCE((!cpu_online(cpu))))
 568		return -ECANCELED;
 569
 570	/* Unpark the hotplug thread of the target cpu */
 571	kthread_unpark(st->thread);
 572
 573	/*
 574	 * SMT soft disabling on X86 requires to bring the CPU out of the
 575	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
 576	 * CPU marked itself as booted_once in notify_cpu_starting() so the
 577	 * cpu_smt_allowed() check will now return false if this is not the
 578	 * primary sibling.
 579	 */
 580	if (!cpu_smt_allowed(cpu))
 581		return -ECANCELED;
 582
 583	if (st->target <= CPUHP_AP_ONLINE_IDLE)
 584		return 0;
 585
 586	return cpuhp_kick_ap(cpu, st, st->target);
 587}
 588
 589static int bringup_cpu(unsigned int cpu)
 590{
 591	struct task_struct *idle = idle_thread_get(cpu);
 592	int ret;
 593
 594	/*
 595	 * Reset stale stack state from the last time this CPU was online.
 596	 */
 597	scs_task_reset(idle);
 598	kasan_unpoison_task_stack(idle);
 599
 600	/*
 601	 * Some architectures have to walk the irq descriptors to
 602	 * setup the vector space for the cpu which comes online.
 603	 * Prevent irq alloc/free across the bringup.
 604	 */
 605	irq_lock_sparse();
 606
 607	/* Arch-specific enabling code. */
 608	ret = __cpu_up(cpu, idle);
 609	irq_unlock_sparse();
 610	if (ret)
 611		return ret;
 612	return bringup_wait_for_ap(cpu);
 613}
 614
 615static int finish_cpu(unsigned int cpu)
 616{
 617	struct task_struct *idle = idle_thread_get(cpu);
 618	struct mm_struct *mm = idle->active_mm;
 619
 620	/*
 621	 * idle_task_exit() will have switched to &init_mm, now
 622	 * clean up any remaining active_mm state.
 623	 */
 624	if (mm != &init_mm)
 625		idle->active_mm = &init_mm;
 626	mmdrop(mm);
 627	return 0;
 628}
 629
 630/*
 631 * Hotplug state machine related functions
 632 */
 633
 634/*
 635 * Get the next state to run. Empty ones will be skipped. Returns true if a
 636 * state must be run.
 637 *
 638 * st->state will be modified ahead of time, to match state_to_run, as if it
 639 * has already ran.
 640 */
 641static bool cpuhp_next_state(bool bringup,
 642			     enum cpuhp_state *state_to_run,
 643			     struct cpuhp_cpu_state *st,
 644			     enum cpuhp_state target)
 645{
 646	do {
 647		if (bringup) {
 648			if (st->state >= target)
 649				return false;
 650
 651			*state_to_run = ++st->state;
 652		} else {
 653			if (st->state <= target)
 654				return false;
 655
 656			*state_to_run = st->state--;
 657		}
 658
 659		if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
 660			break;
 661	} while (true);
 662
 663	return true;
 664}
 665
 666static int __cpuhp_invoke_callback_range(bool bringup,
 667					 unsigned int cpu,
 668					 struct cpuhp_cpu_state *st,
 669					 enum cpuhp_state target,
 670					 bool nofail)
 671{
 672	enum cpuhp_state state;
 673	int ret = 0;
 674
 675	while (cpuhp_next_state(bringup, &state, st, target)) {
 676		int err;
 677
 678		err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
 679		if (!err)
 680			continue;
 681
 682		if (nofail) {
 683			pr_warn("CPU %u %s state %s (%d) failed (%d)\n",
 684				cpu, bringup ? "UP" : "DOWN",
 685				cpuhp_get_step(st->state)->name,
 686				st->state, err);
 687			ret = -1;
 688		} else {
 689			ret = err;
 690			break;
 691		}
 692	}
 693
 694	return ret;
 695}
 696
 697static inline int cpuhp_invoke_callback_range(bool bringup,
 698					      unsigned int cpu,
 699					      struct cpuhp_cpu_state *st,
 700					      enum cpuhp_state target)
 701{
 702	return __cpuhp_invoke_callback_range(bringup, cpu, st, target, false);
 703}
 704
 705static inline void cpuhp_invoke_callback_range_nofail(bool bringup,
 706						      unsigned int cpu,
 707						      struct cpuhp_cpu_state *st,
 708						      enum cpuhp_state target)
 709{
 710	__cpuhp_invoke_callback_range(bringup, cpu, st, target, true);
 711}
 712
 713static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
 714{
 715	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
 716		return true;
 717	/*
 718	 * When CPU hotplug is disabled, then taking the CPU down is not
 719	 * possible because takedown_cpu() and the architecture and
 720	 * subsystem specific mechanisms are not available. So the CPU
 721	 * which would be completely unplugged again needs to stay around
 722	 * in the current state.
 723	 */
 724	return st->state <= CPUHP_BRINGUP_CPU;
 725}
 726
 727static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 728			      enum cpuhp_state target)
 729{
 730	enum cpuhp_state prev_state = st->state;
 731	int ret = 0;
 732
 733	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
 734	if (ret) {
 735		pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
 736			 ret, cpu, cpuhp_get_step(st->state)->name,
 737			 st->state);
 738
 739		cpuhp_reset_state(cpu, st, prev_state);
 740		if (can_rollback_cpu(st))
 741			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
 742							    prev_state));
 743	}
 744	return ret;
 745}
 746
 747/*
 748 * The cpu hotplug threads manage the bringup and teardown of the cpus
 749 */
 750static int cpuhp_should_run(unsigned int cpu)
 751{
 752	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
 753
 754	return st->should_run;
 755}
 756
 757/*
 758 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 759 * callbacks when a state gets [un]installed at runtime.
 760 *
 761 * Each invocation of this function by the smpboot thread does a single AP
 762 * state callback.
 763 *
 764 * It has 3 modes of operation:
 765 *  - single: runs st->cb_state
 766 *  - up:     runs ++st->state, while st->state < st->target
 767 *  - down:   runs st->state--, while st->state > st->target
 768 *
 769 * When complete or on error, should_run is cleared and the completion is fired.
 770 */
 771static void cpuhp_thread_fun(unsigned int cpu)
 772{
 773	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
 774	bool bringup = st->bringup;
 775	enum cpuhp_state state;
 776
 777	if (WARN_ON_ONCE(!st->should_run))
 778		return;
 779
 780	/*
 781	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
 782	 * that if we see ->should_run we also see the rest of the state.
 783	 */
 784	smp_mb();
 785
 786	/*
 787	 * The BP holds the hotplug lock, but we're now running on the AP,
 788	 * ensure that anybody asserting the lock is held, will actually find
 789	 * it so.
 790	 */
 791	lockdep_acquire_cpus_lock();
 792	cpuhp_lock_acquire(bringup);
 793
 794	if (st->single) {
 795		state = st->cb_state;
 796		st->should_run = false;
 797	} else {
 798		st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
 799		if (!st->should_run)
 800			goto end;
 801	}
 802
 803	WARN_ON_ONCE(!cpuhp_is_ap_state(state));
 804
 805	if (cpuhp_is_atomic_state(state)) {
 806		local_irq_disable();
 807		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
 808		local_irq_enable();
 809
 810		/*
 811		 * STARTING/DYING must not fail!
 812		 */
 813		WARN_ON_ONCE(st->result);
 814	} else {
 815		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
 816	}
 817
 818	if (st->result) {
 819		/*
  820		 * If we fail on a rollback, we're up a creek without a
  821		 * paddle, no way forward, no way back. We lose, thanks for
 822		 * playing.
 823		 */
 824		WARN_ON_ONCE(st->rollback);
 825		st->should_run = false;
 826	}
 827
 828end:
 829	cpuhp_lock_release(bringup);
 830	lockdep_release_cpus_lock();
 831
 832	if (!st->should_run)
 833		complete_ap_thread(st, bringup);
 834}
 835
 836/* Invoke a single callback on a remote cpu */
 837static int
 838cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
 839			 struct hlist_node *node)
 840{
 841	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 842	int ret;
 843
 844	if (!cpu_online(cpu))
 845		return 0;
 846
 847	cpuhp_lock_acquire(false);
 848	cpuhp_lock_release(false);
 849
 850	cpuhp_lock_acquire(true);
 851	cpuhp_lock_release(true);
 852
 853	/*
 854	 * If we are up and running, use the hotplug thread. For early calls
 855	 * we invoke the thread function directly.
 856	 */
 857	if (!st->thread)
 858		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 859
 860	st->rollback = false;
 861	st->last = NULL;
 862
 863	st->node = node;
 864	st->bringup = bringup;
 865	st->cb_state = state;
 866	st->single = true;
 867
 868	__cpuhp_kick_ap(st);
 869
 870	/*
 871	 * If we failed and did a partial, do a rollback.
 872	 */
 873	if ((ret = st->result) && st->last) {
 874		st->rollback = true;
 875		st->bringup = !bringup;
 876
 877		__cpuhp_kick_ap(st);
 878	}
 879
 880	/*
  881	 * Clean up the leftovers so the next hotplug operation won't use stale
 882	 * data.
 883	 */
 884	st->node = st->last = NULL;
 885	return ret;
 886}
 887
 888static int cpuhp_kick_ap_work(unsigned int cpu)
 889{
 890	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 891	enum cpuhp_state prev_state = st->state;
 892	int ret;
 893
 894	cpuhp_lock_acquire(false);
 895	cpuhp_lock_release(false);
 896
 897	cpuhp_lock_acquire(true);
 898	cpuhp_lock_release(true);
 899
 900	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
 901	ret = cpuhp_kick_ap(cpu, st, st->target);
 902	trace_cpuhp_exit(cpu, st->state, prev_state, ret);
 903
 904	return ret;
 905}
 906
 907static struct smp_hotplug_thread cpuhp_threads = {
 908	.store			= &cpuhp_state.thread,
 909	.thread_should_run	= cpuhp_should_run,
 910	.thread_fn		= cpuhp_thread_fun,
 911	.thread_comm		= "cpuhp/%u",
 912	.selfparking		= true,
 913};
 914
 915static __init void cpuhp_init_state(void)
 916{
 917	struct cpuhp_cpu_state *st;
 918	int cpu;
 919
 920	for_each_possible_cpu(cpu) {
 921		st = per_cpu_ptr(&cpuhp_state, cpu);
 922		init_completion(&st->done_up);
 923		init_completion(&st->done_down);
 924	}
 925}
 926
 927void __init cpuhp_threads_init(void)
 928{
 929	cpuhp_init_state();
 930	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
 931	kthread_unpark(this_cpu_read(cpuhp_state.thread));
 932}
 933
 934/*
 935 *
 936 * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
 937 * protected region.
 938 *
 939 * The operation is still serialized against concurrent CPU hotplug via
 940 * cpu_add_remove_lock, i.e. CPU map protection.  But it is _not_
 941 * serialized against other hotplug related activity like adding or
 942 * removing of state callbacks and state instances, which invoke either the
 943 * startup or the teardown callback of the affected state.
 944 *
 945 * This is required for subsystems which are unfixable vs. CPU hotplug and
 946 * evade lock inversion problems by scheduling work which has to be
 947 * completed _before_ cpu_up()/_cpu_down() returns.
 948 *
 949 * Don't even think about adding anything to this for any new code or even
  950 * drivers. Its only purpose is to keep existing lock order trainwrecks
 951 * working.
 952 *
 953 * For cpu_down() there might be valid reasons to finish cleanups which are
 954 * not required to be done under cpu_hotplug_lock, but that's a different
 955 * story and would be not invoked via this.
 956 */
 957static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
 958{
 959	/*
 960	 * cpusets delegate hotplug operations to a worker to "solve" the
 961	 * lock order problems. Wait for the worker, but only if tasks are
 962	 * _not_ frozen (suspend, hibernate) as that would wait forever.
 963	 *
 964	 * The wait is required because otherwise the hotplug operation
 965	 * returns with inconsistent state, which could even be observed in
 966	 * user space when a new CPU is brought up. The CPU plug uevent
 967	 * would be delivered and user space reacting on it would fail to
 968	 * move tasks to the newly plugged CPU up to the point where the
 969	 * work has finished because up to that point the newly plugged CPU
 970	 * is not assignable in cpusets/cgroups. On unplug that's not
 971	 * necessarily a visible issue, but it is still inconsistent state,
 972	 * which is the real problem which needs to be "fixed". This can't
 973	 * prevent the transient state between scheduling the work and
 974	 * returning from waiting for it.
 975	 */
 976	if (!tasks_frozen)
 977		cpuset_wait_for_hotplug();
 978}
 979
 980#ifdef CONFIG_HOTPLUG_CPU
 981#ifndef arch_clear_mm_cpumask_cpu
 982#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
 983#endif
 984
 985/**
 986 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 987 * @cpu: a CPU id
 988 *
 989 * This function walks all processes, finds a valid mm struct for each one and
 990 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 991 * trivial, there are various non-obvious corner cases, which this function
 992 * tries to solve in a safe manner.
 993 *
 994 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 995 * be called only for an already offlined CPU.
 996 */
 997void clear_tasks_mm_cpumask(int cpu)
 998{
 999	struct task_struct *p;
1000
1001	/*
1002	 * This function is called after the cpu is taken down and marked
 1003	 * offline, so it's not like new tasks will ever get this cpu set in
1004	 * their mm mask. -- Peter Zijlstra
1005	 * Thus, we may use rcu_read_lock() here, instead of grabbing
1006	 * full-fledged tasklist_lock.
1007	 */
1008	WARN_ON(cpu_online(cpu));
1009	rcu_read_lock();
1010	for_each_process(p) {
1011		struct task_struct *t;
1012
1013		/*
1014		 * Main thread might exit, but other threads may still have
1015		 * a valid mm. Find one.
1016		 */
1017		t = find_lock_task_mm(p);
1018		if (!t)
1019			continue;
1020		arch_clear_mm_cpumask_cpu(cpu, t->mm);
1021		task_unlock(t);
1022	}
1023	rcu_read_unlock();
1024}
1025
1026/* Take this CPU down. */
1027static int take_cpu_down(void *_param)
1028{
1029	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1030	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
1031	int err, cpu = smp_processor_id();
1032
1033	/* Ensure this CPU doesn't handle any more interrupts. */
1034	err = __cpu_disable();
1035	if (err < 0)
1036		return err;
1037
1038	/*
1039	 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
1040	 * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
1041	 */
1042	WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
1043
1044	/*
1045	 * Invoke the former CPU_DYING callbacks. DYING must not fail!
1046	 */
1047	cpuhp_invoke_callback_range_nofail(false, cpu, st, target);
1048
1049	/* Give up timekeeping duties */
1050	tick_handover_do_timer();
1051	/* Remove CPU from timer broadcasting */
1052	tick_offline_cpu(cpu);
1053	/* Park the stopper thread */
1054	stop_machine_park(cpu);
1055	return 0;
1056}
1057
1058static int takedown_cpu(unsigned int cpu)
1059{
1060	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1061	int err;
1062
1063	/* Park the smpboot threads */
1064	kthread_park(st->thread);
1065
1066	/*
1067	 * Prevent irq alloc/free while the dying cpu reorganizes the
1068	 * interrupt affinities.
1069	 */
1070	irq_lock_sparse();
1071
1072	/*
1073	 * So now all preempt/rcu users must observe !cpu_active().
1074	 */
1075	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
1076	if (err) {
1077		/* CPU refused to die */
1078		irq_unlock_sparse();
1079		/* Unpark the hotplug thread so we can rollback there */
1080		kthread_unpark(st->thread);
1081		return err;
1082	}
1083	BUG_ON(cpu_online(cpu));
1084
1085	/*
1086	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
1087	 * all runnable tasks from the CPU, there's only the idle task left now
1088	 * that the migration thread is done doing the stop_machine thing.
1089	 *
1090	 * Wait for the stop thread to go away.
1091	 */
1092	wait_for_ap_thread(st, false);
1093	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1094
1095	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
1096	irq_unlock_sparse();
1097
1098	hotplug_cpu__broadcast_tick_pull(cpu);
1099	/* This actually kills the CPU. */
1100	__cpu_die(cpu);
1101
1102	tick_cleanup_dead_cpu(cpu);
1103	rcutree_migrate_callbacks(cpu);
1104	return 0;
1105}
1106
1107static void cpuhp_complete_idle_dead(void *arg)
1108{
1109	struct cpuhp_cpu_state *st = arg;
1110
1111	complete_ap_thread(st, false);
1112}
1113
1114void cpuhp_report_idle_dead(void)
1115{
1116	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1117
1118	BUG_ON(st->state != CPUHP_AP_OFFLINE);
1119	rcu_report_dead(smp_processor_id());
1120	st->state = CPUHP_AP_IDLE_DEAD;
1121	/*
1122	 * We cannot call complete after rcu_report_dead() so we delegate it
1123	 * to an online cpu.
1124	 */
1125	smp_call_function_single(cpumask_first(cpu_online_mask),
1126				 cpuhp_complete_idle_dead, st, 0);
1127}
1128
1129static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1130				enum cpuhp_state target)
1131{
1132	enum cpuhp_state prev_state = st->state;
1133	int ret = 0;
1134
1135	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
1136	if (ret) {
1137		pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
1138			 ret, cpu, cpuhp_get_step(st->state)->name,
1139			 st->state);
1140
1141		cpuhp_reset_state(cpu, st, prev_state);
1142
1143		if (st->state < prev_state)
1144			WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
1145							    prev_state));
1146	}
1147
1148	return ret;
1149}
1150
1151/* Requires cpu_add_remove_lock to be held */
1152static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1153			   enum cpuhp_state target)
1154{
1155	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1156	int prev_state, ret = 0;
1157
1158	if (num_online_cpus() == 1)
1159		return -EBUSY;
1160
1161	if (!cpu_present(cpu))
1162		return -EINVAL;
1163
1164	cpus_write_lock();
1165
1166	cpuhp_tasks_frozen = tasks_frozen;
1167
1168	prev_state = cpuhp_set_state(cpu, st, target);
1169	/*
1170	 * If the current CPU state is in the range of the AP hotplug thread,
1171	 * then we need to kick the thread.
1172	 */
1173	if (st->state > CPUHP_TEARDOWN_CPU) {
1174		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
1175		ret = cpuhp_kick_ap_work(cpu);
1176		/*
1177		 * The AP side has done the error rollback already. Just
1178		 * return the error code..
1179		 */
1180		if (ret)
1181			goto out;
1182
1183		/*
1184		 * We might have stopped still in the range of the AP hotplug
1185		 * thread. Nothing to do anymore.
1186		 */
1187		if (st->state > CPUHP_TEARDOWN_CPU)
1188			goto out;
1189
1190		st->target = target;
1191	}
1192	/*
1193	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1194	 * to do the further cleanups.
1195	 */
1196	ret = cpuhp_down_callbacks(cpu, st, target);
1197	if (ret && st->state < prev_state) {
1198		if (st->state == CPUHP_TEARDOWN_CPU) {
1199			cpuhp_reset_state(cpu, st, prev_state);
1200			__cpuhp_kick_ap(st);
1201		} else {
1202			WARN(1, "DEAD callback error for CPU%d", cpu);
1203		}
1204	}
1205
1206out:
1207	cpus_write_unlock();
1208	/*
1209	 * Do post unplug cleanup. This is still protected against
1210	 * concurrent CPU hotplug via cpu_add_remove_lock.
1211	 */
1212	lockup_detector_cleanup();
1213	arch_smt_update();
1214	cpu_up_down_serialize_trainwrecks(tasks_frozen);
1215	return ret;
1216}
1217
1218static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1219{
1220	/*
1221	 * If the platform does not support hotplug, report it explicitly to
1222	 * differentiate it from a transient offlining failure.
1223	 */
1224	if (cc_platform_has(CC_ATTR_HOTPLUG_DISABLED))
1225		return -EOPNOTSUPP;
1226	if (cpu_hotplug_disabled)
1227		return -EBUSY;
1228	return _cpu_down(cpu, 0, target);
1229}
1230
1231static int cpu_down(unsigned int cpu, enum cpuhp_state target)
1232{
1233	int err;
1234
1235	cpu_maps_update_begin();
1236	err = cpu_down_maps_locked(cpu, target);
1237	cpu_maps_update_done();
1238	return err;
1239}
1240
1241/**
1242 * cpu_device_down - Bring down a cpu device
1243 * @dev: Pointer to the cpu device to offline
1244 *
1245 * This function is meant to be used by device core cpu subsystem only.
1246 *
1247 * Other subsystems should use remove_cpu() instead.
1248 *
1249 * Return: %0 on success or a negative errno code
1250 */
1251int cpu_device_down(struct device *dev)
1252{
1253	return cpu_down(dev->id, CPUHP_OFFLINE);
1254}
1255
1256int remove_cpu(unsigned int cpu)
1257{
1258	int ret;
1259
1260	lock_device_hotplug();
1261	ret = device_offline(get_cpu_device(cpu));
1262	unlock_device_hotplug();
1263
1264	return ret;
1265}
1266EXPORT_SYMBOL_GPL(remove_cpu);
1267
1268void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
1269{
1270	unsigned int cpu;
1271	int error;
1272
1273	cpu_maps_update_begin();
1274
1275	/*
1276	 * Make certain the cpu I'm about to reboot on is online.
1277	 *
 1278	 * This is in line with what migrate_to_reboot_cpu() already does.
1279	 */
1280	if (!cpu_online(primary_cpu))
1281		primary_cpu = cpumask_first(cpu_online_mask);
1282
1283	for_each_online_cpu(cpu) {
1284		if (cpu == primary_cpu)
1285			continue;
1286
1287		error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
1288		if (error) {
 1289			pr_err("Failed to offline CPU%d - error=%d\n",
1290				cpu, error);
1291			break;
1292		}
1293	}
1294
1295	/*
1296	 * Ensure all but the reboot CPU are offline.
1297	 */
1298	BUG_ON(num_online_cpus() > 1);
1299
1300	/*
1301	 * Make sure the CPUs won't be enabled by someone else after this
1302	 * point. Kexec will reboot to a new kernel shortly resetting
1303	 * everything along the way.
1304	 */
1305	cpu_hotplug_disabled++;
1306
1307	cpu_maps_update_done();
1308}
1309
1310#else
1311#define takedown_cpu		NULL
1312#endif /*CONFIG_HOTPLUG_CPU*/
1313
1314/**
1315 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1316 * @cpu: cpu that just started
1317 *
1318 * It must be called by the arch code on the new cpu, before the new cpu
1319 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1320 */
1321void notify_cpu_starting(unsigned int cpu)
1322{
1323	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1324	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1325
1326	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
1327	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
1328
1329	/*
1330	 * STARTING must not fail!
1331	 */
1332	cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
1333}
1334
1335/*
1336 * Called from the idle task. Wake up the controlling task which brings the
1337 * hotplug thread of the upcoming CPU up and then delegates the rest of the
1338 * online bringup to the hotplug thread.
1339 */
1340void cpuhp_online_idle(enum cpuhp_state state)
1341{
1342	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1343
1344	/* Happens for the boot cpu */
1345	if (state != CPUHP_AP_ONLINE_IDLE)
1346		return;
1347
1348	/*
 1349	 * Unpark the stopper thread before we start the idle loop (and start
1350	 * scheduling); this ensures the stopper task is always available.
1351	 */
1352	stop_machine_unpark(smp_processor_id());
1353
1354	st->state = CPUHP_AP_ONLINE_IDLE;
1355	complete_ap_thread(st, true);
1356}
1357
1358/* Requires cpu_add_remove_lock to be held */
1359static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1360{
1361	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1362	struct task_struct *idle;
1363	int ret = 0;
1364
1365	cpus_write_lock();
1366
1367	if (!cpu_present(cpu)) {
1368		ret = -EINVAL;
1369		goto out;
1370	}
1371
1372	/*
1373	 * The caller of cpu_up() might have raced with another
1374	 * caller. Nothing to do.
1375	 */
1376	if (st->state >= target)
1377		goto out;
1378
1379	if (st->state == CPUHP_OFFLINE) {
1380		/* Let it fail before we try to bring the cpu up */
1381		idle = idle_thread_get(cpu);
1382		if (IS_ERR(idle)) {
1383			ret = PTR_ERR(idle);
1384			goto out;
1385		}
1386	}
1387
1388	cpuhp_tasks_frozen = tasks_frozen;
1389
1390	cpuhp_set_state(cpu, st, target);
1391	/*
1392	 * If the current CPU state is in the range of the AP hotplug thread,
1393	 * then we need to kick the thread once more.
1394	 */
1395	if (st->state > CPUHP_BRINGUP_CPU) {
1396		ret = cpuhp_kick_ap_work(cpu);
1397		/*
1398		 * The AP side has done the error rollback already. Just
1399		 * return the error code..
1400		 */
1401		if (ret)
1402			goto out;
1403	}
1404
1405	/*
1406	 * Try to reach the target state. We max out on the BP at
1407	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1408	 * responsible for bringing it up to the target state.
1409	 */
1410	target = min((int)target, CPUHP_BRINGUP_CPU);
1411	ret = cpuhp_up_callbacks(cpu, st, target);
1412out:
1413	cpus_write_unlock();
1414	arch_smt_update();
1415	cpu_up_down_serialize_trainwrecks(tasks_frozen);
1416	return ret;
1417}
1418
1419static int cpu_up(unsigned int cpu, enum cpuhp_state target)
1420{
1421	int err = 0;
1422
1423	if (!cpu_possible(cpu)) {
1424		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1425		       cpu);
1426#if defined(CONFIG_IA64)
1427		pr_err("please check additional_cpus= boot parameter\n");
1428#endif
1429		return -EINVAL;
1430	}
1431
1432	err = try_online_node(cpu_to_node(cpu));
1433	if (err)
1434		return err;
1435
1436	cpu_maps_update_begin();
1437
1438	if (cpu_hotplug_disabled) {
1439		err = -EBUSY;
1440		goto out;
1441	}
1442	if (!cpu_smt_allowed(cpu)) {
1443		err = -EPERM;
1444		goto out;
1445	}
1446
1447	err = _cpu_up(cpu, 0, target);
1448out:
1449	cpu_maps_update_done();
1450	return err;
1451}
1452
1453/**
1454 * cpu_device_up - Bring up a cpu device
1455 * @dev: Pointer to the cpu device to online
1456 *
1457 * This function is meant to be used by device core cpu subsystem only.
1458 *
1459 * Other subsystems should use add_cpu() instead.
1460 *
1461 * Return: %0 on success or a negative errno code
1462 */
1463int cpu_device_up(struct device *dev)
1464{
1465	return cpu_up(dev->id, CPUHP_ONLINE);
1466}
1467
1468int add_cpu(unsigned int cpu)
1469{
1470	int ret;
1471
1472	lock_device_hotplug();
1473	ret = device_online(get_cpu_device(cpu));
1474	unlock_device_hotplug();
1475
1476	return ret;
1477}
1478EXPORT_SYMBOL_GPL(add_cpu);
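
/*
 * Editor's illustrative sketch, not part of the original file: a subsystem
 * (other than the device core) cycling a CPU through offline and back online
 * with remove_cpu()/add_cpu(). Both walk the full teardown/bringup state
 * chain via device_offline()/device_online(). The function name is
 * hypothetical.
 */
static int example_cycle_cpu(unsigned int cpu)
{
	int ret;

	ret = remove_cpu(cpu);
	if (ret)
		return ret;

	return add_cpu(cpu);
}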
1479
1480/**
1481 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1482 * @sleep_cpu: The cpu we hibernated on and should be brought up.
1483 *
1484 * On some architectures like arm64, we can hibernate on any CPU, but on
1485 * wake up the CPU we hibernated on might be offline as a side effect of
1486 * using maxcpus= for example.
1487 *
1488 * Return: %0 on success or a negative errno code
1489 */
1490int bringup_hibernate_cpu(unsigned int sleep_cpu)
1491{
1492	int ret;
1493
1494	if (!cpu_online(sleep_cpu)) {
1495		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
1496		ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
1497		if (ret) {
1498			pr_err("Failed to bring hibernate-CPU up!\n");
1499			return ret;
1500		}
1501	}
1502	return 0;
1503}
1504
1505void bringup_nonboot_cpus(unsigned int setup_max_cpus)
1506{
1507	unsigned int cpu;
1508
1509	for_each_present_cpu(cpu) {
1510		if (num_online_cpus() >= setup_max_cpus)
1511			break;
1512		if (!cpu_online(cpu))
1513			cpu_up(cpu, CPUHP_ONLINE);
1514	}
1515}
1516
1517#ifdef CONFIG_PM_SLEEP_SMP
1518static cpumask_var_t frozen_cpus;
1519
1520int freeze_secondary_cpus(int primary)
1521{
1522	int cpu, error = 0;
1523
1524	cpu_maps_update_begin();
1525	if (primary == -1) {
1526		primary = cpumask_first(cpu_online_mask);
1527		if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
1528			primary = housekeeping_any_cpu(HK_TYPE_TIMER);
1529	} else {
1530		if (!cpu_online(primary))
1531			primary = cpumask_first(cpu_online_mask);
1532	}
1533
1534	/*
1535	 * We take down all of the non-boot CPUs in one shot to avoid races
1536	 * with the userspace trying to use the CPU hotplug at the same time
1537	 */
1538	cpumask_clear(frozen_cpus);
1539
1540	pr_info("Disabling non-boot CPUs ...\n");
1541	for_each_online_cpu(cpu) {
1542		if (cpu == primary)
1543			continue;
1544
1545		if (pm_wakeup_pending()) {
1546			pr_info("Wakeup pending. Abort CPU freeze\n");
1547			error = -EBUSY;
1548			break;
1549		}
1550
1551		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1552		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1553		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1554		if (!error)
1555			cpumask_set_cpu(cpu, frozen_cpus);
1556		else {
1557			pr_err("Error taking CPU%d down: %d\n", cpu, error);
1558			break;
1559		}
1560	}
1561
1562	if (!error)
1563		BUG_ON(num_online_cpus() > 1);
1564	else
1565		pr_err("Non-boot CPUs are not disabled\n");
1566
1567	/*
1568	 * Make sure the CPUs won't be enabled by someone else. We need to do
1569	 * this even in case of failure as all freeze_secondary_cpus() users are
1570	 * supposed to do thaw_secondary_cpus() on the failure path.
1571	 */
1572	cpu_hotplug_disabled++;
1573
1574	cpu_maps_update_done();
1575	return error;
1576}
1577
1578void __weak arch_thaw_secondary_cpus_begin(void)
1579{
1580}
1581
1582void __weak arch_thaw_secondary_cpus_end(void)
1583{
1584}
1585
1586void thaw_secondary_cpus(void)
1587{
1588	int cpu, error;
1589
1590	/* Allow everyone to use the CPU hotplug again */
1591	cpu_maps_update_begin();
1592	__cpu_hotplug_enable();
1593	if (cpumask_empty(frozen_cpus))
1594		goto out;
1595
1596	pr_info("Enabling non-boot CPUs ...\n");
1597
1598	arch_thaw_secondary_cpus_begin();
1599
1600	for_each_cpu(cpu, frozen_cpus) {
1601		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1602		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1603		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1604		if (!error) {
1605			pr_info("CPU%d is up\n", cpu);
1606			continue;
1607		}
1608		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1609	}
1610
1611	arch_thaw_secondary_cpus_end();
1612
1613	cpumask_clear(frozen_cpus);
1614out:
1615	cpu_maps_update_done();
1616}
1617
1618static int __init alloc_frozen_cpus(void)
1619{
1620	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1621		return -ENOMEM;
1622	return 0;
1623}
1624core_initcall(alloc_frozen_cpus);
1625
1626/*
1627 * When callbacks for CPU hotplug notifications are being executed, we must
1628 * ensure that the state of the system with respect to the tasks being frozen
1629 * or not, as reported by the notification, remains unchanged *throughout the
1630 * duration* of the execution of the callbacks.
1631 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1632 *
1633 * This synchronization is implemented by mutually excluding regular CPU
1634 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1635 * Hibernate notifications.
1636 */
1637static int
1638cpu_hotplug_pm_callback(struct notifier_block *nb,
1639			unsigned long action, void *ptr)
1640{
1641	switch (action) {
1642
1643	case PM_SUSPEND_PREPARE:
1644	case PM_HIBERNATION_PREPARE:
1645		cpu_hotplug_disable();
1646		break;
1647
1648	case PM_POST_SUSPEND:
1649	case PM_POST_HIBERNATION:
1650		cpu_hotplug_enable();
1651		break;
1652
1653	default:
1654		return NOTIFY_DONE;
1655	}
1656
1657	return NOTIFY_OK;
1658}
1659
1660
1661static int __init cpu_hotplug_pm_sync_init(void)
1662{
1663	/*
1664	 * cpu_hotplug_pm_callback has higher priority than x86
1665	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1666	 * to disable cpu hotplug to avoid cpu hotplug race.
1667	 */
1668	pm_notifier(cpu_hotplug_pm_callback, 0);
1669	return 0;
1670}
1671core_initcall(cpu_hotplug_pm_sync_init);
1672
1673#endif /* CONFIG_PM_SLEEP_SMP */
1674
1675int __boot_cpu_id;
1676
1677#endif /* CONFIG_SMP */
1678
1679/* Boot processor state steps */
1680static struct cpuhp_step cpuhp_hp_states[] = {
1681	[CPUHP_OFFLINE] = {
1682		.name			= "offline",
1683		.startup.single		= NULL,
1684		.teardown.single	= NULL,
1685	},
1686#ifdef CONFIG_SMP
1687	[CPUHP_CREATE_THREADS]= {
1688		.name			= "threads:prepare",
1689		.startup.single		= smpboot_create_threads,
1690		.teardown.single	= NULL,
1691		.cant_stop		= true,
1692	},
1693	[CPUHP_PERF_PREPARE] = {
1694		.name			= "perf:prepare",
1695		.startup.single		= perf_event_init_cpu,
1696		.teardown.single	= perf_event_exit_cpu,
1697	},
1698	[CPUHP_RANDOM_PREPARE] = {
1699		.name			= "random:prepare",
1700		.startup.single		= random_prepare_cpu,
1701		.teardown.single	= NULL,
1702	},
1703	[CPUHP_WORKQUEUE_PREP] = {
1704		.name			= "workqueue:prepare",
1705		.startup.single		= workqueue_prepare_cpu,
1706		.teardown.single	= NULL,
1707	},
1708	[CPUHP_HRTIMERS_PREPARE] = {
1709		.name			= "hrtimers:prepare",
1710		.startup.single		= hrtimers_prepare_cpu,
1711		.teardown.single	= hrtimers_dead_cpu,
1712	},
1713	[CPUHP_SMPCFD_PREPARE] = {
1714		.name			= "smpcfd:prepare",
1715		.startup.single		= smpcfd_prepare_cpu,
1716		.teardown.single	= smpcfd_dead_cpu,
1717	},
1718	[CPUHP_RELAY_PREPARE] = {
1719		.name			= "relay:prepare",
1720		.startup.single		= relay_prepare_cpu,
1721		.teardown.single	= NULL,
1722	},
1723	[CPUHP_SLAB_PREPARE] = {
1724		.name			= "slab:prepare",
1725		.startup.single		= slab_prepare_cpu,
1726		.teardown.single	= slab_dead_cpu,
1727	},
1728	[CPUHP_RCUTREE_PREP] = {
1729		.name			= "RCU/tree:prepare",
1730		.startup.single		= rcutree_prepare_cpu,
1731		.teardown.single	= rcutree_dead_cpu,
1732	},
1733	/*
1734	 * On the tear-down path, timers_dead_cpu() must be invoked
1735	 * before blk_mq_queue_reinit_notify() from notify_dead(),
1736	 * otherwise a RCU stall occurs.
1737	 */
1738	[CPUHP_TIMERS_PREPARE] = {
1739		.name			= "timers:prepare",
1740		.startup.single		= timers_prepare_cpu,
1741		.teardown.single	= timers_dead_cpu,
1742	},
1743	/* Kicks the plugged cpu into life */
1744	[CPUHP_BRINGUP_CPU] = {
1745		.name			= "cpu:bringup",
1746		.startup.single		= bringup_cpu,
1747		.teardown.single	= finish_cpu,
1748		.cant_stop		= true,
1749	},
1750	/* Final state before CPU kills itself */
1751	[CPUHP_AP_IDLE_DEAD] = {
1752		.name			= "idle:dead",
1753	},
1754	/*
1755	 * Last state before CPU enters the idle loop to die. Transient state
1756	 * for synchronization.
1757	 */
1758	[CPUHP_AP_OFFLINE] = {
1759		.name			= "ap:offline",
1760		.cant_stop		= true,
1761	},
1762	/* First state is scheduler control. Interrupts are disabled */
1763	[CPUHP_AP_SCHED_STARTING] = {
1764		.name			= "sched:starting",
1765		.startup.single		= sched_cpu_starting,
1766		.teardown.single	= sched_cpu_dying,
1767	},
1768	[CPUHP_AP_RCUTREE_DYING] = {
1769		.name			= "RCU/tree:dying",
1770		.startup.single		= NULL,
1771		.teardown.single	= rcutree_dying_cpu,
1772	},
1773	[CPUHP_AP_SMPCFD_DYING] = {
1774		.name			= "smpcfd:dying",
1775		.startup.single		= NULL,
1776		.teardown.single	= smpcfd_dying_cpu,
1777	},
1778	/* Entry state on starting. Interrupts enabled from here on. Transient
 1779	 * state for synchronization */
1780	[CPUHP_AP_ONLINE] = {
1781		.name			= "ap:online",
1782	},
1783	/*
1784	 * Handled on control processor until the plugged processor manages
1785	 * this itself.
1786	 */
1787	[CPUHP_TEARDOWN_CPU] = {
1788		.name			= "cpu:teardown",
1789		.startup.single		= NULL,
1790		.teardown.single	= takedown_cpu,
1791		.cant_stop		= true,
1792	},
1793
1794	[CPUHP_AP_SCHED_WAIT_EMPTY] = {
1795		.name			= "sched:waitempty",
1796		.startup.single		= NULL,
1797		.teardown.single	= sched_cpu_wait_empty,
1798	},
1799
1800	/* Handle smpboot threads park/unpark */
1801	[CPUHP_AP_SMPBOOT_THREADS] = {
1802		.name			= "smpboot/threads:online",
1803		.startup.single		= smpboot_unpark_threads,
1804		.teardown.single	= smpboot_park_threads,
1805	},
1806	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1807		.name			= "irq/affinity:online",
1808		.startup.single		= irq_affinity_online_cpu,
1809		.teardown.single	= NULL,
1810	},
1811	[CPUHP_AP_PERF_ONLINE] = {
1812		.name			= "perf:online",
1813		.startup.single		= perf_event_init_cpu,
1814		.teardown.single	= perf_event_exit_cpu,
1815	},
1816	[CPUHP_AP_WATCHDOG_ONLINE] = {
1817		.name			= "lockup_detector:online",
1818		.startup.single		= lockup_detector_online_cpu,
1819		.teardown.single	= lockup_detector_offline_cpu,
1820	},
1821	[CPUHP_AP_WORKQUEUE_ONLINE] = {
1822		.name			= "workqueue:online",
1823		.startup.single		= workqueue_online_cpu,
1824		.teardown.single	= workqueue_offline_cpu,
1825	},
1826	[CPUHP_AP_RANDOM_ONLINE] = {
1827		.name			= "random:online",
1828		.startup.single		= random_online_cpu,
1829		.teardown.single	= NULL,
1830	},
1831	[CPUHP_AP_RCUTREE_ONLINE] = {
1832		.name			= "RCU/tree:online",
1833		.startup.single		= rcutree_online_cpu,
1834		.teardown.single	= rcutree_offline_cpu,
1835	},
1836#endif
1837	/*
1838	 * The dynamically registered state space is here
1839	 */
1840
1841#ifdef CONFIG_SMP
1842	/* Last state is scheduler control setting the cpu active */
1843	[CPUHP_AP_ACTIVE] = {
1844		.name			= "sched:active",
1845		.startup.single		= sched_cpu_activate,
1846		.teardown.single	= sched_cpu_deactivate,
1847	},
1848#endif
1849
1850	/* CPU is fully up and running. */
1851	[CPUHP_ONLINE] = {
1852		.name			= "online",
1853		.startup.single		= NULL,
1854		.teardown.single	= NULL,
1855	},
1856};
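
/*
 * Editor's illustrative sketch, not part of the original file: how a driver
 * typically lands in the dynamic range mentioned above. cpuhp_setup_state()
 * (declared in <linux/cpuhotplug.h>) wraps __cpuhp_setup_state(); when called
 * with CPUHP_AP_ONLINE_DYN it returns the dynamically reserved state number,
 * which must be kept for a later cpuhp_remove_state(). All "example_*" names
 * are hypothetical.
 */
static enum cpuhp_state example_hp_online;

static int example_cpu_online(unsigned int cpu)
{
	/* per-CPU setup; a failure here triggers a rollback */
	return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
	/* per-CPU teardown; expected to succeed */
	return 0;
}

static int __init example_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example/driver:online",
				example_cpu_online, example_cpu_offline);
	if (ret < 0)
		return ret;

	example_hp_online = ret;
	return 0;
}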
1857
1858/* Sanity check for callbacks */
1859static int cpuhp_cb_check(enum cpuhp_state state)
1860{
1861	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1862		return -EINVAL;
1863	return 0;
1864}
1865
1866/*
 1867 * Returns a free slot for dynamic state assignment in the requested range. The
 1868 * states are protected by the cpuhp_state_mutex and an empty slot is identified
1869 * by having no name assigned.
1870 */
1871static int cpuhp_reserve_state(enum cpuhp_state state)
1872{
1873	enum cpuhp_state i, end;
1874	struct cpuhp_step *step;
1875
1876	switch (state) {
1877	case CPUHP_AP_ONLINE_DYN:
1878		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
1879		end = CPUHP_AP_ONLINE_DYN_END;
1880		break;
1881	case CPUHP_BP_PREPARE_DYN:
1882		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
1883		end = CPUHP_BP_PREPARE_DYN_END;
1884		break;
1885	default:
1886		return -EINVAL;
1887	}
1888
1889	for (i = state; i <= end; i++, step++) {
1890		if (!step->name)
1891			return i;
1892	}
1893	WARN(1, "No more dynamic states available for CPU hotplug\n");
1894	return -ENOSPC;
1895}
1896
1897static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1898				 int (*startup)(unsigned int cpu),
1899				 int (*teardown)(unsigned int cpu),
1900				 bool multi_instance)
1901{
1902	/* (Un)Install the callbacks for further cpu hotplug operations */
1903	struct cpuhp_step *sp;
1904	int ret = 0;
1905
1906	/*
1907	 * If name is NULL, then the state gets removed.
1908	 *
1909	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
1910	 * the first allocation from these dynamic ranges, so the removal
1911	 * would trigger a new allocation and clear the wrong (already
1912	 * empty) state, leaving the callbacks of the to be cleared state
1913	 * dangling, which causes wreckage on the next hotplug operation.
1914	 */
1915	if (name && (state == CPUHP_AP_ONLINE_DYN ||
1916		     state == CPUHP_BP_PREPARE_DYN)) {
1917		ret = cpuhp_reserve_state(state);
1918		if (ret < 0)
1919			return ret;
1920		state = ret;
1921	}
1922	sp = cpuhp_get_step(state);
1923	if (name && sp->name)
1924		return -EBUSY;
1925
1926	sp->startup.single = startup;
1927	sp->teardown.single = teardown;
1928	sp->name = name;
1929	sp->multi_instance = multi_instance;
1930	INIT_HLIST_HEAD(&sp->list);
1931	return ret;
1932}
1933
1934static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1935{
1936	return cpuhp_get_step(state)->teardown.single;
1937}
1938
1939/*
1940 * Call the startup/teardown function for a step either on the AP or
1941 * on the current CPU.
1942 */
1943static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1944			    struct hlist_node *node)
1945{
1946	struct cpuhp_step *sp = cpuhp_get_step(state);
1947	int ret;
1948
1949	/*
 1950	 * If there's nothing to do, we're done.
1951	 * Relies on the union for multi_instance.
1952	 */
1953	if (cpuhp_step_empty(bringup, sp))
1954		return 0;
1955	/*
 1956	 * The non-AP-bound callbacks can fail on bringup. On teardown,
 1957	 * e.g. module removal, we crash for now.
1958	 */
1959#ifdef CONFIG_SMP
1960	if (cpuhp_is_ap_state(state))
1961		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1962	else
1963		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1964#else
1965	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1966#endif
1967	BUG_ON(ret && !bringup);
1968	return ret;
1969}
1970
1971/*
1972 * Called from __cpuhp_setup_state on a recoverable failure.
1973 *
1974 * Note: The teardown callbacks for rollback are not allowed to fail!
1975 */
1976static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1977				   struct hlist_node *node)
1978{
1979	int cpu;
1980
1981	/* Roll back the already executed steps on the other cpus */
1982	for_each_present_cpu(cpu) {
1983		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1984		int cpustate = st->state;
1985
1986		if (cpu >= failedcpu)
1987			break;
1988
1989		/* Did we invoke the startup call on that cpu ? */
1990		if (cpustate >= state)
1991			cpuhp_issue_call(cpu, state, false, node);
1992	}
1993}
1994
1995int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1996					  struct hlist_node *node,
1997					  bool invoke)
1998{
1999	struct cpuhp_step *sp;
2000	int cpu;
2001	int ret;
2002
2003	lockdep_assert_cpus_held();
2004
2005	sp = cpuhp_get_step(state);
2006	if (sp->multi_instance == false)
2007		return -EINVAL;
2008
2009	mutex_lock(&cpuhp_state_mutex);
2010
2011	if (!invoke || !sp->startup.multi)
2012		goto add_node;
2013
2014	/*
2015	 * Try to call the startup callback for each present cpu
2016	 * depending on the hotplug state of the cpu.
2017	 */
2018	for_each_present_cpu(cpu) {
2019		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2020		int cpustate = st->state;
2021
2022		if (cpustate < state)
2023			continue;
2024
2025		ret = cpuhp_issue_call(cpu, state, true, node);
2026		if (ret) {
2027			if (sp->teardown.multi)
2028				cpuhp_rollback_install(cpu, state, node);
2029			goto unlock;
2030		}
2031	}
2032add_node:
2033	ret = 0;
2034	hlist_add_head(node, &sp->list);
2035unlock:
2036	mutex_unlock(&cpuhp_state_mutex);
2037	return ret;
2038}
2039
2040int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
2041			       bool invoke)
2042{
2043	int ret;
2044
2045	cpus_read_lock();
2046	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
2047	cpus_read_unlock();
2048	return ret;
2049}
2050EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
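
/*
 * Illustrative sketch (not part of kernel/cpu.c): typical use of the
 * multi-instance machinery above through the convenience wrappers in
 * <linux/cpuhotplug.h>. The "drv" names, struct my_ctx and the "drv:online"
 * string are hypothetical; cpuhp_setup_state_multi() and
 * cpuhp_state_add_instance() are the real interfaces.
 */
#include <linux/cpuhotplug.h>

struct my_ctx {
	struct hlist_node node;		/* handed to the cpuhp core */
	/* per-device state ... */
};

static enum cpuhp_state drv_hp_state;

static int drv_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	/* container_of(node, struct my_ctx, node) recovers the instance */
	return 0;
}

static int drv_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	/* undo drv_cpu_online() for this instance */
	return 0;
}

static int __init drv_init(void)
{
	int ret;

	/* Register the state once; a free dynamic slot number is returned. */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "drv:online",
				      drv_cpu_online, drv_cpu_offline);
	if (ret < 0)
		return ret;
	drv_hp_state = ret;
	return 0;
}

static int drv_probe_one(struct my_ctx *ctx)
{
	/* Runs drv_cpu_online(cpu, &ctx->node) on every online CPU. */
	return cpuhp_state_add_instance(drv_hp_state, &ctx->node);
}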
2051
2052/**
2053 * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
2054 * @state:		The state to setup
2055 * @name:		Name of the step
2056 * @invoke:		If true, the startup function is invoked for cpus where
2057 *			cpu state >= @state
2058 * @startup:		startup callback function
2059 * @teardown:		teardown callback function
2060 * @multi_instance:	State is set up for multiple instances which get
2061 *			added afterwards.
2062 *
2063 * The caller needs to hold cpus read locked while calling this function.
2064 * Return:
2065 *   On success:
2066 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN;
2067 *      0 for all other states
2068 *   On failure: proper (negative) error code
2069 */
2070int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
2071				   const char *name, bool invoke,
2072				   int (*startup)(unsigned int cpu),
2073				   int (*teardown)(unsigned int cpu),
2074				   bool multi_instance)
2075{
2076	int cpu, ret = 0;
2077	bool dynstate;
2078
2079	lockdep_assert_cpus_held();
2080
2081	if (cpuhp_cb_check(state) || !name)
2082		return -EINVAL;
2083
2084	mutex_lock(&cpuhp_state_mutex);
2085
2086	ret = cpuhp_store_callbacks(state, name, startup, teardown,
2087				    multi_instance);
2088
2089	dynstate = state == CPUHP_AP_ONLINE_DYN;
2090	if (ret > 0 && dynstate) {
2091		state = ret;
2092		ret = 0;
2093	}
2094
2095	if (ret || !invoke || !startup)
2096		goto out;
2097
2098	/*
2099	 * Try to call the startup callback for each present cpu
2100	 * depending on the hotplug state of the cpu.
2101	 */
2102	for_each_present_cpu(cpu) {
2103		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2104		int cpustate = st->state;
2105
2106		if (cpustate < state)
2107			continue;
2108
2109		ret = cpuhp_issue_call(cpu, state, true, NULL);
2110		if (ret) {
2111			if (teardown)
2112				cpuhp_rollback_install(cpu, state, NULL);
2113			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2114			goto out;
2115		}
2116	}
2117out:
2118	mutex_unlock(&cpuhp_state_mutex);
2119	/*
2120	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
2121	 * dynamically allocated state in case of success.
2122	 */
2123	if (!ret && dynstate)
2124		return state;
2125	return ret;
2126}
2127EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
2128
2129int __cpuhp_setup_state(enum cpuhp_state state,
2130			const char *name, bool invoke,
2131			int (*startup)(unsigned int cpu),
2132			int (*teardown)(unsigned int cpu),
2133			bool multi_instance)
2134{
2135	int ret;
2136
2137	cpus_read_lock();
2138	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
2139					     teardown, multi_instance);
2140	cpus_read_unlock();
2141	return ret;
2142}
2143EXPORT_SYMBOL(__cpuhp_setup_state);
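
/*
 * Illustrative sketch (not part of kernel/cpu.c): the common consumer of the
 * function above is the cpuhp_setup_state() wrapper in <linux/cpuhotplug.h>.
 * Requesting CPUHP_AP_ONLINE_DYN lets cpuhp_reserve_state() pick a free slot,
 * whose number is returned on success. The "subsys" names are hypothetical.
 */
static enum cpuhp_state subsys_hp_state;

static int subsys_cpu_online(unsigned int cpu)
{
	/* runs on the hotplug thread of @cpu for every CPU coming online */
	return 0;
}

static int subsys_cpu_offline(unsigned int cpu)
{
	/* runs before @cpu goes down */
	return 0;
}

static int __init subsys_init(void)
{
	int ret;

	/* invoke == true: subsys_cpu_online() also runs on already-online CPUs */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys:online",
				subsys_cpu_online, subsys_cpu_offline);
	if (ret < 0)
		return ret;
	subsys_hp_state = ret;	/* remember the dynamically allocated state */
	return 0;
}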
2144
2145int __cpuhp_state_remove_instance(enum cpuhp_state state,
2146				  struct hlist_node *node, bool invoke)
2147{
2148	struct cpuhp_step *sp = cpuhp_get_step(state);
2149	int cpu;
2150
2151	BUG_ON(cpuhp_cb_check(state));
2152
2153	if (!sp->multi_instance)
2154		return -EINVAL;
2155
2156	cpus_read_lock();
2157	mutex_lock(&cpuhp_state_mutex);
2158
2159	if (!invoke || !cpuhp_get_teardown_cb(state))
2160		goto remove;
2161	/*
2162	 * Call the teardown callback for each present cpu depending
2163	 * on the hotplug state of the cpu. This function is not
2164	 * allowed to fail currently!
2165	 */
2166	for_each_present_cpu(cpu) {
2167		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2168		int cpustate = st->state;
2169
2170		if (cpustate >= state)
2171			cpuhp_issue_call(cpu, state, false, node);
2172	}
2173
2174remove:
2175	hlist_del(node);
2176	mutex_unlock(&cpuhp_state_mutex);
2177	cpus_read_unlock();
2178
2179	return 0;
2180}
2181EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
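
/*
 * Illustrative sketch (not part of kernel/cpu.c), continuing the hypothetical
 * "drv" multi-instance example after __cpuhp_state_add_instance() above: the
 * cpuhp_state_remove_instance() wrapper calls this with invoke == true, so the
 * teardown callback runs for the node on all online CPUs before it is removed.
 */
static int drv_remove_one(struct my_ctx *ctx)
{
	return cpuhp_state_remove_instance(drv_hp_state, &ctx->node);
}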
2182
2183/**
2184 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
2185 * @state:	The state to remove
2186 * @invoke:	If true, the teardown function is invoked for cpus where
2187 *		cpu state >= @state
2188 *
2189 * The caller needs to hold cpus read locked while calling this function.
2190 * The teardown callback is currently not allowed to fail. Think
2191 * about module removal!
2192 */
2193void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2194{
2195	struct cpuhp_step *sp = cpuhp_get_step(state);
2196	int cpu;
2197
2198	BUG_ON(cpuhp_cb_check(state));
2199
2200	lockdep_assert_cpus_held();
2201
2202	mutex_lock(&cpuhp_state_mutex);
2203	if (sp->multi_instance) {
2204		WARN(!hlist_empty(&sp->list),
2205		     "Error: Removing state %d which has instances left.\n",
2206		     state);
2207		goto remove;
2208	}
2209
2210	if (!invoke || !cpuhp_get_teardown_cb(state))
2211		goto remove;
2212
2213	/*
2214	 * Call the teardown callback for each present cpu depending
2215	 * on the hotplug state of the cpu. This function is not
2216	 * allowed to fail currently!
2217	 */
2218	for_each_present_cpu(cpu) {
2219		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2220		int cpustate = st->state;
2221
2222		if (cpustate >= state)
2223			cpuhp_issue_call(cpu, state, false, NULL);
2224	}
2225remove:
2226	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2227	mutex_unlock(&cpuhp_state_mutex);
2228}
2229EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2230
2231void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2232{
2233	cpus_read_lock();
2234	__cpuhp_remove_state_cpuslocked(state, invoke);
2235	cpus_read_unlock();
2236}
2237EXPORT_SYMBOL(__cpuhp_remove_state);
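
/*
 * Illustrative sketch (not part of kernel/cpu.c), completing the hypothetical
 * "subsys" example: on module exit the registration is undone with the
 * cpuhp_remove_state() wrapper, which invokes the teardown callback on all
 * online CPUs and frees the dynamic slot.
 */
static void __exit subsys_exit(void)
{
	cpuhp_remove_state(subsys_hp_state);
}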
2238
2239#ifdef CONFIG_HOTPLUG_SMT
2240static void cpuhp_offline_cpu_device(unsigned int cpu)
2241{
2242	struct device *dev = get_cpu_device(cpu);
2243
2244	dev->offline = true;
2245	/* Tell user space about the state change */
2246	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2247}
2248
2249static void cpuhp_online_cpu_device(unsigned int cpu)
2250{
2251	struct device *dev = get_cpu_device(cpu);
2252
2253	dev->offline = false;
2254	/* Tell user space about the state change */
2255	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2256}
2257
2258int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2259{
2260	int cpu, ret = 0;
2261
2262	cpu_maps_update_begin();
2263	for_each_online_cpu(cpu) {
2264		if (topology_is_primary_thread(cpu))
2265			continue;
2266		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2267		if (ret)
2268			break;
2269		/*
2270		 * As this needs to hold the cpu maps lock it's impossible
2271		 * to call device_offline() because that ends up calling
2272		 * cpu_down() which takes cpu maps lock. cpu maps lock
2273		 * needs to be held as this might race against in-kernel
2274		 * abusers of the hotplug machinery (thermal management).
2275		 *
2276		 * So nothing would update device:offline state. That would
2277		 * leave the sysfs entry stale and prevent onlining after
2278		 * smt control has been changed to 'off' again. This is
2279		 * called under the sysfs hotplug lock, so it is properly
2280		 * serialized against the regular offline usage.
2281		 */
2282		cpuhp_offline_cpu_device(cpu);
2283	}
2284	if (!ret)
2285		cpu_smt_control = ctrlval;
2286	cpu_maps_update_done();
2287	return ret;
2288}
2289
2290int cpuhp_smt_enable(void)
2291{
2292	int cpu, ret = 0;
2293
2294	cpu_maps_update_begin();
2295	cpu_smt_control = CPU_SMT_ENABLED;
2296	for_each_present_cpu(cpu) {
2297		/* Skip online CPUs and CPUs on offline nodes */
2298		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2299			continue;
2300		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2301		if (ret)
2302			break;
2303		/* See comment in cpuhp_smt_disable() */
2304		cpuhp_online_cpu_device(cpu);
2305	}
2306	cpu_maps_update_done();
2307	return ret;
2308}
2309#endif
2310
2311#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2312static ssize_t state_show(struct device *dev,
2313			  struct device_attribute *attr, char *buf)
2314{
2315	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2316
2317	return sprintf(buf, "%d\n", st->state);
2318}
2319static DEVICE_ATTR_RO(state);
2320
2321static ssize_t target_store(struct device *dev, struct device_attribute *attr,
2322			    const char *buf, size_t count)
2323{
2324	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2325	struct cpuhp_step *sp;
2326	int target, ret;
2327
2328	ret = kstrtoint(buf, 10, &target);
2329	if (ret)
2330		return ret;
2331
2332#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2333	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2334		return -EINVAL;
2335#else
2336	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2337		return -EINVAL;
2338#endif
2339
2340	ret = lock_device_hotplug_sysfs();
2341	if (ret)
2342		return ret;
2343
2344	mutex_lock(&cpuhp_state_mutex);
2345	sp = cpuhp_get_step(target);
2346	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2347	mutex_unlock(&cpuhp_state_mutex);
2348	if (ret)
2349		goto out;
2350
2351	if (st->state < target)
2352		ret = cpu_up(dev->id, target);
2353	else if (st->state > target)
2354		ret = cpu_down(dev->id, target);
2355	else if (WARN_ON(st->target != target))
2356		st->target = target;
2357out:
2358	unlock_device_hotplug();
2359	return ret ? ret : count;
2360}
2361
2362static ssize_t target_show(struct device *dev,
2363			   struct device_attribute *attr, char *buf)
2364{
2365	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2366
2367	return sprintf(buf, "%d\n", st->target);
2368}
2369static DEVICE_ATTR_RW(target);
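
/*
 * Editorial note, not part of kernel/cpu.c: the state/target attributes above
 * (and "fail" below) are exposed per CPU as
 * /sys/devices/system/cpu/cpuN/hotplug/. Writing a state number to "target"
 * walks the CPU to that state; without CONFIG_CPU_HOTPLUG_STATE_CONTROL only
 * CPUHP_OFFLINE (0) and CPUHP_ONLINE are accepted, so the write degenerates to
 * a plain offline/online request.
 */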
2370
2371static ssize_t fail_store(struct device *dev, struct device_attribute *attr,
2372			  const char *buf, size_t count)
2373{
2374	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2375	struct cpuhp_step *sp;
2376	int fail, ret;
2377
2378	ret = kstrtoint(buf, 10, &fail);
2379	if (ret)
2380		return ret;
2381
2382	if (fail == CPUHP_INVALID) {
2383		st->fail = fail;
2384		return count;
2385	}
2386
2387	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
2388		return -EINVAL;
2389
2390	/*
2391	 * Cannot fail STARTING/DYING callbacks.
2392	 */
2393	if (cpuhp_is_atomic_state(fail))
2394		return -EINVAL;
2395
2396	/*
2397	 * DEAD callbacks cannot fail...
2398	 * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
2399	 * triggers the STARTING callbacks; a failure in this state would
2400	 * hinder rollback.
2401	 */
2402	if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
2403		return -EINVAL;
2404
2405	/*
2406	 * Cannot fail anything that doesn't have callbacks.
2407	 */
2408	mutex_lock(&cpuhp_state_mutex);
2409	sp = cpuhp_get_step(fail);
2410	if (!sp->startup.single && !sp->teardown.single)
2411		ret = -EINVAL;
2412	mutex_unlock(&cpuhp_state_mutex);
2413	if (ret)
2414		return ret;
2415
2416	st->fail = fail;
2417
2418	return count;
2419}
2420
2421static ssize_t fail_show(struct device *dev,
2422			 struct device_attribute *attr, char *buf)
2423{
2424	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2425
2426	return sprintf(buf, "%d\n", st->fail);
2427}
2428
2429static DEVICE_ATTR_RW(fail);
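
/*
 * Editorial note, not part of kernel/cpu.c: "fail" is a test knob. Writing a
 * state number arms a one-shot -EAGAIN for that state's callback (see the
 * st->fail check in cpuhp_invoke_callback()), so a following write to "target"
 * exercises the rollback paths; writing -1 (CPUHP_INVALID) disarms it.
 * Illustrative use, with a hypothetical state number 42:
 *
 *	echo 42 > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	echo 0  > /sys/devices/system/cpu/cpu1/hotplug/target
 */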
2430
2431static struct attribute *cpuhp_cpu_attrs[] = {
2432	&dev_attr_state.attr,
2433	&dev_attr_target.attr,
2434	&dev_attr_fail.attr,
2435	NULL
2436};
2437
2438static const struct attribute_group cpuhp_cpu_attr_group = {
2439	.attrs = cpuhp_cpu_attrs,
2440	.name = "hotplug",
2441	NULL
2442};
2443
2444static ssize_t states_show(struct device *dev,
2445				 struct device_attribute *attr, char *buf)
2446{
2447	ssize_t cur, res = 0;
2448	int i;
2449
2450	mutex_lock(&cpuhp_state_mutex);
2451	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2452		struct cpuhp_step *sp = cpuhp_get_step(i);
2453
2454		if (sp->name) {
2455			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2456			buf += cur;
2457			res += cur;
2458		}
2459	}
2460	mutex_unlock(&cpuhp_state_mutex);
2461	return res;
2462}
2463static DEVICE_ATTR_RO(states);
2464
2465static struct attribute *cpuhp_cpu_root_attrs[] = {
2466	&dev_attr_states.attr,
2467	NULL
2468};
2469
2470static const struct attribute_group cpuhp_cpu_root_attr_group = {
2471	.attrs = cpuhp_cpu_root_attrs,
2472	.name = "hotplug",
2473	NULL
2474};
2475
2476#ifdef CONFIG_HOTPLUG_SMT
2477
2478static ssize_t
2479__store_smt_control(struct device *dev, struct device_attribute *attr,
2480		    const char *buf, size_t count)
2481{
2482	int ctrlval, ret;
2483
2484	if (sysfs_streq(buf, "on"))
2485		ctrlval = CPU_SMT_ENABLED;
2486	else if (sysfs_streq(buf, "off"))
2487		ctrlval = CPU_SMT_DISABLED;
2488	else if (sysfs_streq(buf, "forceoff"))
2489		ctrlval = CPU_SMT_FORCE_DISABLED;
2490	else
2491		return -EINVAL;
2492
2493	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2494		return -EPERM;
2495
2496	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2497		return -ENODEV;
2498
2499	ret = lock_device_hotplug_sysfs();
2500	if (ret)
2501		return ret;
2502
2503	if (ctrlval != cpu_smt_control) {
2504		switch (ctrlval) {
2505		case CPU_SMT_ENABLED:
2506			ret = cpuhp_smt_enable();
2507			break;
2508		case CPU_SMT_DISABLED:
2509		case CPU_SMT_FORCE_DISABLED:
2510			ret = cpuhp_smt_disable(ctrlval);
2511			break;
2512		}
2513	}
2514
2515	unlock_device_hotplug();
2516	return ret ? ret : count;
2517}
2518
2519#else /* !CONFIG_HOTPLUG_SMT */
2520static ssize_t
2521__store_smt_control(struct device *dev, struct device_attribute *attr,
2522		    const char *buf, size_t count)
2523{
2524	return -ENODEV;
2525}
2526#endif /* CONFIG_HOTPLUG_SMT */
2527
2528static const char *smt_states[] = {
2529	[CPU_SMT_ENABLED]		= "on",
2530	[CPU_SMT_DISABLED]		= "off",
2531	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
2532	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
2533	[CPU_SMT_NOT_IMPLEMENTED]	= "notimplemented",
2534};
2535
2536static ssize_t control_show(struct device *dev,
2537			    struct device_attribute *attr, char *buf)
2538{
2539	const char *state = smt_states[cpu_smt_control];
2540
2541	return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
2542}
2543
2544static ssize_t control_store(struct device *dev, struct device_attribute *attr,
2545			     const char *buf, size_t count)
2546{
2547	return __store_smt_control(dev, attr, buf, count);
2548}
2549static DEVICE_ATTR_RW(control);
2550
2551static ssize_t active_show(struct device *dev,
2552			   struct device_attribute *attr, char *buf)
2553{
2554	return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
2555}
2556static DEVICE_ATTR_RO(active);
2557
2558static struct attribute *cpuhp_smt_attrs[] = {
2559	&dev_attr_control.attr,
2560	&dev_attr_active.attr,
2561	NULL
2562};
2563
2564static const struct attribute_group cpuhp_smt_attr_group = {
2565	.attrs = cpuhp_smt_attrs,
2566	.name = "smt",
2567	NULL
2568};
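
/*
 * Editorial note, not part of kernel/cpu.c: the group above is registered on
 * the cpu subsystem root by cpu_smt_sysfs_init() below, so the knobs appear as
 * /sys/devices/system/cpu/smt/{control,active}. Illustrative use:
 *
 *	echo off > /sys/devices/system/cpu/smt/control
 *	cat /sys/devices/system/cpu/smt/active
 */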
2569
2570static int __init cpu_smt_sysfs_init(void)
2571{
2572	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2573				  &cpuhp_smt_attr_group);
2574}
2575
2576static int __init cpuhp_sysfs_init(void)
2577{
2578	int cpu, ret;
2579
2580	ret = cpu_smt_sysfs_init();
2581	if (ret)
2582		return ret;
2583
2584	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2585				 &cpuhp_cpu_root_attr_group);
2586	if (ret)
2587		return ret;
2588
2589	for_each_possible_cpu(cpu) {
2590		struct device *dev = get_cpu_device(cpu);
2591
2592		if (!dev)
2593			continue;
2594		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2595		if (ret)
2596			return ret;
2597	}
2598	return 0;
2599}
2600device_initcall(cpuhp_sysfs_init);
2601#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
2602
2603/*
2604 * cpu_bit_bitmap[] is a special, "compressed" data structure that
2605 * represents the NR_CPUS-bit value 1 << nr for every bit number nr.
2606 *
2607 * It is used by cpumask_of() to get a constant address to a CPU
2608 * mask value that has a single bit set only.
2609 */
2610
2611/* cpu_bit_bitmap[0] is empty - so we can back into it */
2612#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
2613#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2614#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2615#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2616
2617const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2618
2619	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
2620	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
2621#if BITS_PER_LONG > 32
2622	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
2623	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
2624#endif
2625};
2626EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
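
/*
 * Illustrative sketch (not part of kernel/cpu.c) of how cpumask_of() exploits
 * the table above; it is a simplified restatement of get_cpu_mask() in
 * <linux/cpumask.h>, which cpumask_of() wraps. Row (1 + cpu % BITS_PER_LONG)
 * has bit (cpu % BITS_PER_LONG) set in its word 0; stepping the pointer back
 * by (cpu / BITS_PER_LONG) words makes that word land at index
 * (cpu / BITS_PER_LONG) of the returned mask, so exactly bit "cpu" reads as
 * set while the words in front of it fall into zero-filled rows.
 */
static inline const struct cpumask *example_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}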
2627
2628const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2629EXPORT_SYMBOL(cpu_all_bits);
2630
2631#ifdef CONFIG_INIT_ALL_POSSIBLE
2632struct cpumask __cpu_possible_mask __read_mostly
2633	= {CPU_BITS_ALL};
2634#else
2635struct cpumask __cpu_possible_mask __read_mostly;
2636#endif
2637EXPORT_SYMBOL(__cpu_possible_mask);
2638
2639struct cpumask __cpu_online_mask __read_mostly;
2640EXPORT_SYMBOL(__cpu_online_mask);
2641
2642struct cpumask __cpu_present_mask __read_mostly;
2643EXPORT_SYMBOL(__cpu_present_mask);
2644
2645struct cpumask __cpu_active_mask __read_mostly;
2646EXPORT_SYMBOL(__cpu_active_mask);
2647
2648struct cpumask __cpu_dying_mask __read_mostly;
2649EXPORT_SYMBOL(__cpu_dying_mask);
2650
2651atomic_t __num_online_cpus __read_mostly;
2652EXPORT_SYMBOL(__num_online_cpus);
2653
2654void init_cpu_present(const struct cpumask *src)
2655{
2656	cpumask_copy(&__cpu_present_mask, src);
2657}
2658
2659void init_cpu_possible(const struct cpumask *src)
2660{
2661	cpumask_copy(&__cpu_possible_mask, src);
2662}
2663
2664void init_cpu_online(const struct cpumask *src)
2665{
2666	cpumask_copy(&__cpu_online_mask, src);
2667}
2668
2669void set_cpu_online(unsigned int cpu, bool online)
2670{
2671	/*
2672	 * atomic_inc/dec() is required to handle the horrid abuse of this
2673	 * function by the reboot and kexec code which invoke it from
2674	 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
2675	 * regular CPU hotplug is properly serialized.
2676	 *
2677	 * Note that the fact that __num_online_cpus is of type atomic_t
2678	 * does not protect readers which are not serialized against
2679	 * concurrent hotplug operations.
2680	 */
2681	if (online) {
2682		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
2683			atomic_inc(&__num_online_cpus);
2684	} else {
2685		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
2686			atomic_dec(&__num_online_cpus);
2687	}
2688}
2689
2690/*
2691 * Activate the first processor.
2692 */
2693void __init boot_cpu_init(void)
2694{
2695	int cpu = smp_processor_id();
2696
2697	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
2698	set_cpu_online(cpu, true);
2699	set_cpu_active(cpu, true);
2700	set_cpu_present(cpu, true);
2701	set_cpu_possible(cpu, true);
2702
2703#ifdef CONFIG_SMP
2704	__boot_cpu_id = cpu;
2705#endif
2706}
2707
2708/*
2709 * Must be called _AFTER_ setting up the per_cpu areas
2710 */
2711void __init boot_cpu_hotplug_init(void)
2712{
2713#ifdef CONFIG_SMP
2714	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
2715#endif
2716	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2717	this_cpu_write(cpuhp_state.target, CPUHP_ONLINE);
2718}
2719
2720/*
2721 * These are used for a global "mitigations=" cmdline option for toggling
2722 * optional CPU mitigations.
2723 */
2724enum cpu_mitigations {
2725	CPU_MITIGATIONS_OFF,
2726	CPU_MITIGATIONS_AUTO,
2727	CPU_MITIGATIONS_AUTO_NOSMT,
2728};
2729
2730static enum cpu_mitigations cpu_mitigations __ro_after_init =
2731	CPU_MITIGATIONS_AUTO;
2732
2733static int __init mitigations_parse_cmdline(char *arg)
2734{
2735	if (!strcmp(arg, "off"))
2736		cpu_mitigations = CPU_MITIGATIONS_OFF;
2737	else if (!strcmp(arg, "auto"))
2738		cpu_mitigations = CPU_MITIGATIONS_AUTO;
2739	else if (!strcmp(arg, "auto,nosmt"))
2740		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2741	else
2742		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2743			arg);
2744
2745	return 0;
2746}
2747early_param("mitigations", mitigations_parse_cmdline);
2748
2749/* mitigations=off */
2750bool cpu_mitigations_off(void)
2751{
2752	return cpu_mitigations == CPU_MITIGATIONS_OFF;
2753}
2754EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2755
2756/* mitigations=auto,nosmt */
2757bool cpu_mitigations_auto_nosmt(void)
2758{
2759	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2760}
2761EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
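
/*
 * Illustrative sketch (not part of kernel/cpu.c): architecture mitigation
 * selection code typically consults the two helpers above roughly like this.
 * hypothetical_select_mitigation() is a made-up name; cpu_smt_disable() is
 * the real interface declared in <linux/cpu.h>.
 */
static void __init hypothetical_select_mitigation(void)
{
	if (cpu_mitigations_off())
		return;				/* booted with mitigations=off */

	/* ... enable the arch-specific mitigation here ... */

	if (cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);		/* mitigations=auto,nosmt */
}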
v5.14.15
   1/* CPU control.
   2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
   3 *
   4 * This code is licenced under the GPL.
   5 */
   6#include <linux/sched/mm.h>
   7#include <linux/proc_fs.h>
   8#include <linux/smp.h>
   9#include <linux/init.h>
  10#include <linux/notifier.h>
  11#include <linux/sched/signal.h>
  12#include <linux/sched/hotplug.h>
  13#include <linux/sched/isolation.h>
  14#include <linux/sched/task.h>
  15#include <linux/sched/smt.h>
  16#include <linux/unistd.h>
  17#include <linux/cpu.h>
  18#include <linux/oom.h>
  19#include <linux/rcupdate.h>
  20#include <linux/export.h>
  21#include <linux/bug.h>
  22#include <linux/kthread.h>
  23#include <linux/stop_machine.h>
  24#include <linux/mutex.h>
  25#include <linux/gfp.h>
  26#include <linux/suspend.h>
  27#include <linux/lockdep.h>
  28#include <linux/tick.h>
  29#include <linux/irq.h>
  30#include <linux/nmi.h>
  31#include <linux/smpboot.h>
  32#include <linux/relay.h>
  33#include <linux/slab.h>
 
  34#include <linux/percpu-rwsem.h>
  35#include <linux/cpuset.h>
 
 
  36
  37#include <trace/events/power.h>
  38#define CREATE_TRACE_POINTS
  39#include <trace/events/cpuhp.h>
  40
  41#include "smpboot.h"
  42
  43/**
  44 * cpuhp_cpu_state - Per cpu hotplug state storage
  45 * @state:	The current cpu state
  46 * @target:	The target state
 
  47 * @thread:	Pointer to the hotplug thread
  48 * @should_run:	Thread should execute
  49 * @rollback:	Perform a rollback
  50 * @single:	Single callback invocation
  51 * @bringup:	Single callback bringup or teardown selector
 
 
 
 
  52 * @cb_state:	The state for a single callback (install/uninstall)
  53 * @result:	Result of the operation
  54 * @done_up:	Signal completion to the issuer of the task for cpu-up
  55 * @done_down:	Signal completion to the issuer of the task for cpu-down
  56 */
  57struct cpuhp_cpu_state {
  58	enum cpuhp_state	state;
  59	enum cpuhp_state	target;
  60	enum cpuhp_state	fail;
  61#ifdef CONFIG_SMP
  62	struct task_struct	*thread;
  63	bool			should_run;
  64	bool			rollback;
  65	bool			single;
  66	bool			bringup;
  67	int			cpu;
  68	struct hlist_node	*node;
  69	struct hlist_node	*last;
  70	enum cpuhp_state	cb_state;
  71	int			result;
  72	struct completion	done_up;
  73	struct completion	done_down;
  74#endif
  75};
  76
  77static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
  78	.fail = CPUHP_INVALID,
  79};
  80
  81#ifdef CONFIG_SMP
  82cpumask_t cpus_booted_once_mask;
  83#endif
  84
  85#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
  86static struct lockdep_map cpuhp_state_up_map =
  87	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
  88static struct lockdep_map cpuhp_state_down_map =
  89	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
  90
  91
  92static inline void cpuhp_lock_acquire(bool bringup)
  93{
  94	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
  95}
  96
  97static inline void cpuhp_lock_release(bool bringup)
  98{
  99	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
 100}
 101#else
 102
 103static inline void cpuhp_lock_acquire(bool bringup) { }
 104static inline void cpuhp_lock_release(bool bringup) { }
 105
 106#endif
 107
 108/**
 109 * cpuhp_step - Hotplug state machine step
 110 * @name:	Name of the step
 111 * @startup:	Startup function of the step
 112 * @teardown:	Teardown function of the step
 113 * @cant_stop:	Bringup/teardown can't be stopped at this step
 
 114 */
 115struct cpuhp_step {
 116	const char		*name;
 117	union {
 118		int		(*single)(unsigned int cpu);
 119		int		(*multi)(unsigned int cpu,
 120					 struct hlist_node *node);
 121	} startup;
 122	union {
 123		int		(*single)(unsigned int cpu);
 124		int		(*multi)(unsigned int cpu,
 125					 struct hlist_node *node);
 126	} teardown;
 
 127	struct hlist_head	list;
 
 128	bool			cant_stop;
 129	bool			multi_instance;
 130};
 131
 132static DEFINE_MUTEX(cpuhp_state_mutex);
 133static struct cpuhp_step cpuhp_hp_states[];
 134
 135static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
 136{
 137	return cpuhp_hp_states + state;
 138}
 139
 140static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
 141{
 142	return bringup ? !step->startup.single : !step->teardown.single;
 143}
 144
 145/**
 146 * cpuhp_invoke_callback _ Invoke the callbacks for a given state
 147 * @cpu:	The cpu for which the callback should be invoked
 148 * @state:	The state to do callbacks for
 149 * @bringup:	True if the bringup callback should be invoked
 150 * @node:	For multi-instance, do a single entry callback for install/remove
 151 * @lastp:	For multi-instance rollback, remember how far we got
 152 *
 153 * Called from cpu hotplug and from the state register machinery.
 
 
 154 */
 155static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
 156				 bool bringup, struct hlist_node *node,
 157				 struct hlist_node **lastp)
 158{
 159	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 160	struct cpuhp_step *step = cpuhp_get_step(state);
 161	int (*cbm)(unsigned int cpu, struct hlist_node *node);
 162	int (*cb)(unsigned int cpu);
 163	int ret, cnt;
 164
 165	if (st->fail == state) {
 166		st->fail = CPUHP_INVALID;
 167		return -EAGAIN;
 168	}
 169
 170	if (cpuhp_step_empty(bringup, step)) {
 171		WARN_ON_ONCE(1);
 172		return 0;
 173	}
 174
 175	if (!step->multi_instance) {
 176		WARN_ON_ONCE(lastp && *lastp);
 177		cb = bringup ? step->startup.single : step->teardown.single;
 178
 179		trace_cpuhp_enter(cpu, st->target, state, cb);
 180		ret = cb(cpu);
 181		trace_cpuhp_exit(cpu, st->state, state, ret);
 182		return ret;
 183	}
 184	cbm = bringup ? step->startup.multi : step->teardown.multi;
 185
 186	/* Single invocation for instance add/remove */
 187	if (node) {
 188		WARN_ON_ONCE(lastp && *lastp);
 189		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 190		ret = cbm(cpu, node);
 191		trace_cpuhp_exit(cpu, st->state, state, ret);
 192		return ret;
 193	}
 194
 195	/* State transition. Invoke on all instances */
 196	cnt = 0;
 197	hlist_for_each(node, &step->list) {
 198		if (lastp && node == *lastp)
 199			break;
 200
 201		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 202		ret = cbm(cpu, node);
 203		trace_cpuhp_exit(cpu, st->state, state, ret);
 204		if (ret) {
 205			if (!lastp)
 206				goto err;
 207
 208			*lastp = node;
 209			return ret;
 210		}
 211		cnt++;
 212	}
 213	if (lastp)
 214		*lastp = NULL;
 215	return 0;
 216err:
 217	/* Rollback the instances if one failed */
 218	cbm = !bringup ? step->startup.multi : step->teardown.multi;
 219	if (!cbm)
 220		return ret;
 221
 222	hlist_for_each(node, &step->list) {
 223		if (!cnt--)
 224			break;
 225
 226		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 227		ret = cbm(cpu, node);
 228		trace_cpuhp_exit(cpu, st->state, state, ret);
 229		/*
 230		 * Rollback must not fail,
 231		 */
 232		WARN_ON_ONCE(ret);
 233	}
 234	return ret;
 235}
 236
 237#ifdef CONFIG_SMP
 238static bool cpuhp_is_ap_state(enum cpuhp_state state)
 239{
 240	/*
 241	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
 242	 * purposes as that state is handled explicitly in cpu_down.
 243	 */
 244	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
 245}
 246
 247static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
 248{
 249	struct completion *done = bringup ? &st->done_up : &st->done_down;
 250	wait_for_completion(done);
 251}
 252
 253static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
 254{
 255	struct completion *done = bringup ? &st->done_up : &st->done_down;
 256	complete(done);
 257}
 258
 259/*
 260 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 261 */
 262static bool cpuhp_is_atomic_state(enum cpuhp_state state)
 263{
 264	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
 265}
 266
 267/* Serializes the updates to cpu_online_mask, cpu_present_mask */
 268static DEFINE_MUTEX(cpu_add_remove_lock);
 269bool cpuhp_tasks_frozen;
 270EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
 271
 272/*
 273 * The following two APIs (cpu_maps_update_begin/done) must be used when
 274 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 275 */
 276void cpu_maps_update_begin(void)
 277{
 278	mutex_lock(&cpu_add_remove_lock);
 279}
 280
 281void cpu_maps_update_done(void)
 282{
 283	mutex_unlock(&cpu_add_remove_lock);
 284}
 285
 286/*
 287 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 288 * Should always be manipulated under cpu_add_remove_lock
 289 */
 290static int cpu_hotplug_disabled;
 291
 292#ifdef CONFIG_HOTPLUG_CPU
 293
 294DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
 295
 296void cpus_read_lock(void)
 297{
 298	percpu_down_read(&cpu_hotplug_lock);
 299}
 300EXPORT_SYMBOL_GPL(cpus_read_lock);
 301
 302int cpus_read_trylock(void)
 303{
 304	return percpu_down_read_trylock(&cpu_hotplug_lock);
 305}
 306EXPORT_SYMBOL_GPL(cpus_read_trylock);
 307
 308void cpus_read_unlock(void)
 309{
 310	percpu_up_read(&cpu_hotplug_lock);
 311}
 312EXPORT_SYMBOL_GPL(cpus_read_unlock);
 313
 314void cpus_write_lock(void)
 315{
 316	percpu_down_write(&cpu_hotplug_lock);
 317}
 318
 319void cpus_write_unlock(void)
 320{
 321	percpu_up_write(&cpu_hotplug_lock);
 322}
 323
 324void lockdep_assert_cpus_held(void)
 325{
 326	/*
 327	 * We can't have hotplug operations before userspace starts running,
 328	 * and some init codepaths will knowingly not take the hotplug lock.
 329	 * This is all valid, so mute lockdep until it makes sense to report
 330	 * unheld locks.
 331	 */
 332	if (system_state < SYSTEM_RUNNING)
 333		return;
 334
 335	percpu_rwsem_assert_held(&cpu_hotplug_lock);
 336}
 337
 338#ifdef CONFIG_LOCKDEP
 339int lockdep_is_cpus_held(void)
 340{
 341	return percpu_rwsem_is_held(&cpu_hotplug_lock);
 342}
 343#endif
 344
 345static void lockdep_acquire_cpus_lock(void)
 346{
 347	rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
 348}
 349
 350static void lockdep_release_cpus_lock(void)
 351{
 352	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
 353}
 354
 355/*
 356 * Wait for currently running CPU hotplug operations to complete (if any) and
 357 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 358 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 359 * hotplug path before performing hotplug operations. So acquiring that lock
 360 * guarantees mutual exclusion from any currently running hotplug operations.
 361 */
 362void cpu_hotplug_disable(void)
 363{
 364	cpu_maps_update_begin();
 365	cpu_hotplug_disabled++;
 366	cpu_maps_update_done();
 367}
 368EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
 369
 370static void __cpu_hotplug_enable(void)
 371{
 372	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
 373		return;
 374	cpu_hotplug_disabled--;
 375}
 376
 377void cpu_hotplug_enable(void)
 378{
 379	cpu_maps_update_begin();
 380	__cpu_hotplug_enable();
 381	cpu_maps_update_done();
 382}
 383EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 384
 385#else
 386
 387static void lockdep_acquire_cpus_lock(void)
 388{
 389}
 390
 391static void lockdep_release_cpus_lock(void)
 392{
 393}
 394
 395#endif	/* CONFIG_HOTPLUG_CPU */
 396
 397/*
 398 * Architectures that need SMT-specific errata handling during SMT hotplug
 399 * should override this.
 400 */
 401void __weak arch_smt_update(void) { }
 402
 403#ifdef CONFIG_HOTPLUG_SMT
 404enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
 405
 406void __init cpu_smt_disable(bool force)
 407{
 408	if (!cpu_smt_possible())
 409		return;
 410
 411	if (force) {
 412		pr_info("SMT: Force disabled\n");
 413		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
 414	} else {
 415		pr_info("SMT: disabled\n");
 416		cpu_smt_control = CPU_SMT_DISABLED;
 417	}
 418}
 419
 420/*
 421 * The decision whether SMT is supported can only be done after the full
 422 * CPU identification. Called from architecture code.
 423 */
 424void __init cpu_smt_check_topology(void)
 425{
 426	if (!topology_smt_supported())
 427		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
 428}
 429
 430static int __init smt_cmdline_disable(char *str)
 431{
 432	cpu_smt_disable(str && !strcmp(str, "force"));
 433	return 0;
 434}
 435early_param("nosmt", smt_cmdline_disable);
 436
 437static inline bool cpu_smt_allowed(unsigned int cpu)
 438{
 439	if (cpu_smt_control == CPU_SMT_ENABLED)
 440		return true;
 441
 442	if (topology_is_primary_thread(cpu))
 443		return true;
 444
 445	/*
 446	 * On x86 it's required to boot all logical CPUs at least once so
 447	 * that the init code can get a chance to set CR4.MCE on each
 448	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
 449	 * core will shutdown the machine.
 450	 */
 451	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
 452}
 453
 454/* Returns true if SMT is not supported of forcefully (irreversibly) disabled */
 455bool cpu_smt_possible(void)
 456{
 457	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
 458		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
 459}
 460EXPORT_SYMBOL_GPL(cpu_smt_possible);
 461#else
 462static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
 463#endif
 464
 465static inline enum cpuhp_state
 466cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
 467{
 468	enum cpuhp_state prev_state = st->state;
 469	bool bringup = st->state < target;
 470
 471	st->rollback = false;
 472	st->last = NULL;
 473
 474	st->target = target;
 475	st->single = false;
 476	st->bringup = bringup;
 477	if (cpu_dying(st->cpu) != !bringup)
 478		set_cpu_dying(st->cpu, !bringup);
 479
 480	return prev_state;
 481}
 482
 483static inline void
 484cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
 
 485{
 486	bool bringup = !st->bringup;
 487
 488	st->target = prev_state;
 489
 490	/*
 491	 * Already rolling back. No need invert the bringup value or to change
 492	 * the current state.
 493	 */
 494	if (st->rollback)
 495		return;
 496
 497	st->rollback = true;
 498
 499	/*
 500	 * If we have st->last we need to undo partial multi_instance of this
 501	 * state first. Otherwise start undo at the previous state.
 502	 */
 503	if (!st->last) {
 504		if (st->bringup)
 505			st->state--;
 506		else
 507			st->state++;
 508	}
 509
 510	st->bringup = bringup;
 511	if (cpu_dying(st->cpu) != !bringup)
 512		set_cpu_dying(st->cpu, !bringup);
 513}
 514
 515/* Regular hotplug invocation of the AP hotplug thread */
 516static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
 517{
 518	if (!st->single && st->state == st->target)
 519		return;
 520
 521	st->result = 0;
 522	/*
 523	 * Make sure the above stores are visible before should_run becomes
 524	 * true. Paired with the mb() above in cpuhp_thread_fun()
 525	 */
 526	smp_mb();
 527	st->should_run = true;
 528	wake_up_process(st->thread);
 529	wait_for_ap_thread(st, st->bringup);
 530}
 531
 532static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
 
 533{
 534	enum cpuhp_state prev_state;
 535	int ret;
 536
 537	prev_state = cpuhp_set_state(st, target);
 538	__cpuhp_kick_ap(st);
 539	if ((ret = st->result)) {
 540		cpuhp_reset_state(st, prev_state);
 541		__cpuhp_kick_ap(st);
 542	}
 543
 544	return ret;
 545}
 546
 547static int bringup_wait_for_ap(unsigned int cpu)
 548{
 549	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 550
 551	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
 552	wait_for_ap_thread(st, true);
 553	if (WARN_ON_ONCE((!cpu_online(cpu))))
 554		return -ECANCELED;
 555
 556	/* Unpark the hotplug thread of the target cpu */
 557	kthread_unpark(st->thread);
 558
 559	/*
 560	 * SMT soft disabling on X86 requires to bring the CPU out of the
 561	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
 562	 * CPU marked itself as booted_once in notify_cpu_starting() so the
 563	 * cpu_smt_allowed() check will now return false if this is not the
 564	 * primary sibling.
 565	 */
 566	if (!cpu_smt_allowed(cpu))
 567		return -ECANCELED;
 568
 569	if (st->target <= CPUHP_AP_ONLINE_IDLE)
 570		return 0;
 571
 572	return cpuhp_kick_ap(st, st->target);
 573}
 574
 575static int bringup_cpu(unsigned int cpu)
 576{
 577	struct task_struct *idle = idle_thread_get(cpu);
 578	int ret;
 579
 580	/*
 
 
 
 
 
 
 581	 * Some architectures have to walk the irq descriptors to
 582	 * setup the vector space for the cpu which comes online.
 583	 * Prevent irq alloc/free across the bringup.
 584	 */
 585	irq_lock_sparse();
 586
 587	/* Arch-specific enabling code. */
 588	ret = __cpu_up(cpu, idle);
 589	irq_unlock_sparse();
 590	if (ret)
 591		return ret;
 592	return bringup_wait_for_ap(cpu);
 593}
 594
 595static int finish_cpu(unsigned int cpu)
 596{
 597	struct task_struct *idle = idle_thread_get(cpu);
 598	struct mm_struct *mm = idle->active_mm;
 599
 600	/*
 601	 * idle_task_exit() will have switched to &init_mm, now
 602	 * clean up any remaining active_mm state.
 603	 */
 604	if (mm != &init_mm)
 605		idle->active_mm = &init_mm;
 606	mmdrop(mm);
 607	return 0;
 608}
 609
 610/*
 611 * Hotplug state machine related functions
 612 */
 613
 614/*
 615 * Get the next state to run. Empty ones will be skipped. Returns true if a
 616 * state must be run.
 617 *
 618 * st->state will be modified ahead of time, to match state_to_run, as if it
 619 * has already ran.
 620 */
 621static bool cpuhp_next_state(bool bringup,
 622			     enum cpuhp_state *state_to_run,
 623			     struct cpuhp_cpu_state *st,
 624			     enum cpuhp_state target)
 625{
 626	do {
 627		if (bringup) {
 628			if (st->state >= target)
 629				return false;
 630
 631			*state_to_run = ++st->state;
 632		} else {
 633			if (st->state <= target)
 634				return false;
 635
 636			*state_to_run = st->state--;
 637		}
 638
 639		if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
 640			break;
 641	} while (true);
 642
 643	return true;
 644}
 645
 646static int cpuhp_invoke_callback_range(bool bringup,
 647				       unsigned int cpu,
 648				       struct cpuhp_cpu_state *st,
 649				       enum cpuhp_state target)
 
 650{
 651	enum cpuhp_state state;
 652	int err = 0;
 653
 654	while (cpuhp_next_state(bringup, &state, st, target)) {
 
 
 655		err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
 656		if (err)
 
 
 
 
 
 
 
 
 
 
 657			break;
 
 658	}
 659
 660	return err;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 661}
 662
 663static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
 664{
 665	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
 666		return true;
 667	/*
 668	 * When CPU hotplug is disabled, then taking the CPU down is not
 669	 * possible because takedown_cpu() and the architecture and
 670	 * subsystem specific mechanisms are not available. So the CPU
 671	 * which would be completely unplugged again needs to stay around
 672	 * in the current state.
 673	 */
 674	return st->state <= CPUHP_BRINGUP_CPU;
 675}
 676
 677static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 678			      enum cpuhp_state target)
 679{
 680	enum cpuhp_state prev_state = st->state;
 681	int ret = 0;
 682
 683	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
 684	if (ret) {
 685		cpuhp_reset_state(st, prev_state);
 
 
 
 
 686		if (can_rollback_cpu(st))
 687			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
 688							    prev_state));
 689	}
 690	return ret;
 691}
 692
 693/*
 694 * The cpu hotplug threads manage the bringup and teardown of the cpus
 695 */
 696static void cpuhp_create(unsigned int cpu)
 697{
 698	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 699
 700	init_completion(&st->done_up);
 701	init_completion(&st->done_down);
 702	st->cpu = cpu;
 703}
 704
 705static int cpuhp_should_run(unsigned int cpu)
 706{
 707	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
 708
 709	return st->should_run;
 710}
 711
 712/*
 713 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 714 * callbacks when a state gets [un]installed at runtime.
 715 *
 716 * Each invocation of this function by the smpboot thread does a single AP
 717 * state callback.
 718 *
 719 * It has 3 modes of operation:
 720 *  - single: runs st->cb_state
 721 *  - up:     runs ++st->state, while st->state < st->target
 722 *  - down:   runs st->state--, while st->state > st->target
 723 *
 724 * When complete or on error, should_run is cleared and the completion is fired.
 725 */
 726static void cpuhp_thread_fun(unsigned int cpu)
 727{
 728	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
 729	bool bringup = st->bringup;
 730	enum cpuhp_state state;
 731
 732	if (WARN_ON_ONCE(!st->should_run))
 733		return;
 734
 735	/*
 736	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
 737	 * that if we see ->should_run we also see the rest of the state.
 738	 */
 739	smp_mb();
 740
 741	/*
 742	 * The BP holds the hotplug lock, but we're now running on the AP,
 743	 * ensure that anybody asserting the lock is held, will actually find
 744	 * it so.
 745	 */
 746	lockdep_acquire_cpus_lock();
 747	cpuhp_lock_acquire(bringup);
 748
 749	if (st->single) {
 750		state = st->cb_state;
 751		st->should_run = false;
 752	} else {
 753		st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
 754		if (!st->should_run)
 755			goto end;
 756	}
 757
 758	WARN_ON_ONCE(!cpuhp_is_ap_state(state));
 759
 760	if (cpuhp_is_atomic_state(state)) {
 761		local_irq_disable();
 762		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
 763		local_irq_enable();
 764
 765		/*
 766		 * STARTING/DYING must not fail!
 767		 */
 768		WARN_ON_ONCE(st->result);
 769	} else {
 770		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
 771	}
 772
 773	if (st->result) {
 774		/*
 775		 * If we fail on a rollback, we're up a creek without no
 776		 * paddle, no way forward, no way back. We loose, thanks for
 777		 * playing.
 778		 */
 779		WARN_ON_ONCE(st->rollback);
 780		st->should_run = false;
 781	}
 782
 783end:
 784	cpuhp_lock_release(bringup);
 785	lockdep_release_cpus_lock();
 786
 787	if (!st->should_run)
 788		complete_ap_thread(st, bringup);
 789}
 790
 791/* Invoke a single callback on a remote cpu */
 792static int
 793cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
 794			 struct hlist_node *node)
 795{
 796	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 797	int ret;
 798
 799	if (!cpu_online(cpu))
 800		return 0;
 801
 802	cpuhp_lock_acquire(false);
 803	cpuhp_lock_release(false);
 804
 805	cpuhp_lock_acquire(true);
 806	cpuhp_lock_release(true);
 807
 808	/*
 809	 * If we are up and running, use the hotplug thread. For early calls
 810	 * we invoke the thread function directly.
 811	 */
 812	if (!st->thread)
 813		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 814
 815	st->rollback = false;
 816	st->last = NULL;
 817
 818	st->node = node;
 819	st->bringup = bringup;
 820	st->cb_state = state;
 821	st->single = true;
 822
 823	__cpuhp_kick_ap(st);
 824
 825	/*
 826	 * If we failed and did a partial, do a rollback.
 827	 */
 828	if ((ret = st->result) && st->last) {
 829		st->rollback = true;
 830		st->bringup = !bringup;
 831
 832		__cpuhp_kick_ap(st);
 833	}
 834
 835	/*
 836	 * Clean up the leftovers so the next hotplug operation wont use stale
 837	 * data.
 838	 */
 839	st->node = st->last = NULL;
 840	return ret;
 841}
 842
 843static int cpuhp_kick_ap_work(unsigned int cpu)
 844{
 845	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 846	enum cpuhp_state prev_state = st->state;
 847	int ret;
 848
 849	cpuhp_lock_acquire(false);
 850	cpuhp_lock_release(false);
 851
 852	cpuhp_lock_acquire(true);
 853	cpuhp_lock_release(true);
 854
 855	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
 856	ret = cpuhp_kick_ap(st, st->target);
 857	trace_cpuhp_exit(cpu, st->state, prev_state, ret);
 858
 859	return ret;
 860}
 861
 862static struct smp_hotplug_thread cpuhp_threads = {
 863	.store			= &cpuhp_state.thread,
 864	.create			= &cpuhp_create,
 865	.thread_should_run	= cpuhp_should_run,
 866	.thread_fn		= cpuhp_thread_fun,
 867	.thread_comm		= "cpuhp/%u",
 868	.selfparking		= true,
 869};
 870
 
 
 
 
 
 
 
 
 
 
 
 
 871void __init cpuhp_threads_init(void)
 872{
 
 873	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
 874	kthread_unpark(this_cpu_read(cpuhp_state.thread));
 875}
 876
 877/*
 878 *
 879 * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
 880 * protected region.
 881 *
 882 * The operation is still serialized against concurrent CPU hotplug via
 883 * cpu_add_remove_lock, i.e. CPU map protection.  But it is _not_
 884 * serialized against other hotplug related activity like adding or
 885 * removing of state callbacks and state instances, which invoke either the
 886 * startup or the teardown callback of the affected state.
 887 *
 888 * This is required for subsystems which are unfixable vs. CPU hotplug and
 889 * evade lock inversion problems by scheduling work which has to be
 890 * completed _before_ cpu_up()/_cpu_down() returns.
 891 *
 892 * Don't even think about adding anything to this for any new code or even
 893 * drivers. It's only purpose is to keep existing lock order trainwrecks
 894 * working.
 895 *
 896 * For cpu_down() there might be valid reasons to finish cleanups which are
 897 * not required to be done under cpu_hotplug_lock, but that's a different
 898 * story and would be not invoked via this.
 899 */
 900static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
 901{
 902	/*
 903	 * cpusets delegate hotplug operations to a worker to "solve" the
 904	 * lock order problems. Wait for the worker, but only if tasks are
 905	 * _not_ frozen (suspend, hibernate) as that would wait forever.
 906	 *
 907	 * The wait is required because otherwise the hotplug operation
 908	 * returns with inconsistent state, which could even be observed in
 909	 * user space when a new CPU is brought up. The CPU plug uevent
 910	 * would be delivered and user space reacting on it would fail to
 911	 * move tasks to the newly plugged CPU up to the point where the
 912	 * work has finished because up to that point the newly plugged CPU
 913	 * is not assignable in cpusets/cgroups. On unplug that's not
 914	 * necessarily a visible issue, but it is still inconsistent state,
 915	 * which is the real problem which needs to be "fixed". This can't
 916	 * prevent the transient state between scheduling the work and
 917	 * returning from waiting for it.
 918	 */
 919	if (!tasks_frozen)
 920		cpuset_wait_for_hotplug();
 921}
 922
 923#ifdef CONFIG_HOTPLUG_CPU
 924#ifndef arch_clear_mm_cpumask_cpu
 925#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
 926#endif
 927
 928/**
 929 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 930 * @cpu: a CPU id
 931 *
 932 * This function walks all processes, finds a valid mm struct for each one and
 933 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 934 * trivial, there are various non-obvious corner cases, which this function
 935 * tries to solve in a safe manner.
 936 *
 937 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 938 * be called only for an already offlined CPU.
 939 */
 940void clear_tasks_mm_cpumask(int cpu)
 941{
 942	struct task_struct *p;
 943
 944	/*
 945	 * This function is called after the cpu is taken down and marked
 946	 * offline, so its not like new tasks will ever get this cpu set in
 947	 * their mm mask. -- Peter Zijlstra
 948	 * Thus, we may use rcu_read_lock() here, instead of grabbing
 949	 * full-fledged tasklist_lock.
 950	 */
 951	WARN_ON(cpu_online(cpu));
 952	rcu_read_lock();
 953	for_each_process(p) {
 954		struct task_struct *t;
 955
 956		/*
 957		 * Main thread might exit, but other threads may still have
 958		 * a valid mm. Find one.
 959		 */
 960		t = find_lock_task_mm(p);
 961		if (!t)
 962			continue;
 963		arch_clear_mm_cpumask_cpu(cpu, t->mm);
 964		task_unlock(t);
 965	}
 966	rcu_read_unlock();
 967}
 968
 969/* Take this CPU down. */
 970static int take_cpu_down(void *_param)
 971{
 972	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
 973	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
 974	int err, cpu = smp_processor_id();
 975	int ret;
 976
 977	/* Ensure this CPU doesn't handle any more interrupts. */
 978	err = __cpu_disable();
 979	if (err < 0)
 980		return err;
 981
 982	/*
 983	 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
 984	 * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
 985	 */
 986	WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));
 987
 988	/* Invoke the former CPU_DYING callbacks */
 989	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
 990
 991	/*
 992	 * DYING must not fail!
 993	 */
 994	WARN_ON_ONCE(ret);
 995
 996	/* Give up timekeeping duties */
 997	tick_handover_do_timer();
 998	/* Remove CPU from timer broadcasting */
 999	tick_offline_cpu(cpu);
1000	/* Park the stopper thread */
1001	stop_machine_park(cpu);
1002	return 0;
1003}
1004
1005static int takedown_cpu(unsigned int cpu)
1006{
1007	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1008	int err;
1009
1010	/* Park the smpboot threads */
1011	kthread_park(st->thread);
1012
1013	/*
1014	 * Prevent irq alloc/free while the dying cpu reorganizes the
1015	 * interrupt affinities.
1016	 */
1017	irq_lock_sparse();
1018
1019	/*
1020	 * So now all preempt/rcu users must observe !cpu_active().
1021	 */
1022	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
1023	if (err) {
1024		/* CPU refused to die */
1025		irq_unlock_sparse();
1026		/* Unpark the hotplug thread so we can rollback there */
1027		kthread_unpark(st->thread);
1028		return err;
1029	}
1030	BUG_ON(cpu_online(cpu));
1031
1032	/*
1033	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
1034	 * all runnable tasks from the CPU, there's only the idle task left now
1035	 * that the migration thread is done doing the stop_machine thing.
1036	 *
1037	 * Wait for the stop thread to go away.
1038	 */
1039	wait_for_ap_thread(st, false);
1040	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
1041
1042	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
1043	irq_unlock_sparse();
1044
1045	hotplug_cpu__broadcast_tick_pull(cpu);
1046	/* This actually kills the CPU. */
1047	__cpu_die(cpu);
1048
1049	tick_cleanup_dead_cpu(cpu);
1050	rcutree_migrate_callbacks(cpu);
1051	return 0;
1052}
1053
1054static void cpuhp_complete_idle_dead(void *arg)
1055{
1056	struct cpuhp_cpu_state *st = arg;
1057
1058	complete_ap_thread(st, false);
1059}
1060
1061void cpuhp_report_idle_dead(void)
1062{
1063	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1064
1065	BUG_ON(st->state != CPUHP_AP_OFFLINE);
1066	rcu_report_dead(smp_processor_id());
1067	st->state = CPUHP_AP_IDLE_DEAD;
1068	/*
1069	 * We cannot call complete after rcu_report_dead() so we delegate it
1070	 * to an online cpu.
1071	 */
1072	smp_call_function_single(cpumask_first(cpu_online_mask),
1073				 cpuhp_complete_idle_dead, st, 0);
1074}
1075
1076static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
1077				enum cpuhp_state target)
1078{
1079	enum cpuhp_state prev_state = st->state;
1080	int ret = 0;
1081
1082	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
1083	if (ret) {
 
 
 
1084
1085		cpuhp_reset_state(st, prev_state);
1086
1087		if (st->state < prev_state)
1088			WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
1089							    prev_state));
1090	}
1091
1092	return ret;
1093}
1094
1095/* Requires cpu_add_remove_lock to be held */
1096static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1097			   enum cpuhp_state target)
1098{
1099	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1100	int prev_state, ret = 0;
1101
1102	if (num_online_cpus() == 1)
1103		return -EBUSY;
1104
1105	if (!cpu_present(cpu))
1106		return -EINVAL;
1107
1108	cpus_write_lock();
1109
1110	cpuhp_tasks_frozen = tasks_frozen;
1111
1112	prev_state = cpuhp_set_state(st, target);
1113	/*
1114	 * If the current CPU state is in the range of the AP hotplug thread,
1115	 * then we need to kick the thread.
1116	 */
1117	if (st->state > CPUHP_TEARDOWN_CPU) {
1118		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
1119		ret = cpuhp_kick_ap_work(cpu);
1120		/*
1121		 * The AP side has done the error rollback already. Just
1122		 * return the error code..
1123		 */
1124		if (ret)
1125			goto out;
1126
1127		/*
1128		 * We might have stopped still in the range of the AP hotplug
1129		 * thread. Nothing to do anymore.
1130		 */
1131		if (st->state > CPUHP_TEARDOWN_CPU)
1132			goto out;
1133
1134		st->target = target;
1135	}
1136	/*
1137	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1138	 * to do the further cleanups.
1139	 */
1140	ret = cpuhp_down_callbacks(cpu, st, target);
1141	if (ret && st->state < prev_state) {
1142		if (st->state == CPUHP_TEARDOWN_CPU) {
1143			cpuhp_reset_state(st, prev_state);
1144			__cpuhp_kick_ap(st);
1145		} else {
1146			WARN(1, "DEAD callback error for CPU%d", cpu);
1147		}
1148	}
1149
1150out:
1151	cpus_write_unlock();
1152	/*
1153	 * Do post unplug cleanup. This is still protected against
1154	 * concurrent CPU hotplug via cpu_add_remove_lock.
1155	 */
1156	lockup_detector_cleanup();
1157	arch_smt_update();
1158	cpu_up_down_serialize_trainwrecks(tasks_frozen);
1159	return ret;
1160}
1161
1162static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1163{
 
 
 
 
 
 
1164	if (cpu_hotplug_disabled)
1165		return -EBUSY;
1166	return _cpu_down(cpu, 0, target);
1167}
1168
1169static int cpu_down(unsigned int cpu, enum cpuhp_state target)
1170{
1171	int err;
1172
1173	cpu_maps_update_begin();
1174	err = cpu_down_maps_locked(cpu, target);
1175	cpu_maps_update_done();
1176	return err;
1177}
1178
1179/**
1180 * cpu_device_down - Bring down a cpu device
1181 * @dev: Pointer to the cpu device to offline
1182 *
1183 * This function is meant to be used by device core cpu subsystem only.
1184 *
1185 * Other subsystems should use remove_cpu() instead.
1186 */
1187int cpu_device_down(struct device *dev)
1188{
1189	return cpu_down(dev->id, CPUHP_OFFLINE);
1190}
1191
1192int remove_cpu(unsigned int cpu)
1193{
1194	int ret;
1195
1196	lock_device_hotplug();
1197	ret = device_offline(get_cpu_device(cpu));
1198	unlock_device_hotplug();
1199
1200	return ret;
1201}
1202EXPORT_SYMBOL_GPL(remove_cpu);
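
/*
 * Usage sketch (illustrative only, not part of this file): a subsystem that
 * has to take a CPU offline from process context is expected to go through
 * remove_cpu() so the cpu device core state stays consistent.  The
 * example_* name below is hypothetical.
 *
 *	#include <linux/cpu.h>
 *
 *	static int example_isolate_cpu(unsigned int cpu)
 *	{
 *		int ret;
 *
 *		ret = remove_cpu(cpu);
 *		if (ret)
 *			pr_err("example: failed to offline CPU%u: %d\n",
 *			       cpu, ret);
 *		return ret;
 *	}
 */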
1203
1204void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
1205{
1206	unsigned int cpu;
1207	int error;
1208
1209	cpu_maps_update_begin();
1210
1211	/*
1212	 * Make certain the cpu I'm about to reboot on is online.
1213	 *
1214	 * This is in line with what migrate_to_reboot_cpu() already does.
1215	 */
1216	if (!cpu_online(primary_cpu))
1217		primary_cpu = cpumask_first(cpu_online_mask);
1218
1219	for_each_online_cpu(cpu) {
1220		if (cpu == primary_cpu)
1221			continue;
1222
1223		error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
1224		if (error) {
1225			pr_err("Failed to offline CPU%d - error=%d",
1226				cpu, error);
1227			break;
1228		}
1229	}
1230
1231	/*
1232	 * Ensure all but the reboot CPU are offline.
1233	 */
1234	BUG_ON(num_online_cpus() > 1);
1235
1236	/*
1237	 * Make sure the CPUs won't be enabled by someone else after this
1238	 * point. Kexec will reboot to a new kernel shortly resetting
1239	 * everything along the way.
1240	 */
1241	cpu_hotplug_disabled++;
1242
1243	cpu_maps_update_done();
1244}
1245
1246#else
1247#define takedown_cpu		NULL
1248#endif /*CONFIG_HOTPLUG_CPU*/
1249
1250/**
1251 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1252 * @cpu: cpu that just started
1253 *
1254 * It must be called by the arch code on the new cpu, before the new cpu
1255 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1256 */
1257void notify_cpu_starting(unsigned int cpu)
1258{
1259	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1260	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1261	int ret;
1262
1263	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
1264	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
1265	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
1266
1267	/*
1268	 * STARTING must not fail!
1269	 */
1270	WARN_ON_ONCE(ret);
1271}
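
/*
 * Ordering sketch (illustrative, not an actual architecture port): a
 * secondary CPU entry path is expected to call the notifier above with
 * interrupts still disabled, mark itself online and only then enter the
 * idle loop.  secondary_start_example() is a hypothetical name.
 *
 *	static void secondary_start_example(void)
 *	{
 *		unsigned int cpu = smp_processor_id();
 *
 *		... arch-specific per-CPU setup, interrupts still disabled ...
 *		notify_cpu_starting(cpu);
 *		set_cpu_online(cpu, true);
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 *	}
 */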
1272
1273/*
1274 * Called from the idle task. Wake up the controlling task which brings the
1275 * hotplug thread of the upcoming CPU up and then delegates the rest of the
1276 * online bringup to the hotplug thread.
1277 */
1278void cpuhp_online_idle(enum cpuhp_state state)
1279{
1280	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1281
1282	/* Happens for the boot cpu */
1283	if (state != CPUHP_AP_ONLINE_IDLE)
1284		return;
1285
1286	/*
1287	 * Unpark the stopper thread before we start the idle loop (and start
1288	 * scheduling); this ensures the stopper task is always available.
1289	 */
1290	stop_machine_unpark(smp_processor_id());
1291
1292	st->state = CPUHP_AP_ONLINE_IDLE;
1293	complete_ap_thread(st, true);
1294}
1295
1296/* Requires cpu_add_remove_lock to be held */
1297static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1298{
1299	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1300	struct task_struct *idle;
1301	int ret = 0;
1302
1303	cpus_write_lock();
1304
1305	if (!cpu_present(cpu)) {
1306		ret = -EINVAL;
1307		goto out;
1308	}
1309
1310	/*
1311	 * The caller of cpu_up() might have raced with another
1312	 * caller. Nothing to do.
1313	 */
1314	if (st->state >= target)
1315		goto out;
1316
1317	if (st->state == CPUHP_OFFLINE) {
1318		/* Let it fail before we try to bring the cpu up */
1319		idle = idle_thread_get(cpu);
1320		if (IS_ERR(idle)) {
1321			ret = PTR_ERR(idle);
1322			goto out;
1323		}
1324	}
1325
1326	cpuhp_tasks_frozen = tasks_frozen;
1327
1328	cpuhp_set_state(st, target);
1329	/*
1330	 * If the current CPU state is in the range of the AP hotplug thread,
1331	 * then we need to kick the thread once more.
1332	 */
1333	if (st->state > CPUHP_BRINGUP_CPU) {
1334		ret = cpuhp_kick_ap_work(cpu);
1335		/*
1336		 * The AP side has done the error rollback already. Just
1337		 * return the error code.
1338		 */
1339		if (ret)
1340			goto out;
1341	}
1342
1343	/*
1344	 * Try to reach the target state. We max out on the BP at
1345	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1346	 * responsible for bringing it up to the target state.
1347	 */
1348	target = min((int)target, CPUHP_BRINGUP_CPU);
1349	ret = cpuhp_up_callbacks(cpu, st, target);
1350out:
1351	cpus_write_unlock();
1352	arch_smt_update();
1353	cpu_up_down_serialize_trainwrecks(tasks_frozen);
1354	return ret;
1355}
1356
1357static int cpu_up(unsigned int cpu, enum cpuhp_state target)
1358{
1359	int err = 0;
1360
1361	if (!cpu_possible(cpu)) {
1362		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1363		       cpu);
1364#if defined(CONFIG_IA64)
1365		pr_err("please check additional_cpus= boot parameter\n");
1366#endif
1367		return -EINVAL;
1368	}
1369
1370	err = try_online_node(cpu_to_node(cpu));
1371	if (err)
1372		return err;
1373
1374	cpu_maps_update_begin();
1375
1376	if (cpu_hotplug_disabled) {
1377		err = -EBUSY;
1378		goto out;
1379	}
1380	if (!cpu_smt_allowed(cpu)) {
1381		err = -EPERM;
1382		goto out;
1383	}
1384
1385	err = _cpu_up(cpu, 0, target);
1386out:
1387	cpu_maps_update_done();
1388	return err;
1389}
1390
1391/**
1392 * cpu_device_up - Bring up a cpu device
1393 * @dev: Pointer to the cpu device to online
1394 *
1395 * This function is meant to be used by device core cpu subsystem only.
1396 *
1397 * Other subsystems should use add_cpu() instead.
1398 */
1399int cpu_device_up(struct device *dev)
1400{
1401	return cpu_up(dev->id, CPUHP_ONLINE);
1402}
1403
1404int add_cpu(unsigned int cpu)
1405{
1406	int ret;
1407
1408	lock_device_hotplug();
1409	ret = device_online(get_cpu_device(cpu));
1410	unlock_device_hotplug();
1411
1412	return ret;
1413}
1414EXPORT_SYMBOL_GPL(add_cpu);
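
/*
 * Usage sketch (illustrative only): the counterpart of remove_cpu() for code
 * outside the device core is add_cpu(), which brings a present CPU back
 * online through its cpu device.  example_restore_cpu() is a hypothetical
 * name.
 *
 *	static int example_restore_cpu(unsigned int cpu)
 *	{
 *		return add_cpu(cpu);
 *	}
 */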
1415
1416/**
1417 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1418 * @sleep_cpu: The cpu we hibernated on and should be brought up.
1419 *
1420 * On some architectures like arm64, we can hibernate on any CPU, but on
1421 * wake up the CPU we hibernated on might be offline as a side effect of
1422 * using maxcpus= for example.
1423 */
1424int bringup_hibernate_cpu(unsigned int sleep_cpu)
1425{
1426	int ret;
1427
1428	if (!cpu_online(sleep_cpu)) {
1429		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
1430		ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
1431		if (ret) {
1432			pr_err("Failed to bring hibernate-CPU up!\n");
1433			return ret;
1434		}
1435	}
1436	return 0;
1437}
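
/*
 * Call-site sketch (illustrative): a resume path that recorded the CPU it
 * hibernated on would re-online it before restoring, roughly as:
 *
 *	ret = bringup_hibernate_cpu(sleep_cpu);
 *	if (ret)
 *		return ret;
 */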
1438
1439void bringup_nonboot_cpus(unsigned int setup_max_cpus)
1440{
1441	unsigned int cpu;
1442
1443	for_each_present_cpu(cpu) {
1444		if (num_online_cpus() >= setup_max_cpus)
1445			break;
1446		if (!cpu_online(cpu))
1447			cpu_up(cpu, CPUHP_ONLINE);
1448	}
1449}
1450
1451#ifdef CONFIG_PM_SLEEP_SMP
1452static cpumask_var_t frozen_cpus;
1453
1454int freeze_secondary_cpus(int primary)
1455{
1456	int cpu, error = 0;
1457
1458	cpu_maps_update_begin();
1459	if (primary == -1) {
1460		primary = cpumask_first(cpu_online_mask);
1461		if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
1462			primary = housekeeping_any_cpu(HK_FLAG_TIMER);
1463	} else {
1464		if (!cpu_online(primary))
1465			primary = cpumask_first(cpu_online_mask);
1466	}
1467
1468	/*
1469	 * We take down all of the non-boot CPUs in one shot to avoid races
1470	 * with userspace trying to use CPU hotplug at the same time.
1471	 */
1472	cpumask_clear(frozen_cpus);
1473
1474	pr_info("Disabling non-boot CPUs ...\n");
1475	for_each_online_cpu(cpu) {
1476		if (cpu == primary)
1477			continue;
1478
1479		if (pm_wakeup_pending()) {
1480			pr_info("Wakeup pending. Abort CPU freeze\n");
1481			error = -EBUSY;
1482			break;
1483		}
1484
1485		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1486		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1487		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1488		if (!error)
1489			cpumask_set_cpu(cpu, frozen_cpus);
1490		else {
1491			pr_err("Error taking CPU%d down: %d\n", cpu, error);
1492			break;
1493		}
1494	}
1495
1496	if (!error)
1497		BUG_ON(num_online_cpus() > 1);
1498	else
1499		pr_err("Non-boot CPUs are not disabled\n");
1500
1501	/*
1502	 * Make sure the CPUs won't be enabled by someone else. We need to do
1503	 * this even in case of failure as all freeze_secondary_cpus() users are
1504	 * supposed to do thaw_secondary_cpus() on the failure path.
1505	 */
1506	cpu_hotplug_disabled++;
1507
1508	cpu_maps_update_done();
1509	return error;
1510}
1511
1512void __weak arch_thaw_secondary_cpus_begin(void)
1513{
1514}
1515
1516void __weak arch_thaw_secondary_cpus_end(void)
1517{
1518}
1519
1520void thaw_secondary_cpus(void)
1521{
1522	int cpu, error;
1523
1524	/* Allow everyone to use the CPU hotplug again */
1525	cpu_maps_update_begin();
1526	__cpu_hotplug_enable();
1527	if (cpumask_empty(frozen_cpus))
1528		goto out;
1529
1530	pr_info("Enabling non-boot CPUs ...\n");
1531
1532	arch_thaw_secondary_cpus_begin();
1533
1534	for_each_cpu(cpu, frozen_cpus) {
1535		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1536		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1537		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1538		if (!error) {
1539			pr_info("CPU%d is up\n", cpu);
1540			continue;
1541		}
1542		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1543	}
1544
1545	arch_thaw_secondary_cpus_end();
1546
1547	cpumask_clear(frozen_cpus);
1548out:
1549	cpu_maps_update_done();
1550}
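
/*
 * Usage sketch (illustrative): the suspend/hibernate core pairs these two
 * helpers around the low power transition.  Note that thaw_secondary_cpus()
 * must be called even when freeze_secondary_cpus() failed, since it is what
 * re-enables regular CPU hotplug.  example_pm_cycle() is hypothetical.
 *
 *	static int example_pm_cycle(void)
 *	{
 *		int error;
 *
 *		error = freeze_secondary_cpus(-1);
 *		if (!error) {
 *			... enter the sleep state or create the image ...
 *		}
 *		thaw_secondary_cpus();
 *		return error;
 *	}
 */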
1551
1552static int __init alloc_frozen_cpus(void)
1553{
1554	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1555		return -ENOMEM;
1556	return 0;
1557}
1558core_initcall(alloc_frozen_cpus);
1559
1560/*
1561 * When callbacks for CPU hotplug notifications are being executed, we must
1562 * ensure that the state of the system with respect to the tasks being frozen
1563 * or not, as reported by the notification, remains unchanged *throughout the
1564 * duration* of the execution of the callbacks.
1565 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1566 *
1567 * This synchronization is implemented by mutually excluding regular CPU
1568 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1569 * Hibernate notifications.
1570 */
1571static int
1572cpu_hotplug_pm_callback(struct notifier_block *nb,
1573			unsigned long action, void *ptr)
1574{
1575	switch (action) {
1576
1577	case PM_SUSPEND_PREPARE:
1578	case PM_HIBERNATION_PREPARE:
1579		cpu_hotplug_disable();
1580		break;
1581
1582	case PM_POST_SUSPEND:
1583	case PM_POST_HIBERNATION:
1584		cpu_hotplug_enable();
1585		break;
1586
1587	default:
1588		return NOTIFY_DONE;
1589	}
1590
1591	return NOTIFY_OK;
1592}
1593
1594
1595static int __init cpu_hotplug_pm_sync_init(void)
1596{
1597	/*
1598	 * cpu_hotplug_pm_callback has higher priority than x86
1599	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1600	 * to disable cpu hotplug to avoid cpu hotplug race.
1601	 */
1602	pm_notifier(cpu_hotplug_pm_callback, 0);
1603	return 0;
1604}
1605core_initcall(cpu_hotplug_pm_sync_init);
1606
1607#endif /* CONFIG_PM_SLEEP_SMP */
1608
1609int __boot_cpu_id;
1610
1611#endif /* CONFIG_SMP */
1612
1613/* Boot processor state steps */
1614static struct cpuhp_step cpuhp_hp_states[] = {
1615	[CPUHP_OFFLINE] = {
1616		.name			= "offline",
1617		.startup.single		= NULL,
1618		.teardown.single	= NULL,
1619	},
1620#ifdef CONFIG_SMP
1621	[CPUHP_CREATE_THREADS]= {
1622		.name			= "threads:prepare",
1623		.startup.single		= smpboot_create_threads,
1624		.teardown.single	= NULL,
1625		.cant_stop		= true,
1626	},
1627	[CPUHP_PERF_PREPARE] = {
1628		.name			= "perf:prepare",
1629		.startup.single		= perf_event_init_cpu,
1630		.teardown.single	= perf_event_exit_cpu,
1631	},
1632	[CPUHP_WORKQUEUE_PREP] = {
1633		.name			= "workqueue:prepare",
1634		.startup.single		= workqueue_prepare_cpu,
1635		.teardown.single	= NULL,
1636	},
1637	[CPUHP_HRTIMERS_PREPARE] = {
1638		.name			= "hrtimers:prepare",
1639		.startup.single		= hrtimers_prepare_cpu,
1640		.teardown.single	= hrtimers_dead_cpu,
1641	},
1642	[CPUHP_SMPCFD_PREPARE] = {
1643		.name			= "smpcfd:prepare",
1644		.startup.single		= smpcfd_prepare_cpu,
1645		.teardown.single	= smpcfd_dead_cpu,
1646	},
1647	[CPUHP_RELAY_PREPARE] = {
1648		.name			= "relay:prepare",
1649		.startup.single		= relay_prepare_cpu,
1650		.teardown.single	= NULL,
1651	},
1652	[CPUHP_SLAB_PREPARE] = {
1653		.name			= "slab:prepare",
1654		.startup.single		= slab_prepare_cpu,
1655		.teardown.single	= slab_dead_cpu,
1656	},
1657	[CPUHP_RCUTREE_PREP] = {
1658		.name			= "RCU/tree:prepare",
1659		.startup.single		= rcutree_prepare_cpu,
1660		.teardown.single	= rcutree_dead_cpu,
1661	},
1662	/*
1663	 * On the tear-down path, timers_dead_cpu() must be invoked
1664	 * before blk_mq_queue_reinit_notify() from notify_dead(),
1665	 * otherwise an RCU stall occurs.
1666	 */
1667	[CPUHP_TIMERS_PREPARE] = {
1668		.name			= "timers:prepare",
1669		.startup.single		= timers_prepare_cpu,
1670		.teardown.single	= timers_dead_cpu,
1671	},
1672	/* Kicks the plugged cpu into life */
1673	[CPUHP_BRINGUP_CPU] = {
1674		.name			= "cpu:bringup",
1675		.startup.single		= bringup_cpu,
1676		.teardown.single	= finish_cpu,
1677		.cant_stop		= true,
1678	},
1679	/* Final state before CPU kills itself */
1680	[CPUHP_AP_IDLE_DEAD] = {
1681		.name			= "idle:dead",
1682	},
1683	/*
1684	 * Last state before CPU enters the idle loop to die. Transient state
1685	 * for synchronization.
1686	 */
1687	[CPUHP_AP_OFFLINE] = {
1688		.name			= "ap:offline",
1689		.cant_stop		= true,
1690	},
1691	/* First state is scheduler control. Interrupts are disabled */
1692	[CPUHP_AP_SCHED_STARTING] = {
1693		.name			= "sched:starting",
1694		.startup.single		= sched_cpu_starting,
1695		.teardown.single	= sched_cpu_dying,
1696	},
1697	[CPUHP_AP_RCUTREE_DYING] = {
1698		.name			= "RCU/tree:dying",
1699		.startup.single		= NULL,
1700		.teardown.single	= rcutree_dying_cpu,
1701	},
1702	[CPUHP_AP_SMPCFD_DYING] = {
1703		.name			= "smpcfd:dying",
1704		.startup.single		= NULL,
1705		.teardown.single	= smpcfd_dying_cpu,
1706	},
1707	/* Entry state on starting. Interrupts enabled from here on. Transient
1708	 * state for synchronization */
1709	[CPUHP_AP_ONLINE] = {
1710		.name			= "ap:online",
1711	},
1712	/*
1713	 * Handled on control processor until the plugged processor manages
1714	 * this itself.
1715	 */
1716	[CPUHP_TEARDOWN_CPU] = {
1717		.name			= "cpu:teardown",
1718		.startup.single		= NULL,
1719		.teardown.single	= takedown_cpu,
1720		.cant_stop		= true,
1721	},
1722
1723	[CPUHP_AP_SCHED_WAIT_EMPTY] = {
1724		.name			= "sched:waitempty",
1725		.startup.single		= NULL,
1726		.teardown.single	= sched_cpu_wait_empty,
1727	},
1728
1729	/* Handle smpboot threads park/unpark */
1730	[CPUHP_AP_SMPBOOT_THREADS] = {
1731		.name			= "smpboot/threads:online",
1732		.startup.single		= smpboot_unpark_threads,
1733		.teardown.single	= smpboot_park_threads,
1734	},
1735	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1736		.name			= "irq/affinity:online",
1737		.startup.single		= irq_affinity_online_cpu,
1738		.teardown.single	= NULL,
1739	},
1740	[CPUHP_AP_PERF_ONLINE] = {
1741		.name			= "perf:online",
1742		.startup.single		= perf_event_init_cpu,
1743		.teardown.single	= perf_event_exit_cpu,
1744	},
1745	[CPUHP_AP_WATCHDOG_ONLINE] = {
1746		.name			= "lockup_detector:online",
1747		.startup.single		= lockup_detector_online_cpu,
1748		.teardown.single	= lockup_detector_offline_cpu,
1749	},
1750	[CPUHP_AP_WORKQUEUE_ONLINE] = {
1751		.name			= "workqueue:online",
1752		.startup.single		= workqueue_online_cpu,
1753		.teardown.single	= workqueue_offline_cpu,
1754	},
1755	[CPUHP_AP_RCUTREE_ONLINE] = {
1756		.name			= "RCU/tree:online",
1757		.startup.single		= rcutree_online_cpu,
1758		.teardown.single	= rcutree_offline_cpu,
1759	},
1760#endif
1761	/*
1762	 * The dynamically registered state space is here
1763	 */
1764
1765#ifdef CONFIG_SMP
1766	/* Last state is scheduler control setting the cpu active */
1767	[CPUHP_AP_ACTIVE] = {
1768		.name			= "sched:active",
1769		.startup.single		= sched_cpu_activate,
1770		.teardown.single	= sched_cpu_deactivate,
1771	},
1772#endif
1773
1774	/* CPU is fully up and running. */
1775	[CPUHP_ONLINE] = {
1776		.name			= "online",
1777		.startup.single		= NULL,
1778		.teardown.single	= NULL,
1779	},
1780};
1781
1782/* Sanity check for callbacks */
1783static int cpuhp_cb_check(enum cpuhp_state state)
1784{
1785	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1786		return -EINVAL;
1787	return 0;
1788}
1789
1790/*
1791 * Returns a free slot for dynamic state assignment in the requested range.
1792 * The states are protected by the cpuhp_state_mutex and an empty slot is
1793 * identified by having no name assigned.
1794 */
1795static int cpuhp_reserve_state(enum cpuhp_state state)
1796{
1797	enum cpuhp_state i, end;
1798	struct cpuhp_step *step;
1799
1800	switch (state) {
1801	case CPUHP_AP_ONLINE_DYN:
1802		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
1803		end = CPUHP_AP_ONLINE_DYN_END;
1804		break;
1805	case CPUHP_BP_PREPARE_DYN:
1806		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
1807		end = CPUHP_BP_PREPARE_DYN_END;
1808		break;
1809	default:
1810		return -EINVAL;
1811	}
1812
1813	for (i = state; i <= end; i++, step++) {
1814		if (!step->name)
1815			return i;
1816	}
1817	WARN(1, "No more dynamic states available for CPU hotplug\n");
1818	return -ENOSPC;
1819}
1820
1821static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1822				 int (*startup)(unsigned int cpu),
1823				 int (*teardown)(unsigned int cpu),
1824				 bool multi_instance)
1825{
1826	/* (Un)Install the callbacks for further cpu hotplug operations */
1827	struct cpuhp_step *sp;
1828	int ret = 0;
1829
1830	/*
1831	 * If name is NULL, then the state gets removed.
1832	 *
1833	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
1834	 * the first allocation from these dynamic ranges, so the removal
1835	 * would trigger a new allocation and clear the wrong (already
1836	 * empty) state, leaving the callbacks of the to be cleared state
1837	 * dangling, which causes wreckage on the next hotplug operation.
1838	 */
1839	if (name && (state == CPUHP_AP_ONLINE_DYN ||
1840		     state == CPUHP_BP_PREPARE_DYN)) {
1841		ret = cpuhp_reserve_state(state);
1842		if (ret < 0)
1843			return ret;
1844		state = ret;
1845	}
1846	sp = cpuhp_get_step(state);
1847	if (name && sp->name)
1848		return -EBUSY;
1849
1850	sp->startup.single = startup;
1851	sp->teardown.single = teardown;
1852	sp->name = name;
1853	sp->multi_instance = multi_instance;
1854	INIT_HLIST_HEAD(&sp->list);
1855	return ret;
1856}
1857
1858static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1859{
1860	return cpuhp_get_step(state)->teardown.single;
1861}
1862
1863/*
1864 * Call the startup/teardown function for a step either on the AP or
1865 * on the current CPU.
1866 */
1867static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1868			    struct hlist_node *node)
1869{
1870	struct cpuhp_step *sp = cpuhp_get_step(state);
1871	int ret;
1872
1873	/*
1874	 * If there's nothing to do, we're done.
1875	 * Relies on the union for multi_instance.
1876	 */
1877	if (cpuhp_step_empty(bringup, sp))
1878		return 0;
1879	/*
1880	 * The non-AP-bound callbacks can fail on bringup. On teardown,
1881	 * e.g. during module removal, we crash for now.
1882	 */
1883#ifdef CONFIG_SMP
1884	if (cpuhp_is_ap_state(state))
1885		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1886	else
1887		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1888#else
1889	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1890#endif
1891	BUG_ON(ret && !bringup);
1892	return ret;
1893}
1894
1895/*
1896 * Called from __cpuhp_setup_state on a recoverable failure.
1897 *
1898 * Note: The teardown callbacks for rollback are not allowed to fail!
1899 */
1900static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1901				   struct hlist_node *node)
1902{
1903	int cpu;
1904
1905	/* Roll back the already executed steps on the other cpus */
1906	for_each_present_cpu(cpu) {
1907		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1908		int cpustate = st->state;
1909
1910		if (cpu >= failedcpu)
1911			break;
1912
1913		/* Did we invoke the startup call on that cpu ? */
1914		if (cpustate >= state)
1915			cpuhp_issue_call(cpu, state, false, node);
1916	}
1917}
1918
1919int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1920					  struct hlist_node *node,
1921					  bool invoke)
1922{
1923	struct cpuhp_step *sp;
1924	int cpu;
1925	int ret;
1926
1927	lockdep_assert_cpus_held();
1928
1929	sp = cpuhp_get_step(state);
1930	if (sp->multi_instance == false)
1931		return -EINVAL;
1932
1933	mutex_lock(&cpuhp_state_mutex);
1934
1935	if (!invoke || !sp->startup.multi)
1936		goto add_node;
1937
1938	/*
1939	 * Try to call the startup callback for each present cpu
1940	 * depending on the hotplug state of the cpu.
1941	 */
1942	for_each_present_cpu(cpu) {
1943		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1944		int cpustate = st->state;
1945
1946		if (cpustate < state)
1947			continue;
1948
1949		ret = cpuhp_issue_call(cpu, state, true, node);
1950		if (ret) {
1951			if (sp->teardown.multi)
1952				cpuhp_rollback_install(cpu, state, node);
1953			goto unlock;
1954		}
1955	}
1956add_node:
1957	ret = 0;
1958	hlist_add_head(node, &sp->list);
1959unlock:
1960	mutex_unlock(&cpuhp_state_mutex);
1961	return ret;
1962}
1963
1964int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1965			       bool invoke)
1966{
1967	int ret;
1968
1969	cpus_read_lock();
1970	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
1971	cpus_read_unlock();
1972	return ret;
1973}
1974EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
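
/*
 * Usage sketch (illustrative) for the multi-instance API that funnels into
 * __cpuhp_state_add_instance(): a driver registers the state once and then
 * adds one hlist_node per device instance.  All example_* names are
 * hypothetical.
 *
 *	#include <linux/cpuhotplug.h>
 *
 *	struct example_dev {
 *		struct hlist_node node;
 *		... per device data ...
 *	};
 *
 *	static enum cpuhp_state example_state;
 *
 *	static int example_cpu_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct example_dev *ed =
 *			hlist_entry(node, struct example_dev, node);
 *
 *		... set up the per-CPU resources of @ed on @cpu ...
 *		return 0;
 *	}
 *
 *	Registration (e.g. module init):
 *		example_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *					"example/driver:online",
 *					example_cpu_online, NULL);
 *	Per device probe:
 *		cpuhp_state_add_instance(example_state, &ed->node);
 *	Per device remove:
 *		cpuhp_state_remove_instance(example_state, &ed->node);
 */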
1975
1976/**
1977 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
1978 * @state:		The state to setup
 * @name:		Name of the state
1979 * @invoke:		If true, the startup function is invoked for cpus where
1980 *			cpu state >= @state
1981 * @startup:		startup callback function
1982 * @teardown:		teardown callback function
1983 * @multi_instance:	State is set up for multiple instances which get
1984 *			added afterwards.
1985 *
1986 * The caller needs to hold cpus read locked while calling this function.
1987 * Returns:
1988 *   On success:
1989 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
1990 *      0 for all other states
1991 *   On failure: proper (negative) error code
1992 */
1993int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1994				   const char *name, bool invoke,
1995				   int (*startup)(unsigned int cpu),
1996				   int (*teardown)(unsigned int cpu),
1997				   bool multi_instance)
1998{
1999	int cpu, ret = 0;
2000	bool dynstate;
2001
2002	lockdep_assert_cpus_held();
2003
2004	if (cpuhp_cb_check(state) || !name)
2005		return -EINVAL;
2006
2007	mutex_lock(&cpuhp_state_mutex);
2008
2009	ret = cpuhp_store_callbacks(state, name, startup, teardown,
2010				    multi_instance);
2011
2012	dynstate = state == CPUHP_AP_ONLINE_DYN;
2013	if (ret > 0 && dynstate) {
2014		state = ret;
2015		ret = 0;
2016	}
2017
2018	if (ret || !invoke || !startup)
2019		goto out;
2020
2021	/*
2022	 * Try to call the startup callback for each present cpu
2023	 * depending on the hotplug state of the cpu.
2024	 */
2025	for_each_present_cpu(cpu) {
2026		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2027		int cpustate = st->state;
2028
2029		if (cpustate < state)
2030			continue;
2031
2032		ret = cpuhp_issue_call(cpu, state, true, NULL);
2033		if (ret) {
2034			if (teardown)
2035				cpuhp_rollback_install(cpu, state, NULL);
2036			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2037			goto out;
2038		}
2039	}
2040out:
2041	mutex_unlock(&cpuhp_state_mutex);
2042	/*
2043	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
2044	 * dynamically allocated state in case of success.
2045	 */
2046	if (!ret && dynstate)
2047		return state;
2048	return ret;
2049}
2050EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
2051
2052int __cpuhp_setup_state(enum cpuhp_state state,
2053			const char *name, bool invoke,
2054			int (*startup)(unsigned int cpu),
2055			int (*teardown)(unsigned int cpu),
2056			bool multi_instance)
2057{
2058	int ret;
2059
2060	cpus_read_lock();
2061	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
2062					     teardown, multi_instance);
2063	cpus_read_unlock();
2064	return ret;
2065}
2066EXPORT_SYMBOL(__cpuhp_setup_state);
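
/*
 * Usage sketch (illustrative): most users go through the cpuhp_setup_state()
 * wrapper in <linux/cpuhotplug.h> rather than calling this directly.
 * Requesting CPUHP_AP_ONLINE_DYN allocates a dynamic state, and the returned
 * positive state id is what later gets handed to cpuhp_remove_state().
 * All example_* names are hypothetical.
 *
 *	static enum cpuhp_state example_hp_state;
 *
 *	static int example_online(unsigned int cpu)
 *	{
 *		... allocate/enable the per-CPU resources for @cpu ...
 *		return 0;
 *	}
 *
 *	static int example_offline(unsigned int cpu)
 *	{
 *		... tear the per-CPU resources down again ...
 *		return 0;
 *	}
 *
 *	static int __init example_init(void)
 *	{
 *		int ret;
 *
 *		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 *					"example/subsys:online",
 *					example_online, example_offline);
 *		if (ret < 0)
 *			return ret;
 *		example_hp_state = ret;
 *		return 0;
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		cpuhp_remove_state(example_hp_state);
 *	}
 */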
2067
2068int __cpuhp_state_remove_instance(enum cpuhp_state state,
2069				  struct hlist_node *node, bool invoke)
2070{
2071	struct cpuhp_step *sp = cpuhp_get_step(state);
2072	int cpu;
2073
2074	BUG_ON(cpuhp_cb_check(state));
2075
2076	if (!sp->multi_instance)
2077		return -EINVAL;
2078
2079	cpus_read_lock();
2080	mutex_lock(&cpuhp_state_mutex);
2081
2082	if (!invoke || !cpuhp_get_teardown_cb(state))
2083		goto remove;
2084	/*
2085	 * Call the teardown callback for each present cpu depending
2086	 * on the hotplug state of the cpu. This function is not
2087	 * allowed to fail currently!
2088	 */
2089	for_each_present_cpu(cpu) {
2090		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2091		int cpustate = st->state;
2092
2093		if (cpustate >= state)
2094			cpuhp_issue_call(cpu, state, false, node);
2095	}
2096
2097remove:
2098	hlist_del(node);
2099	mutex_unlock(&cpuhp_state_mutex);
2100	cpus_read_unlock();
2101
2102	return 0;
2103}
2104EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
2105
2106/**
2107 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
2108 * @state:	The state to remove
2109 * @invoke:	If true, the teardown function is invoked for cpus where
2110 *		cpu state >= @state
2111 *
2112 * The caller needs to hold cpus read locked while calling this function.
2113 * The teardown callback is currently not allowed to fail. Think
2114 * about module removal!
2115 */
2116void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2117{
2118	struct cpuhp_step *sp = cpuhp_get_step(state);
2119	int cpu;
2120
2121	BUG_ON(cpuhp_cb_check(state));
2122
2123	lockdep_assert_cpus_held();
2124
2125	mutex_lock(&cpuhp_state_mutex);
2126	if (sp->multi_instance) {
2127		WARN(!hlist_empty(&sp->list),
2128		     "Error: Removing state %d which has instances left.\n",
2129		     state);
2130		goto remove;
2131	}
2132
2133	if (!invoke || !cpuhp_get_teardown_cb(state))
2134		goto remove;
2135
2136	/*
2137	 * Call the teardown callback for each present cpu depending
2138	 * on the hotplug state of the cpu. This function is not
2139	 * allowed to fail currently!
2140	 */
2141	for_each_present_cpu(cpu) {
2142		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2143		int cpustate = st->state;
2144
2145		if (cpustate >= state)
2146			cpuhp_issue_call(cpu, state, false, NULL);
2147	}
2148remove:
2149	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2150	mutex_unlock(&cpuhp_state_mutex);
2151}
2152EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2153
2154void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2155{
2156	cpus_read_lock();
2157	__cpuhp_remove_state_cpuslocked(state, invoke);
2158	cpus_read_unlock();
2159}
2160EXPORT_SYMBOL(__cpuhp_remove_state);
2161
2162#ifdef CONFIG_HOTPLUG_SMT
2163static void cpuhp_offline_cpu_device(unsigned int cpu)
2164{
2165	struct device *dev = get_cpu_device(cpu);
2166
2167	dev->offline = true;
2168	/* Tell user space about the state change */
2169	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2170}
2171
2172static void cpuhp_online_cpu_device(unsigned int cpu)
2173{
2174	struct device *dev = get_cpu_device(cpu);
2175
2176	dev->offline = false;
2177	/* Tell user space about the state change */
2178	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2179}
2180
2181int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2182{
2183	int cpu, ret = 0;
2184
2185	cpu_maps_update_begin();
2186	for_each_online_cpu(cpu) {
2187		if (topology_is_primary_thread(cpu))
2188			continue;
2189		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2190		if (ret)
2191			break;
2192		/*
2193		 * As this needs to hold the cpu maps lock it's impossible
2194		 * to call device_offline() because that ends up calling
2195		 * cpu_down() which takes cpu maps lock. cpu maps lock
2196		 * needs to be held as this might race against in-kernel
2197		 * abusers of the hotplug machinery (thermal management).
2198		 *
2199		 * So nothing would update device:offline state. That would
2200		 * leave the sysfs entry stale and prevent onlining after
2201		 * smt control has been changed to 'off' again. This is
2202		 * called under the sysfs hotplug lock, so it is properly
2203		 * serialized against the regular offline usage.
2204		 */
2205		cpuhp_offline_cpu_device(cpu);
2206	}
2207	if (!ret)
2208		cpu_smt_control = ctrlval;
2209	cpu_maps_update_done();
2210	return ret;
2211}
2212
2213int cpuhp_smt_enable(void)
2214{
2215	int cpu, ret = 0;
2216
2217	cpu_maps_update_begin();
2218	cpu_smt_control = CPU_SMT_ENABLED;
2219	for_each_present_cpu(cpu) {
2220		/* Skip online CPUs and CPUs on offline nodes */
2221		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2222			continue;
2223		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2224		if (ret)
2225			break;
2226		/* See comment in cpuhp_smt_disable() */
2227		cpuhp_online_cpu_device(cpu);
2228	}
2229	cpu_maps_update_done();
2230	return ret;
2231}
2232#endif
2233
2234#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2235static ssize_t show_cpuhp_state(struct device *dev,
2236				struct device_attribute *attr, char *buf)
2237{
2238	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2239
2240	return sprintf(buf, "%d\n", st->state);
2241}
2242static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
2243
2244static ssize_t write_cpuhp_target(struct device *dev,
2245				  struct device_attribute *attr,
2246				  const char *buf, size_t count)
2247{
2248	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2249	struct cpuhp_step *sp;
2250	int target, ret;
2251
2252	ret = kstrtoint(buf, 10, &target);
2253	if (ret)
2254		return ret;
2255
2256#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2257	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2258		return -EINVAL;
2259#else
2260	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2261		return -EINVAL;
2262#endif
2263
2264	ret = lock_device_hotplug_sysfs();
2265	if (ret)
2266		return ret;
2267
2268	mutex_lock(&cpuhp_state_mutex);
2269	sp = cpuhp_get_step(target);
2270	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2271	mutex_unlock(&cpuhp_state_mutex);
2272	if (ret)
2273		goto out;
2274
2275	if (st->state < target)
2276		ret = cpu_up(dev->id, target);
2277	else
2278		ret = cpu_down(dev->id, target);
2279out:
2280	unlock_device_hotplug();
2281	return ret ? ret : count;
2282}
2283
2284static ssize_t show_cpuhp_target(struct device *dev,
2285				 struct device_attribute *attr, char *buf)
2286{
2287	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2288
2289	return sprintf(buf, "%d\n", st->target);
2290}
2291static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
2292
2293
2294static ssize_t write_cpuhp_fail(struct device *dev,
2295				struct device_attribute *attr,
2296				const char *buf, size_t count)
2297{
2298	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2299	struct cpuhp_step *sp;
2300	int fail, ret;
2301
2302	ret = kstrtoint(buf, 10, &fail);
2303	if (ret)
2304		return ret;
2305
2306	if (fail == CPUHP_INVALID) {
2307		st->fail = fail;
2308		return count;
2309	}
2310
2311	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
2312		return -EINVAL;
2313
2314	/*
2315	 * Cannot fail STARTING/DYING callbacks.
2316	 */
2317	if (cpuhp_is_atomic_state(fail))
2318		return -EINVAL;
2319
2320	/*
2321	 * DEAD callbacks cannot fail...
2322	 * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
2323	 * triggers the STARTING callbacks, so a failure in this state would
2324	 * hinder rollback.
2325	 */
2326	if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
2327		return -EINVAL;
2328
2329	/*
2330	 * Cannot fail anything that doesn't have callbacks.
2331	 */
2332	mutex_lock(&cpuhp_state_mutex);
2333	sp = cpuhp_get_step(fail);
2334	if (!sp->startup.single && !sp->teardown.single)
2335		ret = -EINVAL;
2336	mutex_unlock(&cpuhp_state_mutex);
2337	if (ret)
2338		return ret;
2339
2340	st->fail = fail;
2341
2342	return count;
2343}
2344
2345static ssize_t show_cpuhp_fail(struct device *dev,
2346			       struct device_attribute *attr, char *buf)
2347{
2348	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2349
2350	return sprintf(buf, "%d\n", st->fail);
2351}
2352
2353static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
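
/*
 * The three attributes above are exposed per CPU under
 * /sys/devices/system/cpu/cpuN/hotplug/:
 *
 *	state	- current hotplug state id of the CPU (read only)
 *	target	- write a state id to drive the CPU towards that state
 *	fail	- write a state id whose callback should fail (error
 *		  injection for rollback testing), or -1 to clear it
 *
 * Illustrative example: "echo 0 > .../cpuN/hotplug/target" offlines the CPU
 * and "cat .../cpuN/hotplug/state" shows where it ended up.  The numeric
 * state ids can be looked up in the global "states" file set up below.
 */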
2354
2355static struct attribute *cpuhp_cpu_attrs[] = {
2356	&dev_attr_state.attr,
2357	&dev_attr_target.attr,
2358	&dev_attr_fail.attr,
2359	NULL
2360};
2361
2362static const struct attribute_group cpuhp_cpu_attr_group = {
2363	.attrs = cpuhp_cpu_attrs,
2364	.name = "hotplug",
2365	NULL
2366};
2367
2368static ssize_t show_cpuhp_states(struct device *dev,
2369				 struct device_attribute *attr, char *buf)
2370{
2371	ssize_t cur, res = 0;
2372	int i;
2373
2374	mutex_lock(&cpuhp_state_mutex);
2375	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2376		struct cpuhp_step *sp = cpuhp_get_step(i);
2377
2378		if (sp->name) {
2379			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2380			buf += cur;
2381			res += cur;
2382		}
2383	}
2384	mutex_unlock(&cpuhp_state_mutex);
2385	return res;
2386}
2387static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
2388
2389static struct attribute *cpuhp_cpu_root_attrs[] = {
2390	&dev_attr_states.attr,
2391	NULL
2392};
2393
2394static const struct attribute_group cpuhp_cpu_root_attr_group = {
2395	.attrs = cpuhp_cpu_root_attrs,
2396	.name = "hotplug",
2397	NULL
2398};
2399
2400#ifdef CONFIG_HOTPLUG_SMT
2401
2402static ssize_t
2403__store_smt_control(struct device *dev, struct device_attribute *attr,
2404		    const char *buf, size_t count)
2405{
2406	int ctrlval, ret;
2407
2408	if (sysfs_streq(buf, "on"))
2409		ctrlval = CPU_SMT_ENABLED;
2410	else if (sysfs_streq(buf, "off"))
2411		ctrlval = CPU_SMT_DISABLED;
2412	else if (sysfs_streq(buf, "forceoff"))
2413		ctrlval = CPU_SMT_FORCE_DISABLED;
2414	else
2415		return -EINVAL;
2416
2417	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2418		return -EPERM;
2419
2420	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2421		return -ENODEV;
2422
2423	ret = lock_device_hotplug_sysfs();
2424	if (ret)
2425		return ret;
2426
2427	if (ctrlval != cpu_smt_control) {
2428		switch (ctrlval) {
2429		case CPU_SMT_ENABLED:
2430			ret = cpuhp_smt_enable();
2431			break;
2432		case CPU_SMT_DISABLED:
2433		case CPU_SMT_FORCE_DISABLED:
2434			ret = cpuhp_smt_disable(ctrlval);
2435			break;
2436		}
2437	}
2438
2439	unlock_device_hotplug();
2440	return ret ? ret : count;
2441}
2442
2443#else /* !CONFIG_HOTPLUG_SMT */
2444static ssize_t
2445__store_smt_control(struct device *dev, struct device_attribute *attr,
2446		    const char *buf, size_t count)
2447{
2448	return -ENODEV;
2449}
2450#endif /* CONFIG_HOTPLUG_SMT */
2451
2452static const char *smt_states[] = {
2453	[CPU_SMT_ENABLED]		= "on",
2454	[CPU_SMT_DISABLED]		= "off",
2455	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
2456	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
2457	[CPU_SMT_NOT_IMPLEMENTED]	= "notimplemented",
2458};
2459
2460static ssize_t
2461show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
2462{
2463	const char *state = smt_states[cpu_smt_control];
2464
2465	return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
2466}
2467
2468static ssize_t
2469store_smt_control(struct device *dev, struct device_attribute *attr,
2470		  const char *buf, size_t count)
2471{
2472	return __store_smt_control(dev, attr, buf, count);
2473}
2474static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2475
2476static ssize_t
2477show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2478{
2479	return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
2480}
2481static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2482
2483static struct attribute *cpuhp_smt_attrs[] = {
2484	&dev_attr_control.attr,
2485	&dev_attr_active.attr,
2486	NULL
2487};
2488
2489static const struct attribute_group cpuhp_smt_attr_group = {
2490	.attrs = cpuhp_smt_attrs,
2491	.name = "smt",
2492	NULL
2493};
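
/*
 * The group above lands in /sys/devices/system/cpu/smt/: "control" accepts
 * "on", "off" and "forceoff" (illustrative example:
 * "echo off > /sys/devices/system/cpu/smt/control" offlines all secondary
 * SMT siblings), while "active" reports whether SMT is currently in use by
 * the scheduler.
 */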
2494
2495static int __init cpu_smt_sysfs_init(void)
2496{
2497	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2498				  &cpuhp_smt_attr_group);
2499}
2500
2501static int __init cpuhp_sysfs_init(void)
2502{
2503	int cpu, ret;
2504
2505	ret = cpu_smt_sysfs_init();
2506	if (ret)
2507		return ret;
2508
2509	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2510				 &cpuhp_cpu_root_attr_group);
2511	if (ret)
2512		return ret;
2513
2514	for_each_possible_cpu(cpu) {
2515		struct device *dev = get_cpu_device(cpu);
2516
2517		if (!dev)
2518			continue;
2519		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2520		if (ret)
2521			return ret;
2522	}
2523	return 0;
2524}
2525device_initcall(cpuhp_sysfs_init);
2526#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
2527
2528/*
2529 * cpu_bit_bitmap[] is a special, "compressed" data structure that
2530 * represents all NR_CPUS bits binary values of 1<<nr.
2531 *
2532 * It is used by cpumask_of() to get a constant address to a CPU
2533 * mask value that has a single bit set only.
2534 */
2535
2536/* cpu_bit_bitmap[0] is empty - so we can back into it */
2537#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
2538#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2539#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2540#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2541
2542const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2543
2544	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
2545	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
2546#if BITS_PER_LONG > 32
2547	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
2548	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
2549#endif
2550};
2551EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
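
/*
 * Illustrative example: this table is what lets cpumask_of() hand out a
 * constant single-bit mask without any allocation:
 *
 *	const struct cpumask *mask = cpumask_of(3);
 *
 *	if (cpumask_test_cpu(3, mask))
 *		pr_debug("bit 3 is the only bit set in this mask\n");
 */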
2552
2553const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2554EXPORT_SYMBOL(cpu_all_bits);
2555
2556#ifdef CONFIG_INIT_ALL_POSSIBLE
2557struct cpumask __cpu_possible_mask __read_mostly
2558	= {CPU_BITS_ALL};
2559#else
2560struct cpumask __cpu_possible_mask __read_mostly;
2561#endif
2562EXPORT_SYMBOL(__cpu_possible_mask);
2563
2564struct cpumask __cpu_online_mask __read_mostly;
2565EXPORT_SYMBOL(__cpu_online_mask);
2566
2567struct cpumask __cpu_present_mask __read_mostly;
2568EXPORT_SYMBOL(__cpu_present_mask);
2569
2570struct cpumask __cpu_active_mask __read_mostly;
2571EXPORT_SYMBOL(__cpu_active_mask);
2572
2573struct cpumask __cpu_dying_mask __read_mostly;
2574EXPORT_SYMBOL(__cpu_dying_mask);
2575
2576atomic_t __num_online_cpus __read_mostly;
2577EXPORT_SYMBOL(__num_online_cpus);
2578
2579void init_cpu_present(const struct cpumask *src)
2580{
2581	cpumask_copy(&__cpu_present_mask, src);
2582}
2583
2584void init_cpu_possible(const struct cpumask *src)
2585{
2586	cpumask_copy(&__cpu_possible_mask, src);
2587}
2588
2589void init_cpu_online(const struct cpumask *src)
2590{
2591	cpumask_copy(&__cpu_online_mask, src);
2592}
2593
2594void set_cpu_online(unsigned int cpu, bool online)
2595{
2596	/*
2597	 * atomic_inc/dec() is required to handle the horrid abuse of this
2598	 * function by the reboot and kexec code which invoke it from
2599	 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
2600	 * regular CPU hotplug is properly serialized.
2601	 *
2602	 * Note, that the fact that __num_online_cpus is of type atomic_t
2603	 * does not protect readers which are not serialized against
2604	 * concurrent hotplug operations.
2605	 */
2606	if (online) {
2607		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
2608			atomic_inc(&__num_online_cpus);
2609	} else {
2610		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
2611			atomic_dec(&__num_online_cpus);
2612	}
2613}
2614
2615/*
2616 * Activate the first processor.
2617 */
2618void __init boot_cpu_init(void)
2619{
2620	int cpu = smp_processor_id();
2621
2622	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
2623	set_cpu_online(cpu, true);
2624	set_cpu_active(cpu, true);
2625	set_cpu_present(cpu, true);
2626	set_cpu_possible(cpu, true);
2627
2628#ifdef CONFIG_SMP
2629	__boot_cpu_id = cpu;
2630#endif
2631}
2632
2633/*
2634 * Must be called _AFTER_ setting up the per_cpu areas
2635 */
2636void __init boot_cpu_hotplug_init(void)
2637{
2638#ifdef CONFIG_SMP
2639	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
2640#endif
2641	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2642}
2643
2644/*
2645 * These are used for a global "mitigations=" cmdline option for toggling
2646 * optional CPU mitigations.
2647 */
2648enum cpu_mitigations {
2649	CPU_MITIGATIONS_OFF,
2650	CPU_MITIGATIONS_AUTO,
2651	CPU_MITIGATIONS_AUTO_NOSMT,
2652};
2653
2654static enum cpu_mitigations cpu_mitigations __ro_after_init =
2655	CPU_MITIGATIONS_AUTO;
2656
2657static int __init mitigations_parse_cmdline(char *arg)
2658{
2659	if (!strcmp(arg, "off"))
2660		cpu_mitigations = CPU_MITIGATIONS_OFF;
2661	else if (!strcmp(arg, "auto"))
2662		cpu_mitigations = CPU_MITIGATIONS_AUTO;
2663	else if (!strcmp(arg, "auto,nosmt"))
2664		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2665	else
2666		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2667			arg);
2668
2669	return 0;
2670}
2671early_param("mitigations", mitigations_parse_cmdline);
2672
2673/* mitigations=off */
2674bool cpu_mitigations_off(void)
2675{
2676	return cpu_mitigations == CPU_MITIGATIONS_OFF;
2677}
2678EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2679
2680/* mitigations=auto,nosmt */
2681bool cpu_mitigations_auto_nosmt(void)
2682{
2683	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2684}
2685EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);