/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers; only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <linux/memblock.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);
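
/*
 * With SMT, a physical CPU address is the core id shifted left by
 * smp_cpu_mt_shift plus the thread id within the core; smp_cpu_mtid is
 * the highest thread id (0 when multi-threading is off). Illustrative
 * example: with two threads per core, smp_cpu_mtid = 1 and
 * smp_cpu_mt_shift = 1, so core 3 answers on the addresses 6 and 7.
 */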

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}

#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
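
/*
 * The async and panic stack pointers point to the top of their stack
 * areas minus room for a standard stack frame and a struct pt_regs, so
 * the first function running on the stack can save its registers
 * without overflowing the allocation.
 */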

static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, panic_stack;
	struct lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore =	(struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !panic_stack || !async_stack)
			goto out;
	} else {
		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	if (MACHINE_HAS_VX)
		lc->vector_save_area_addr =
			(unsigned long) &lc->vector_save_area;
	if (vdso_alloc_per_cpu(lc))
		goto out;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(panic_stack);
		free_pages(async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	if (pcpu == &pcpu_devices[0])
		return;
	free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
	free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}

#endif /* CONFIG_HOTPLUG_CPU */

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = pcpu->lowcore;

	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	atomic_inc(&init_mm.context.attach_count);
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	register unsigned long reg1 asm ("1") = (unsigned long) mtid;
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	asm volatile(
		"	sigp	%1,0,%2	# sigp set multi-threading\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
		: "cc");
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}
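
/*
 * Illustration: pcpu_set_smt(1) requests two threads per core, leaving
 * smp_cpu_mtid = 1 and smp_cpu_mt_shift = 1. The boot CPU address is
 * re-read via stap() afterwards because enabling multi-threading
 * changes the physical CPU address numbering.
 */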

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->lowcore->panic_stack -
		      PANIC_FRAME_OFFSET + PAGE_SIZE);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

int smp_vcpu_scheduled(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
}

void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C) {
		diag_stat_inc_norecursion(DIAG_STAT_X09C);
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	} else if (MACHINE_HAS_DIAG44) {
		diag_stat_inc_norecursion(DIAG_STAT_X044);
		asm volatile("diag 0,0,0x44");
	}
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
static void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

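	/*
	 * Give the other cpus up to one second: bit 51 of the TOD clock
	 * ticks every microsecond, so 1000000UL << 12 TOD-clock units
	 * correspond to one second.
	 */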
	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
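
/*
 * Usage sketch (the bit chosen here is purely illustrative):
 *
 *	smp_ctl_set_bit(0, 13);
 *
 * runs smp_ctl_bit_callback() on every online cpu and sets bit 13,
 * counted from the least significant bit as in 1UL << bit, in control
 * register 0; smp_ctl_clear_bit() clears a bit the same way.
 */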

#ifdef CONFIG_CRASH_DUMP

int smp_store_status(int cpu)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	unsigned long pa;

	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX)
		return 0;
	pa = __pa(pcpu->lowcore->vector_save_area_addr);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

static struct sclp_core_info *smp_get_core_info(void)
{
	static int use_sigp_detection;
	struct sclp_core_info *info;
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_core_info(info))) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
	return info;
}

static int smp_add_present_cpu(int cpu);

static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i, j;
	u16 address;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (sclp.has_core_type && info->core[i].type != boot_core_type)
			continue;
		address = info->core[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
				continue;
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			else
				nr++;
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
				break;
		}
	}
	return nr;
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = smp_get_core_info();
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");

	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	kfree(info);
}

/*
 *	Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = cpu - (cpu % (smp_cpu_mtid + 1));
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu) || !cpu_active(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0]  &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6]  &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	atomic_dec(&init_mm.context.attach_count);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = sclp.max_cores * sclp_max ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}
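
/*
 * Sizing sketch with illustrative numbers: if the SCLP reports 64 cores
 * with two threads each and neither smp_max_threads nor possible_cpus=
 * restricts this further, sclp_max is 128 and the first
 * min(128, nr_cpu_ids) cpus are marked possible.
 */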

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	smp_detect_cpus();
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = stap();
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu -= cpu % (smp_cpu_mtid + 1);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
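/*
 * Exposed as /sys/devices/system/cpu/cpuN/configure: writing 0
 * deconfigures a fully offline core via the SCLP, writing 1 configures
 * it again; both update the pcpu state of all threads of the core.
 */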
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct device *s = &per_cpu(cpu_device, cpu)->dev;
	int err = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = smp_get_core_info();
	if (!info)
		return -ENOMEM;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
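/*
 * Exposed as /sys/devices/system/cpu/rescan: any write triggers
 * smp_rescan_cpus(), which detects new standby cpus and makes them
 * present.
 */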
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	cpu_notifier_register_begin();
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	__hotcpu_notifier(smp_cpu_notify, 0);

out:
	cpu_notifier_register_done();
	return rc;
}
subsys_initcall(s390_smp_init);
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  SMP related functions
   4 *
   5 *    Copyright IBM Corp. 1999, 2012
   6 *    Author(s): Denis Joseph Barrow,
   7 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
   8 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
   9 *
  10 *  based on other smp stuff by
  11 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
  12 *    (c) 1998 Ingo Molnar
  13 *
  14 * The code outside of smp.c uses logical cpu numbers, only smp.c does
  15 * the translation of logical to physical cpu ids. All new code that
  16 * operates on physical cpu numbers needs to go into smp.c.
  17 */
  18
  19#define KMSG_COMPONENT "cpu"
  20#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  21
  22#include <linux/workqueue.h>
  23#include <linux/bootmem.h>
  24#include <linux/export.h>
  25#include <linux/init.h>
  26#include <linux/mm.h>
  27#include <linux/err.h>
  28#include <linux/spinlock.h>
  29#include <linux/kernel_stat.h>
  30#include <linux/delay.h>
  31#include <linux/interrupt.h>
  32#include <linux/irqflags.h>
  33#include <linux/cpu.h>
  34#include <linux/slab.h>
  35#include <linux/sched/hotplug.h>
  36#include <linux/sched/task_stack.h>
  37#include <linux/crash_dump.h>
  38#include <linux/memblock.h>
  39#include <linux/kprobes.h>
  40#include <asm/asm-offsets.h>
  41#include <asm/diag.h>
  42#include <asm/switch_to.h>
  43#include <asm/facility.h>
  44#include <asm/ipl.h>
  45#include <asm/setup.h>
  46#include <asm/irq.h>
  47#include <asm/tlbflush.h>
  48#include <asm/vtimer.h>
  49#include <asm/lowcore.h>
  50#include <asm/sclp.h>
  51#include <asm/vdso.h>
  52#include <asm/debug.h>
  53#include <asm/os_info.h>
  54#include <asm/sigp.h>
  55#include <asm/idle.h>
  56#include <asm/nmi.h>
  57#include <asm/topology.h>
  58#include "entry.h"
  59
  60enum {
  61	ec_schedule = 0,
  62	ec_call_function_single,
  63	ec_stop_cpu,
  64};
  65
  66enum {
  67	CPU_STATE_STANDBY,
  68	CPU_STATE_CONFIGURED,
  69};
  70
  71static DEFINE_PER_CPU(struct cpu *, cpu_device);
  72
  73struct pcpu {
  74	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
  75	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
  76	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
  77	signed char state;		/* physical cpu state */
  78	signed char polarization;	/* physical polarization */
  79	u16 address;			/* physical cpu address */
  80};
  81
  82static u8 boot_core_type;
  83static struct pcpu pcpu_devices[NR_CPUS];
  84
  85unsigned int smp_cpu_mt_shift;
  86EXPORT_SYMBOL(smp_cpu_mt_shift);
  87
  88unsigned int smp_cpu_mtid;
  89EXPORT_SYMBOL(smp_cpu_mtid);
  90
  91#ifdef CONFIG_CRASH_DUMP
  92__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
  93#endif
  94
  95static unsigned int smp_max_threads __initdata = -1U;
  96
  97static int __init early_nosmt(char *s)
  98{
  99	smp_max_threads = 1;
 100	return 0;
 101}
 102early_param("nosmt", early_nosmt);
 103
 104static int __init early_smt(char *s)
 105{
 106	get_option(&s, &smp_max_threads);
 107	return 0;
 108}
 109early_param("smt", early_smt);
 110
 111/*
 112 * The smp_cpu_state_mutex must be held when changing the state or polarization
 113 * member of a pcpu data structure within the pcpu_devices arreay.
 114 */
 115DEFINE_MUTEX(smp_cpu_state_mutex);
 116
 117/*
 118 * Signal processor helper functions.
 119 */
 120static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
 121{
 122	int cc;
 123
 124	while (1) {
 125		cc = __pcpu_sigp(addr, order, parm, NULL);
 126		if (cc != SIGP_CC_BUSY)
 127			return cc;
 128		cpu_relax();
 129	}
 130}
 131
 132static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
 133{
 134	int cc, retry;
 135
 136	for (retry = 0; ; retry++) {
 137		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
 138		if (cc != SIGP_CC_BUSY)
 139			break;
 140		if (retry >= 3)
 141			udelay(10);
 142	}
 143	return cc;
 144}
 145
 146static inline int pcpu_stopped(struct pcpu *pcpu)
 147{
 148	u32 uninitialized_var(status);
 149
 150	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
 151			0, &status) != SIGP_CC_STATUS_STORED)
 152		return 0;
 153	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
 154}
 155
 156static inline int pcpu_running(struct pcpu *pcpu)
 157{
 158	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
 159			0, NULL) != SIGP_CC_STATUS_STORED)
 160		return 1;
 161	/* Status stored condition code is equivalent to cpu not running. */
 162	return 0;
 163}
 164
 165/*
 166 * Find struct pcpu by cpu address.
 167 */
 168static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
 169{
 170	int cpu;
 171
 172	for_each_cpu(cpu, mask)
 173		if (pcpu_devices[cpu].address == address)
 174			return pcpu_devices + cpu;
 175	return NULL;
 176}
 177
 178static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
 179{
 180	int order;
 181
 182	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
 183		return;
 184	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
 185	pcpu->ec_clk = get_tod_clock_fast();
 186	pcpu_sigp_retry(pcpu, order, 0);
 187}
 188
 189#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
 190#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
 191
 192static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 193{
 194	unsigned long async_stack, panic_stack;
 195	struct lowcore *lc;
 196
 197	if (pcpu != &pcpu_devices[0]) {
 198		pcpu->lowcore =	(struct lowcore *)
 199			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
 200		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
 201		panic_stack = __get_free_page(GFP_KERNEL);
 202		if (!pcpu->lowcore || !panic_stack || !async_stack)
 203			goto out;
 204	} else {
 205		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
 206		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
 207	}
 208	lc = pcpu->lowcore;
 209	memcpy(lc, &S390_lowcore, 512);
 210	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
 211	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
 212	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
 213	lc->cpu_nr = cpu;
 214	lc->spinlock_lockval = arch_spin_lockval(cpu);
 215	lc->spinlock_index = 0;
 216	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 217	if (nmi_alloc_per_cpu(lc))
 218		goto out;
 219	if (vdso_alloc_per_cpu(lc))
 220		goto out_mcesa;
 221	lowcore_ptr[cpu] = lc;
 222	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
 223	return 0;
 224
 225out_mcesa:
 226	nmi_free_per_cpu(lc);
 227out:
 228	if (pcpu != &pcpu_devices[0]) {
 229		free_page(panic_stack);
 230		free_pages(async_stack, ASYNC_ORDER);
 231		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
 232	}
 233	return -ENOMEM;
 234}
 235
 236#ifdef CONFIG_HOTPLUG_CPU
 237
 238static void pcpu_free_lowcore(struct pcpu *pcpu)
 239{
 240	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
 241	lowcore_ptr[pcpu - pcpu_devices] = NULL;
 242	vdso_free_per_cpu(pcpu->lowcore);
 243	nmi_free_per_cpu(pcpu->lowcore);
 244	if (pcpu == &pcpu_devices[0])
 245		return;
 246	free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
 247	free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
 248	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
 249}
 250
 251#endif /* CONFIG_HOTPLUG_CPU */
 252
 253static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 254{
 255	struct lowcore *lc = pcpu->lowcore;
 256
 257	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
 258	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
 259	lc->cpu_nr = cpu;
 260	lc->spinlock_lockval = arch_spin_lockval(cpu);
 261	lc->spinlock_index = 0;
 262	lc->percpu_offset = __per_cpu_offset[cpu];
 263	lc->kernel_asce = S390_lowcore.kernel_asce;
 264	lc->machine_flags = S390_lowcore.machine_flags;
 265	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
 266	__ctl_store(lc->cregs_save_area, 0, 15);
 267	save_access_regs((unsigned int *) lc->access_regs_save_area);
 268	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
 269	       sizeof(lc->stfle_fac_list));
 270	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
 271	       sizeof(lc->alt_stfle_fac_list));
 272	arch_spin_lock_setup(cpu);
 273}
 274
 275static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
 276{
 277	struct lowcore *lc = pcpu->lowcore;
 278
 279	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
 280		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 281	lc->current_task = (unsigned long) tsk;
 282	lc->lpp = LPP_MAGIC;
 283	lc->current_pid = tsk->pid;
 284	lc->user_timer = tsk->thread.user_timer;
 285	lc->guest_timer = tsk->thread.guest_timer;
 286	lc->system_timer = tsk->thread.system_timer;
 287	lc->hardirq_timer = tsk->thread.hardirq_timer;
 288	lc->softirq_timer = tsk->thread.softirq_timer;
 289	lc->steal_timer = 0;
 290}
 291
 292static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 293{
 294	struct lowcore *lc = pcpu->lowcore;
 295
 296	lc->restart_stack = lc->kernel_stack;
 297	lc->restart_fn = (unsigned long) func;
 298	lc->restart_data = (unsigned long) data;
 299	lc->restart_source = -1UL;
 300	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
 301}
 302
 303/*
 304 * Call function via PSW restart on pcpu and stop the current cpu.
 305 */
 306static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
 307			  void *data, unsigned long stack)
 308{
 309	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
 310	unsigned long source_cpu = stap();
 311
 312	__load_psw_mask(PSW_KERNEL_BITS);
 313	if (pcpu->address == source_cpu)
 314		func(data);	/* should not return */
 315	/* Stop target cpu (if func returns this stops the current cpu). */
 316	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
 317	/* Restart func on the target cpu and stop the current cpu. */
 318	mem_assign_absolute(lc->restart_stack, stack);
 319	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
 320	mem_assign_absolute(lc->restart_data, (unsigned long) data);
 321	mem_assign_absolute(lc->restart_source, source_cpu);
 322	__bpon();
 323	asm volatile(
 324		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
 325		"	brc	2,0b	# busy, try again\n"
 326		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
 327		"	brc	2,1b	# busy, try again\n"
 328		: : "d" (pcpu->address), "d" (source_cpu),
 329		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
 330		: "0", "1", "cc");
 331	for (;;) ;
 332}
 333
 334/*
 335 * Enable additional logical cpus for multi-threading.
 336 */
 337static int pcpu_set_smt(unsigned int mtid)
 338{
 339	int cc;
 340
 341	if (smp_cpu_mtid == mtid)
 342		return 0;
 343	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
 344	if (cc == 0) {
 345		smp_cpu_mtid = mtid;
 346		smp_cpu_mt_shift = 0;
 347		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
 348			smp_cpu_mt_shift++;
 349		pcpu_devices[0].address = stap();
 350	}
 351	return cc;
 352}
 353
 354/*
 355 * Call function on an online CPU.
 356 */
 357void smp_call_online_cpu(void (*func)(void *), void *data)
 358{
 359	struct pcpu *pcpu;
 360
 361	/* Use the current cpu if it is online. */
 362	pcpu = pcpu_find_address(cpu_online_mask, stap());
 363	if (!pcpu)
 364		/* Use the first online cpu. */
 365		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
 366	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
 367}
 368
 369/*
 370 * Call function on the ipl CPU.
 371 */
 372void smp_call_ipl_cpu(void (*func)(void *), void *data)
 373{
 374	pcpu_delegate(&pcpu_devices[0], func, data,
 375		      pcpu_devices->lowcore->panic_stack -
 376		      PANIC_FRAME_OFFSET + PAGE_SIZE);
 377}
 378
 379int smp_find_processor_id(u16 address)
 380{
 381	int cpu;
 382
 383	for_each_present_cpu(cpu)
 384		if (pcpu_devices[cpu].address == address)
 385			return cpu;
 386	return -1;
 387}
 388
 389bool arch_vcpu_is_preempted(int cpu)
 390{
 391	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
 392		return false;
 393	if (pcpu_running(pcpu_devices + cpu))
 394		return false;
 395	return true;
 396}
 397EXPORT_SYMBOL(arch_vcpu_is_preempted);
 398
 399void smp_yield_cpu(int cpu)
 400{
 401	if (MACHINE_HAS_DIAG9C) {
 402		diag_stat_inc_norecursion(DIAG_STAT_X09C);
 403		asm volatile("diag %0,0,0x9c"
 404			     : : "d" (pcpu_devices[cpu].address));
 405	} else if (MACHINE_HAS_DIAG44) {
 406		diag_stat_inc_norecursion(DIAG_STAT_X044);
 407		asm volatile("diag 0,0,0x44");
 408	}
 409}
 410
 411/*
 412 * Send cpus emergency shutdown signal. This gives the cpus the
 413 * opportunity to complete outstanding interrupts.
 414 */
 415void notrace smp_emergency_stop(void)
 416{
 417	cpumask_t cpumask;
 418	u64 end;
 419	int cpu;
 420
 421	cpumask_copy(&cpumask, cpu_online_mask);
 422	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 423
 424	end = get_tod_clock() + (1000000UL << 12);
 425	for_each_cpu(cpu, &cpumask) {
 426		struct pcpu *pcpu = pcpu_devices + cpu;
 427		set_bit(ec_stop_cpu, &pcpu->ec_mask);
 428		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
 429				   0, NULL) == SIGP_CC_BUSY &&
 430		       get_tod_clock() < end)
 431			cpu_relax();
 432	}
 433	while (get_tod_clock() < end) {
 434		for_each_cpu(cpu, &cpumask)
 435			if (pcpu_stopped(pcpu_devices + cpu))
 436				cpumask_clear_cpu(cpu, &cpumask);
 437		if (cpumask_empty(&cpumask))
 438			break;
 439		cpu_relax();
 440	}
 441}
 442NOKPROBE_SYMBOL(smp_emergency_stop);
 443
 444/*
 445 * Stop all cpus but the current one.
 446 */
 447void smp_send_stop(void)
 448{
 449	int cpu;
 450
 451	/* Disable all interrupts/machine checks */
 452	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
 453	trace_hardirqs_off();
 454
 455	debug_set_critical();
 456
 457	if (oops_in_progress)
 458		smp_emergency_stop();
 459
 460	/* stop all processors */
 461	for_each_online_cpu(cpu) {
 462		if (cpu == smp_processor_id())
 463			continue;
 464		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
 465		while (!pcpu_stopped(pcpu_devices + cpu))
 466			cpu_relax();
 467	}
 468}
 469
 470/*
 471 * This is the main routine where commands issued by other
 472 * cpus are handled.
 473 */
 474static void smp_handle_ext_call(void)
 475{
 476	unsigned long bits;
 477
 478	/* handle bit signal external calls */
 479	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
 480	if (test_bit(ec_stop_cpu, &bits))
 481		smp_stop_cpu();
 482	if (test_bit(ec_schedule, &bits))
 483		scheduler_ipi();
 484	if (test_bit(ec_call_function_single, &bits))
 485		generic_smp_call_function_single_interrupt();
 486}
 487
 488static void do_ext_call_interrupt(struct ext_code ext_code,
 489				  unsigned int param32, unsigned long param64)
 490{
 491	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
 492	smp_handle_ext_call();
 493}
 494
 495void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 496{
 497	int cpu;
 498
 499	for_each_cpu(cpu, mask)
 500		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
 501}
 502
 503void arch_send_call_function_single_ipi(int cpu)
 504{
 505	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
 506}
 507
 508/*
 509 * this function sends a 'reschedule' IPI to another CPU.
 510 * it goes straight through and wastes no time serializing
 511 * anything. Worst case is that we lose a reschedule ...
 512 */
 513void smp_send_reschedule(int cpu)
 514{
 515	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
 516}
 517
 518/*
 519 * parameter area for the set/clear control bit callbacks
 520 */
 521struct ec_creg_mask_parms {
 522	unsigned long orval;
 523	unsigned long andval;
 524	int cr;
 525};
 526
 527/*
 528 * callback for setting/clearing control bits
 529 */
 530static void smp_ctl_bit_callback(void *info)
 531{
 532	struct ec_creg_mask_parms *pp = info;
 533	unsigned long cregs[16];
 534
 535	__ctl_store(cregs, 0, 15);
 536	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
 537	__ctl_load(cregs, 0, 15);
 538}
 539
 540/*
 541 * Set a bit in a control register of all cpus
 542 */
 543void smp_ctl_set_bit(int cr, int bit)
 544{
 545	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
 546
 547	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 548}
 549EXPORT_SYMBOL(smp_ctl_set_bit);
 550
 551/*
 552 * Clear a bit in a control register of all cpus
 553 */
 554void smp_ctl_clear_bit(int cr, int bit)
 555{
 556	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
 557
 558	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 559}
 560EXPORT_SYMBOL(smp_ctl_clear_bit);
 561
 562#ifdef CONFIG_CRASH_DUMP
 563
 564int smp_store_status(int cpu)
 565{
 566	struct pcpu *pcpu = pcpu_devices + cpu;
 567	unsigned long pa;
 568
 569	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
 570	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
 571			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
 572		return -EIO;
 573	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
 574		return 0;
 575	pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
 576	if (MACHINE_HAS_GS)
 577		pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
 578	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
 579			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
 580		return -EIO;
 581	return 0;
 582}
 583
 584/*
 585 * Collect CPU state of the previous, crashed system.
 586 * There are four cases:
 587 * 1) standard zfcp dump
 588 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 589 *    The state for all CPUs except the boot CPU needs to be collected
 590 *    with sigp stop-and-store-status. The boot CPU state is located in
 591 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 592 *    will copy the boot CPU state from the HSA.
 593 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 594 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 595 *    The state for all CPUs except the boot CPU needs to be collected
 596 *    with sigp stop-and-store-status. The firmware or the boot-loader
 597 *    stored the registers of the boot CPU in the absolute lowcore in the
 598 *    memory of the old system.
 599 * 3) kdump and the old kernel did not store the CPU state,
 600 *    or stand-alone kdump for DASD
 601 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 602 *    The state for all CPUs except the boot CPU needs to be collected
 603 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 604 *    stored the registers of the boot CPU in the memory of the old system.
 605 * 4) kdump and the old kernel stored the CPU state
 606 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 607 *    This case does not exist for s390 anymore, setup_arch explicitly
 608 *    deactivates the elfcorehdr= kernel parameter
 609 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}
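/*
 * Illustration of the SIGP fallback scan above: only core base addresses
 * are sensed, stepping by (1 << smp_cpu_mt_shift), and the core id is
 * address >> smp_cpu_mt_shift. E.g. with smp_cpu_mt_shift == 1 the
 * addresses 0, 2, 4, ... map to core ids 0, 1, 2, ...
 */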

static int smp_add_present_cpu(int cpu);

static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i, j;
	u16 address;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (sclp.has_core_type && info->core[i].type != boot_core_type)
			continue;
		address = info->core[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
				continue;
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			else
				nr++;
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
				break;
		}
	}
	return nr;
}

void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_virt_alloc(sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
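	/*
	 * Example of the clamp above: sclp may report mtid = 3 (four
	 * threads per core); with the thread count limited to 2 on the
	 * command line, mtid becomes smp_max_threads - 1 = 1, i.e. two
	 * threads per core are used.
	 */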
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	memblock_free_early((unsigned long)info, sizeof(*info));
}

/*
 *	Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	int cpu = smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = smp_get_base_cpu(cpu);
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);
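/*
 * Illustrative use (kernel command line): booting with "possible_cpus=4"
 * caps the number of possible CPUs at four, see smp_fill_possible_mask()
 * below.
 */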

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0]  &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6]  &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	__bpon();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}
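/*
 * Worked example with illustrative values: sclp.max_cores = 8 and
 * sclp.mtid = sclp.mtid_cp = 1 give sclp_max = 8 * (1 + 1) = 16; without
 * a "possible_cpus=" limit, CPUs 0..15 are marked possible, further
 * capped by nr_cpu_ids.
 */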

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}

/*
 * The frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * Usually you want to run this on all CPUs ;)
 */
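/*
 * On s390 this is a stub: the multiplier is accepted but no hardware
 * timer is reprogrammed, hence the unconditional return 0 below.
 */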
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
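/*
 * Usage sketch (standard sysfs path for cpu devices, assuming CPU 2):
 *   echo 0 > /sys/devices/system/cpu/cpu2/configure   # deconfigure
 *   echo 1 > /sys/devices/system/cpu/cpu2/configure   # configure
 * The target core must be offline and must not contain CPU 0.
 */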
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
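/*
 * Reading the attribute, e.g. "cat /sys/devices/system/cpu/cpu2/address",
 * returns the physical CPU address of the logical CPU.
 */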

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}
static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);
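/*
 * Any write triggers a rescan, e.g.:
 *   echo 1 > /sys/devices/system/cpu/rescan
 * The buffer content is ignored; newly found CPUs are added via
 * smp_add_present_cpu() and a topology update is scheduled.
 */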
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);