v3.1 (arch/s390/kernel/smp.c)
   1/*
   2 *  arch/s390/kernel/smp.c
   3 *
   4 *    Copyright IBM Corp. 1999, 2009
   5 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
   6 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
   7 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
   8 *
   9 *  based on other smp stuff by
  10 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
  11 *    (c) 1998 Ingo Molnar
  12 *
   13 * We work with logical cpu numbering everywhere we can. The only
   14 * functions that use the physical cpu address (obtained via STAP) are
   15 * the sigp functions. All other functions use the identity mapping,
   16 * i.e. cpu_number_map[i] == i for every cpu. cpu_number_map is used
   17 * e.g. to find the idle task belonging to a logical cpu. Every array
   18 * in the kernel is indexed by the logical cpu number, not the physical
   19 * one, which is what causes all the confusion with __cpu_logical_map
   20 * and cpu_number_map on other architectures.
  21 */
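/*
 * Editor's note (sketch, not part of the original listing): the sigp()
 * wrapper used throughout this file is what performs the logical to
 * physical translation described above, presumably along the same
 * lines as the cpu_stopped() helper further down:
 *
 *	static inline int sigp(int cpu, int order)
 *	{
 *		return raw_sigp(cpu_logical_map(cpu), order);
 *	}
 */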
  22
  23#define KMSG_COMPONENT "cpu"
  24#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  25
  26#include <linux/workqueue.h>
  27#include <linux/module.h>
  28#include <linux/init.h>
  29#include <linux/mm.h>
  30#include <linux/err.h>
  31#include <linux/spinlock.h>
  32#include <linux/kernel_stat.h>
  33#include <linux/delay.h>
  34#include <linux/cache.h>
  35#include <linux/interrupt.h>
  36#include <linux/irqflags.h>
  37#include <linux/cpu.h>
  38#include <linux/timex.h>
  39#include <linux/bootmem.h>
  40#include <linux/slab.h>
  41#include <asm/asm-offsets.h>
  42#include <asm/ipl.h>
  43#include <asm/setup.h>
  44#include <asm/sigp.h>
  45#include <asm/pgalloc.h>
  46#include <asm/irq.h>
  47#include <asm/cpcmd.h>
  48#include <asm/tlbflush.h>
  49#include <asm/timer.h>
  50#include <asm/lowcore.h>
  51#include <asm/sclp.h>
  52#include <asm/cputime.h>
  53#include <asm/vdso.h>
  54#include <asm/cpu.h>
  55#include "entry.h"
  56
  57/* logical cpu to cpu address */
  58unsigned short __cpu_logical_map[NR_CPUS];
  59
  60static struct task_struct *current_set[NR_CPUS];
  61
  62static u8 smp_cpu_type;
  63static int smp_use_sigp_detection;
  64
  65enum s390_cpu_state {
  66	CPU_STATE_STANDBY,
  67	CPU_STATE_CONFIGURED,
  68};
  69
  70DEFINE_MUTEX(smp_cpu_state_mutex);
  71int smp_cpu_polarization[NR_CPUS];
  72static int smp_cpu_state[NR_CPUS];
  73static int cpu_management;
  74
  75static DEFINE_PER_CPU(struct cpu, cpu_devices);
  76
  77static void smp_ext_bitcall(int, int);
  78
  79static int raw_cpu_stopped(int cpu)
  80{
  81	u32 status;
  82
  83	switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
  84	case sigp_status_stored:
   85		/* Check for stopped (0x40) and check-stop (0x10) state */
  86		if (status & 0x50)
  87			return 1;
  88		break;
  89	default:
  90		break;
  91	}
  92	return 0;
  93}
  94
  95static inline int cpu_stopped(int cpu)
  96{
  97	return raw_cpu_stopped(cpu_logical_map(cpu));
  98}
  99
 100void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
 101{
 102	struct _lowcore *lc, *current_lc;
 103	struct stack_frame *sf;
 104	struct pt_regs *regs;
 105	unsigned long sp;
 106
 107	if (smp_processor_id() == 0)
 108		func(data);
 109	__load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
 110	/* Disable lowcore protection */
 111	__ctl_clear_bit(0, 28);
 112	current_lc = lowcore_ptr[smp_processor_id()];
 113	lc = lowcore_ptr[0];
 114	if (!lc)
 115		lc = current_lc;
 116	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
 117	lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
 118	if (!cpu_online(0))
 119		smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
 120	while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
 121		cpu_relax();
 122	sp = lc->panic_stack;
 123	sp -= sizeof(struct pt_regs);
 124	regs = (struct pt_regs *) sp;
 125	memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
 126	regs->psw = lc->psw_save_area;
 127	sp -= STACK_FRAME_OVERHEAD;
 128	sf = (struct stack_frame *) sp;
 129	sf->back_chain = regs->gprs[15];
 130	smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
 131}
 132
 133void smp_send_stop(void)
 134{
 135	int cpu, rc;
 136
 137	/* Disable all interrupts/machine checks */
 138	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
 139	trace_hardirqs_off();
 140
 141	/* stop all processors */
 142	for_each_online_cpu(cpu) {
 143		if (cpu == smp_processor_id())
 144			continue;
 145		do {
 146			rc = sigp(cpu, sigp_stop);
 147		} while (rc == sigp_busy);
 148
 149		while (!cpu_stopped(cpu))
 150			cpu_relax();
 151	}
 152}
 153
 154/*
 155 * This is the main routine where commands issued by other
 156 * cpus are handled.
 157 */
 158
 159static void do_ext_call_interrupt(unsigned int ext_int_code,
 160				  unsigned int param32, unsigned long param64)
 161{
 162	unsigned long bits;
 163
 164	kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
 165	/*
 166	 * handle bit signal external calls
 167	 */
 168	bits = xchg(&S390_lowcore.ext_call_fast, 0);
 169
 170	if (test_bit(ec_schedule, &bits))
 171		scheduler_ipi();
 172
 173	if (test_bit(ec_call_function, &bits))
 174		generic_smp_call_function_interrupt();
 175
 176	if (test_bit(ec_call_function_single, &bits))
 177		generic_smp_call_function_single_interrupt();
 178}
 179
 180/*
 181 * Send an external call sigp to another cpu and return without waiting
 182 * for its completion.
 183 */
 184static void smp_ext_bitcall(int cpu, int sig)
 185{
 186	/*
 187	 * Set signaling bit in lowcore of target cpu and kick it
 188	 */
 189	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
 190	while (sigp(cpu, sigp_emergency_signal) == sigp_busy)
 191		udelay(10);
 192}
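/*
 * Editor's note: the emergency-signal order above only kicks the target
 * cpu; the request itself travels through the ext_call_fast bit mask in
 * the target's lowcore, which do_ext_call_interrupt() drains with the
 * xchg() further up in this file.
 */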
 193
 194void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 195{
 196	int cpu;
 197
 198	for_each_cpu(cpu, mask)
 199		smp_ext_bitcall(cpu, ec_call_function);
 200}
 201
 202void arch_send_call_function_single_ipi(int cpu)
 203{
 204	smp_ext_bitcall(cpu, ec_call_function_single);
 205}
 206
 207#ifndef CONFIG_64BIT
 208/*
 209 * this function sends a 'purge tlb' signal to another CPU.
 210 */
 211static void smp_ptlb_callback(void *info)
 212{
 213	__tlb_flush_local();
 214}
 215
 216void smp_ptlb_all(void)
 217{
 218	on_each_cpu(smp_ptlb_callback, NULL, 1);
 219}
 220EXPORT_SYMBOL(smp_ptlb_all);
 221#endif /* ! CONFIG_64BIT */
 222
 223/*
 224 * this function sends a 'reschedule' IPI to another CPU.
 225 * it goes straight through and wastes no time serializing
 226 * anything. Worst case is that we lose a reschedule ...
 227 */
 228void smp_send_reschedule(int cpu)
 229{
 230	smp_ext_bitcall(cpu, ec_schedule);
 231}
 232
 233/*
 234 * parameter area for the set/clear control bit callbacks
 235 */
 236struct ec_creg_mask_parms {
 237	unsigned long orvals[16];
 238	unsigned long andvals[16];
 239};
 240
 241/*
 242 * callback for setting/clearing control bits
 243 */
 244static void smp_ctl_bit_callback(void *info)
 245{
 246	struct ec_creg_mask_parms *pp = info;
 247	unsigned long cregs[16];
 248	int i;
 249
 250	__ctl_store(cregs, 0, 15);
 251	for (i = 0; i <= 15; i++)
 252		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
 253	__ctl_load(cregs, 0, 15);
 254}
 255
 256/*
 257 * Set a bit in a control register of all cpus
 258 */
 259void smp_ctl_set_bit(int cr, int bit)
 260{
 261	struct ec_creg_mask_parms parms;
 262
 263	memset(&parms.orvals, 0, sizeof(parms.orvals));
 264	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 265	parms.orvals[cr] = 1UL << bit;
 266	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 267}
 268EXPORT_SYMBOL(smp_ctl_set_bit);
 269
 270/*
 271 * Clear a bit in a control register of all cpus
 272 */
 273void smp_ctl_clear_bit(int cr, int bit)
 274{
 275	struct ec_creg_mask_parms parms;
 276
 277	memset(&parms.orvals, 0, sizeof(parms.orvals));
 278	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
 279	parms.andvals[cr] = ~(1UL << bit);
 280	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 281}
 282EXPORT_SYMBOL(smp_ctl_clear_bit);
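/*
 * Editor's note, typical (hypothetical) usage of the pair above: flip a
 * single control-register bit on every cpu with one call, e.g. bit 17
 * of control register 0:
 *
 *	smp_ctl_set_bit(0, 17);		set the bit on all cpus
 *	smp_ctl_clear_bit(0, 17);	and clear it again
 */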
 283
 284#ifdef CONFIG_ZFCPDUMP
 285
 286static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
 287{
 288	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 289		return;
 290	if (cpu >= NR_CPUS) {
 291		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
 292			   "the dump\n", cpu, NR_CPUS - 1);
 293		return;
 294	}
 295	zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
 296	while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
 297		cpu_relax();
 298	memcpy_real(zfcpdump_save_areas[cpu],
 299		    (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
 300		    sizeof(struct save_area));
 301}
 302
 303struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
 304EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
 305
 306#else
 307
 308static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
 309
 310#endif /* CONFIG_ZFCPDUMP */
 311
 312static int cpu_known(int cpu_id)
 313{
 314	int cpu;
 315
 316	for_each_present_cpu(cpu) {
 317		if (__cpu_logical_map[cpu] == cpu_id)
 318			return 1;
 319	}
 320	return 0;
 321}
 322
 323static int smp_rescan_cpus_sigp(cpumask_t avail)
 324{
 325	int cpu_id, logical_cpu;
 326
 327	logical_cpu = cpumask_first(&avail);
 328	if (logical_cpu >= nr_cpu_ids)
 329		return 0;
 330	for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
 331		if (cpu_known(cpu_id))
 332			continue;
 333		__cpu_logical_map[logical_cpu] = cpu_id;
 334		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
 335		if (!cpu_stopped(logical_cpu))
 336			continue;
 337		set_cpu_present(logical_cpu, true);
 338		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
 339		logical_cpu = cpumask_next(logical_cpu, &avail);
 340		if (logical_cpu >= nr_cpu_ids)
 341			break;
 342	}
 343	return 0;
 344}
 345
 346static int smp_rescan_cpus_sclp(cpumask_t avail)
 347{
 348	struct sclp_cpu_info *info;
 349	int cpu_id, logical_cpu, cpu;
 350	int rc;
 351
 352	logical_cpu = cpumask_first(&avail);
 353	if (logical_cpu >= nr_cpu_ids)
 354		return 0;
 355	info = kmalloc(sizeof(*info), GFP_KERNEL);
 356	if (!info)
 357		return -ENOMEM;
 358	rc = sclp_get_cpu_info(info);
 359	if (rc)
 360		goto out;
 361	for (cpu = 0; cpu < info->combined; cpu++) {
 362		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
 363			continue;
 364		cpu_id = info->cpu[cpu].address;
 365		if (cpu_known(cpu_id))
 366			continue;
 367		__cpu_logical_map[logical_cpu] = cpu_id;
 368		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
 369		set_cpu_present(logical_cpu, true);
 370		if (cpu >= info->configured)
 371			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
 372		else
 373			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
 374		logical_cpu = cpumask_next(logical_cpu, &avail);
 375		if (logical_cpu >= nr_cpu_ids)
 376			break;
 377	}
 378out:
 379	kfree(info);
 380	return rc;
 381}
 382
 383static int __smp_rescan_cpus(void)
 384{
 385	cpumask_t avail;
 386
 387	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
 388	if (smp_use_sigp_detection)
 389		return smp_rescan_cpus_sigp(avail);
 390	else
 391		return smp_rescan_cpus_sclp(avail);
 392}
 393
 394static void __init smp_detect_cpus(void)
 395{
 396	unsigned int cpu, c_cpus, s_cpus;
 397	struct sclp_cpu_info *info;
 398	u16 boot_cpu_addr, cpu_addr;
 399
 400	c_cpus = 1;
 401	s_cpus = 0;
 402	boot_cpu_addr = __cpu_logical_map[0];
 403	info = kmalloc(sizeof(*info), GFP_KERNEL);
 404	if (!info)
 405		panic("smp_detect_cpus failed to allocate memory\n");
 406	/* Use sigp detection algorithm if sclp doesn't work. */
 407	if (sclp_get_cpu_info(info)) {
 408		smp_use_sigp_detection = 1;
 409		for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
 410			if (cpu == boot_cpu_addr)
 411				continue;
 412			if (!raw_cpu_stopped(cpu))
 413				continue;
 414			smp_get_save_area(c_cpus, cpu);
 415			c_cpus++;
 416		}
 417		goto out;
 418	}
 419
 420	if (info->has_cpu_type) {
 421		for (cpu = 0; cpu < info->combined; cpu++) {
 422			if (info->cpu[cpu].address == boot_cpu_addr) {
 423				smp_cpu_type = info->cpu[cpu].type;
 424				break;
 425			}
 426		}
 427	}
 428
 429	for (cpu = 0; cpu < info->combined; cpu++) {
 430		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
 431			continue;
 432		cpu_addr = info->cpu[cpu].address;
 433		if (cpu_addr == boot_cpu_addr)
 434			continue;
 435		if (!raw_cpu_stopped(cpu_addr)) {
 436			s_cpus++;
 437			continue;
 438		}
 439		smp_get_save_area(c_cpus, cpu_addr);
 440		c_cpus++;
 441	}
 442out:
 443	kfree(info);
 444	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
 445	get_online_cpus();
 446	__smp_rescan_cpus();
 447	put_online_cpus();
 448}
 449
 450/*
 451 *	Activate a secondary processor.
 452 */
 453int __cpuinit start_secondary(void *cpuvoid)
 454{
 455	cpu_init();
 456	preempt_disable();
 457	init_cpu_timer();
 458	init_cpu_vtimer();
 459	pfault_init();
 460
 461	notify_cpu_starting(smp_processor_id());
 462	ipi_call_lock();
 463	set_cpu_online(smp_processor_id(), true);
 464	ipi_call_unlock();
 465	__ctl_clear_bit(0, 28); /* Disable lowcore protection */
 466	S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
 467	S390_lowcore.restart_psw.addr =
 468		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
 469	__ctl_set_bit(0, 28); /* Enable lowcore protection */
 470	/*
 471	 * Wait until the cpu which brought this one up marked it
 472	 * active before enabling interrupts.
 473	 */
 474	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
 475		cpu_relax();
 476	local_irq_enable();
 477	/* cpu_idle will call schedule for us */
 478	cpu_idle();
 479	return 0;
 480}
 481
 482struct create_idle {
 483	struct work_struct work;
 484	struct task_struct *idle;
 485	struct completion done;
 486	int cpu;
 487};
 488
 489static void __cpuinit smp_fork_idle(struct work_struct *work)
 490{
 491	struct create_idle *c_idle;
 492
 493	c_idle = container_of(work, struct create_idle, work);
 494	c_idle->idle = fork_idle(c_idle->cpu);
 495	complete(&c_idle->done);
 496}
 497
 498static int __cpuinit smp_alloc_lowcore(int cpu)
 499{
 500	unsigned long async_stack, panic_stack;
 501	struct _lowcore *lowcore;
 502
 503	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
 504	if (!lowcore)
 505		return -ENOMEM;
 506	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
 507	panic_stack = __get_free_page(GFP_KERNEL);
 508	if (!panic_stack || !async_stack)
 509		goto out;
 510	memcpy(lowcore, &S390_lowcore, 512);
 511	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
 512	lowcore->async_stack = async_stack + ASYNC_SIZE;
 513	lowcore->panic_stack = panic_stack + PAGE_SIZE;
 514	lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
 515	lowcore->restart_psw.addr =
 516		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
 517	if (user_mode != HOME_SPACE_MODE)
 518		lowcore->restart_psw.mask |= PSW_ASC_HOME;
 519#ifndef CONFIG_64BIT
 520	if (MACHINE_HAS_IEEE) {
 521		unsigned long save_area;
 522
 523		save_area = get_zeroed_page(GFP_KERNEL);
 524		if (!save_area)
 525			goto out;
 526		lowcore->extended_save_area_addr = (u32) save_area;
 527	}
 528#else
 529	if (vdso_alloc_per_cpu(cpu, lowcore))
 530		goto out;
 531#endif
 532	lowcore_ptr[cpu] = lowcore;
 533	return 0;
 534
 535out:
 536	free_page(panic_stack);
 537	free_pages(async_stack, ASYNC_ORDER);
 538	free_pages((unsigned long) lowcore, LC_ORDER);
 539	return -ENOMEM;
 540}
 541
 542static void smp_free_lowcore(int cpu)
 543{
 544	struct _lowcore *lowcore;
 545
 546	lowcore = lowcore_ptr[cpu];
 547#ifndef CONFIG_64BIT
 548	if (MACHINE_HAS_IEEE)
 549		free_page((unsigned long) lowcore->extended_save_area_addr);
 550#else
 551	vdso_free_per_cpu(cpu, lowcore);
 552#endif
 553	free_page(lowcore->panic_stack - PAGE_SIZE);
 554	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
 555	free_pages((unsigned long) lowcore, LC_ORDER);
 556	lowcore_ptr[cpu] = NULL;
 557}
 558
 559/* Upping and downing of CPUs */
 560int __cpuinit __cpu_up(unsigned int cpu)
 561{
 562	struct _lowcore *cpu_lowcore;
 563	struct create_idle c_idle;
 564	struct task_struct *idle;
 565	struct stack_frame *sf;
 566	u32 lowcore;
 567	int ccode;
 568
 569	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
 570		return -EIO;
 571	idle = current_set[cpu];
 572	if (!idle) {
 573		c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
 574		INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
 575		c_idle.cpu = cpu;
 576		schedule_work(&c_idle.work);
 577		wait_for_completion(&c_idle.done);
 578		if (IS_ERR(c_idle.idle))
 579			return PTR_ERR(c_idle.idle);
 580		idle = c_idle.idle;
 581		current_set[cpu] = c_idle.idle;
 582	}
 583	init_idle(idle, cpu);
 584	if (smp_alloc_lowcore(cpu))
 585		return -ENOMEM;
 586	do {
 587		ccode = sigp(cpu, sigp_initial_cpu_reset);
 588		if (ccode == sigp_busy)
 589			udelay(10);
 590		if (ccode == sigp_not_operational)
 591			goto err_out;
 592	} while (ccode == sigp_busy);
 593
 594	lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
 595	while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
 596		udelay(10);
 597
 598	cpu_lowcore = lowcore_ptr[cpu];
 599	cpu_lowcore->kernel_stack = (unsigned long)
 600		task_stack_page(idle) + THREAD_SIZE;
 601	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
 602	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
 603				     - sizeof(struct pt_regs)
 604				     - sizeof(struct stack_frame));
 605	memset(sf, 0, sizeof(struct stack_frame));
 606	sf->gprs[9] = (unsigned long) sf;
 607	cpu_lowcore->save_area[15] = (unsigned long) sf;
 608	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
 609	atomic_inc(&init_mm.context.attach_count);
 610	asm volatile(
 611		"	stam	0,15,0(%0)"
 612		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
 613	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
 614	cpu_lowcore->current_task = (unsigned long) idle;
 615	cpu_lowcore->cpu_nr = cpu;
 616	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
 617	cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
 618	cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
 619	memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list,
 620	       MAX_FACILITY_BIT/8);
 621	eieio();
 622
 623	while (sigp(cpu, sigp_restart) == sigp_busy)
 624		udelay(10);
 625
 626	while (!cpu_online(cpu))
 627		cpu_relax();
 628	return 0;
 629
 630err_out:
 631	smp_free_lowcore(cpu);
 632	return -EIO;
 633}
 634
 635static int __init setup_possible_cpus(char *s)
 636{
 637	int pcpus, cpu;
 638
 639	pcpus = simple_strtoul(s, NULL, 0);
 640	init_cpu_possible(cpumask_of(0));
 641	for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
 642		set_cpu_possible(cpu, true);
 643	return 0;
 644}
 645early_param("possible_cpus", setup_possible_cpus);
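/*
 * Editor's note: booting with e.g. "possible_cpus=4" on the kernel
 * command line limits the possible mask to cpus 0-3 and thereby caps
 * how many cpus can ever be brought online or hot-added.
 */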
 646
 647#ifdef CONFIG_HOTPLUG_CPU
 648
 649int __cpu_disable(void)
 650{
 651	struct ec_creg_mask_parms cr_parms;
 652	int cpu = smp_processor_id();
 653
 654	set_cpu_online(cpu, false);
 655
 656	/* Disable pfault pseudo page faults on this cpu. */
 657	pfault_fini();
 658
 659	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
 660	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));
 661
 662	/* disable all external interrupts */
 663	cr_parms.orvals[0] = 0;
 664	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
 665				1 << 10 | 1 <<	9 | 1 <<  6 | 1 <<  5 |
 666				1 <<  4);
 667	/* disable all I/O interrupts */
 668	cr_parms.orvals[6] = 0;
 669	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
 670				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
 671	/* disable most machine checks */
 672	cr_parms.orvals[14] = 0;
 673	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
 674				 1 << 25 | 1 << 24);
 675
 676	smp_ctl_bit_callback(&cr_parms);
 677
 678	return 0;
 679}
 680
 681void __cpu_die(unsigned int cpu)
 682{
 683	/* Wait until target cpu is down */
 684	while (!cpu_stopped(cpu))
 685		cpu_relax();
 686	while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
 687		udelay(10);
 688	smp_free_lowcore(cpu);
 689	atomic_dec(&init_mm.context.attach_count);
 690}
 691
 692void __noreturn cpu_die(void)
 693{
 694	idle_task_exit();
 695	while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
 696		cpu_relax();
 697	for (;;);
 698}
 699
 700#endif /* CONFIG_HOTPLUG_CPU */
 701
 702void __init smp_prepare_cpus(unsigned int max_cpus)
 703{
 704#ifndef CONFIG_64BIT
 705	unsigned long save_area = 0;
 706#endif
 707	unsigned long async_stack, panic_stack;
 708	struct _lowcore *lowcore;
 709
 710	smp_detect_cpus();
 711
 712	/* request the 0x1201 emergency signal external interrupt */
 713	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
 714		panic("Couldn't request external interrupt 0x1201");
 715
 716	/* Reallocate current lowcore, but keep its contents. */
 717	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
 718	panic_stack = __get_free_page(GFP_KERNEL);
 719	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
 720	BUG_ON(!lowcore || !panic_stack || !async_stack);
 721#ifndef CONFIG_64BIT
 722	if (MACHINE_HAS_IEEE)
 723		save_area = get_zeroed_page(GFP_KERNEL);
 724#endif
 725	local_irq_disable();
 726	local_mcck_disable();
 727	lowcore_ptr[smp_processor_id()] = lowcore;
 728	*lowcore = S390_lowcore;
 729	lowcore->panic_stack = panic_stack + PAGE_SIZE;
 730	lowcore->async_stack = async_stack + ASYNC_SIZE;
 731#ifndef CONFIG_64BIT
 732	if (MACHINE_HAS_IEEE)
 733		lowcore->extended_save_area_addr = (u32) save_area;
 734#endif
 735	set_prefix((u32)(unsigned long) lowcore);
 736	local_mcck_enable();
 737	local_irq_enable();
 738#ifdef CONFIG_64BIT
 739	if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
 740		BUG();
 741#endif
 742}
 743
 744void __init smp_prepare_boot_cpu(void)
 745{
 746	BUG_ON(smp_processor_id() != 0);
 747
 748	current_thread_info()->cpu = 0;
 749	set_cpu_present(0, true);
 750	set_cpu_online(0, true);
 751	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 752	current_set[0] = current;
 753	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
 754	smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
 755}
 756
 757void __init smp_cpus_done(unsigned int max_cpus)
 758{
 759}
 760
 761void __init smp_setup_processor_id(void)
 762{
 763	S390_lowcore.cpu_nr = 0;
 764	__cpu_logical_map[0] = stap();
 765}
 766
 767/*
 768 * the frequency of the profiling timer can be changed
 769 * by writing a multiplier value into /proc/profile.
 770 *
 771 * usually you want to run this on all CPUs ;)
 772 */
 773int setup_profiling_timer(unsigned int multiplier)
 774{
 775	return 0;
 776}
 777
 778#ifdef CONFIG_HOTPLUG_CPU
 779static ssize_t cpu_configure_show(struct sys_device *dev,
 780				struct sysdev_attribute *attr, char *buf)
 781{
 782	ssize_t count;
 783
 784	mutex_lock(&smp_cpu_state_mutex);
 785	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
 786	mutex_unlock(&smp_cpu_state_mutex);
 787	return count;
 788}
 789
 790static ssize_t cpu_configure_store(struct sys_device *dev,
 791				  struct sysdev_attribute *attr,
 792				  const char *buf, size_t count)
 793{
 794	int cpu = dev->id;
 795	int val, rc;
 796	char delim;
 797
 798	if (sscanf(buf, "%d %c", &val, &delim) != 1)
 799		return -EINVAL;
 800	if (val != 0 && val != 1)
 801		return -EINVAL;
 802
 803	get_online_cpus();
 804	mutex_lock(&smp_cpu_state_mutex);
 805	rc = -EBUSY;
 806	/* disallow configuration changes of online cpus and cpu 0 */
 807	if (cpu_online(cpu) || cpu == 0)
 808		goto out;
 809	rc = 0;
 810	switch (val) {
 811	case 0:
 812		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
 813			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
 814			if (!rc) {
 815				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
 816				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
 817			}
 818		}
 819		break;
 820	case 1:
 821		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
 822			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
 823			if (!rc) {
 824				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
 825				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
 826			}
 827		}
 828		break;
 829	default:
 830		break;
 831	}
 832out:
 833	mutex_unlock(&smp_cpu_state_mutex);
 834	put_online_cpus();
 835	return rc ? rc : count;
 836}
 837static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
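/*
 * Editor's note (assumed sysfs path): the attribute above backs
 *
 *	echo 1 > /sys/devices/system/cpu/cpuN/configure
 *
 * which pulls a standby cpu into the configured state via SCLP; writing
 * 0 deconfigures it again. Online cpus and cpu 0 are rejected with
 * -EBUSY, as cpu_configure_store() shows.
 */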
 838#endif /* CONFIG_HOTPLUG_CPU */
 839
 840static ssize_t cpu_polarization_show(struct sys_device *dev,
 841				     struct sysdev_attribute *attr, char *buf)
 842{
 843	int cpu = dev->id;
 844	ssize_t count;
 845
 846	mutex_lock(&smp_cpu_state_mutex);
 847	switch (smp_cpu_polarization[cpu]) {
 848	case POLARIZATION_HRZ:
 849		count = sprintf(buf, "horizontal\n");
 850		break;
 851	case POLARIZATION_VL:
 852		count = sprintf(buf, "vertical:low\n");
 853		break;
 854	case POLARIZATION_VM:
 855		count = sprintf(buf, "vertical:medium\n");
 856		break;
 857	case POLARIZATION_VH:
 858		count = sprintf(buf, "vertical:high\n");
 859		break;
 860	default:
 861		count = sprintf(buf, "unknown\n");
 862		break;
 863	}
 864	mutex_unlock(&smp_cpu_state_mutex);
 865	return count;
 866}
 867static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
 868
 869static ssize_t show_cpu_address(struct sys_device *dev,
 870				struct sysdev_attribute *attr, char *buf)
 871{
 872	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
 873}
 874static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
 875
 876
 877static struct attribute *cpu_common_attrs[] = {
 878#ifdef CONFIG_HOTPLUG_CPU
 879	&attr_configure.attr,
 880#endif
 881	&attr_address.attr,
 882	&attr_polarization.attr,
 883	NULL,
 884};
 885
 886static struct attribute_group cpu_common_attr_group = {
 887	.attrs = cpu_common_attrs,
 888};
 889
 890static ssize_t show_capability(struct sys_device *dev,
 891				struct sysdev_attribute *attr, char *buf)
 892{
 893	unsigned int capability;
 894	int rc;
 895
 896	rc = get_cpu_capability(&capability);
 897	if (rc)
 898		return rc;
 899	return sprintf(buf, "%u\n", capability);
 900}
 901static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
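/*
 * Editor's note: the two idle attribute readers below use a lockless
 * sequence-counter retry scheme against the cpu updating its own
 * s390_idle data: retry while the sequence is odd (update in flight) or
 * has changed between the two reads. The accumulated idle time is kept
 * in TOD clock units; TOD bit 51 ticks once per microsecond, so the
 * ">> 12" in show_idle_time() converts the value to microseconds.
 */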
 902
 903static ssize_t show_idle_count(struct sys_device *dev,
 904				struct sysdev_attribute *attr, char *buf)
 905{
 906	struct s390_idle_data *idle;
 907	unsigned long long idle_count;
 908	unsigned int sequence;
 909
 910	idle = &per_cpu(s390_idle, dev->id);
 911repeat:
 912	sequence = idle->sequence;
 913	smp_rmb();
 914	if (sequence & 1)
 915		goto repeat;
 916	idle_count = idle->idle_count;
 917	if (idle->idle_enter)
 918		idle_count++;
 919	smp_rmb();
 920	if (idle->sequence != sequence)
 921		goto repeat;
 922	return sprintf(buf, "%llu\n", idle_count);
 923}
 924static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
 925
 926static ssize_t show_idle_time(struct sys_device *dev,
 927				struct sysdev_attribute *attr, char *buf)
 928{
 929	struct s390_idle_data *idle;
 930	unsigned long long now, idle_time, idle_enter;
 931	unsigned int sequence;
 932
 933	idle = &per_cpu(s390_idle, dev->id);
 934	now = get_clock();
 935repeat:
 936	sequence = idle->sequence;
 937	smp_rmb();
 938	if (sequence & 1)
 939		goto repeat;
 940	idle_time = idle->idle_time;
 941	idle_enter = idle->idle_enter;
 942	if (idle_enter != 0ULL && idle_enter < now)
 943		idle_time += now - idle_enter;
 944	smp_rmb();
 945	if (idle->sequence != sequence)
 946		goto repeat;
 947	return sprintf(buf, "%llu\n", idle_time >> 12);
 948}
 949static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 950
 951static struct attribute *cpu_online_attrs[] = {
 952	&attr_capability.attr,
 953	&attr_idle_count.attr,
 954	&attr_idle_time_us.attr,
 955	NULL,
 956};
 957
 958static struct attribute_group cpu_online_attr_group = {
 959	.attrs = cpu_online_attrs,
 960};
 961
 962static int __cpuinit smp_cpu_notify(struct notifier_block *self,
 963				    unsigned long action, void *hcpu)
 964{
 965	unsigned int cpu = (unsigned int)(long)hcpu;
 966	struct cpu *c = &per_cpu(cpu_devices, cpu);
 967	struct sys_device *s = &c->sysdev;
 968	struct s390_idle_data *idle;
 969	int err = 0;
 970
 971	switch (action) {
 972	case CPU_ONLINE:
 973	case CPU_ONLINE_FROZEN:
 974		idle = &per_cpu(s390_idle, cpu);
 975		memset(idle, 0, sizeof(struct s390_idle_data));
 976		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
 977		break;
 978	case CPU_DEAD:
 979	case CPU_DEAD_FROZEN:
 980		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
 981		break;
 982	}
 983	return notifier_from_errno(err);
 984}
 985
 986static struct notifier_block __cpuinitdata smp_cpu_nb = {
 987	.notifier_call = smp_cpu_notify,
 988};
 989
 990static int __devinit smp_add_present_cpu(int cpu)
 991{
 992	struct cpu *c = &per_cpu(cpu_devices, cpu);
 993	struct sys_device *s = &c->sysdev;
 994	int rc;
 995
 996	c->hotpluggable = 1;
 997	rc = register_cpu(c, cpu);
 998	if (rc)
 999		goto out;
1000	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
1001	if (rc)
1002		goto out_cpu;
1003	if (!cpu_online(cpu))
1004		goto out;
1005	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
1006	if (!rc)
1007		return 0;
1008	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
1009out_cpu:
1010#ifdef CONFIG_HOTPLUG_CPU
1011	unregister_cpu(c);
1012#endif
1013out:
1014	return rc;
1015}
1016
1017#ifdef CONFIG_HOTPLUG_CPU
1018
1019int __ref smp_rescan_cpus(void)
1020{
1021	cpumask_t newcpus;
1022	int cpu;
1023	int rc;
1024
1025	get_online_cpus();
1026	mutex_lock(&smp_cpu_state_mutex);
1027	cpumask_copy(&newcpus, cpu_present_mask);
1028	rc = __smp_rescan_cpus();
1029	if (rc)
1030		goto out;
1031	cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
1032	for_each_cpu(cpu, &newcpus) {
1033		rc = smp_add_present_cpu(cpu);
1034		if (rc)
1035			set_cpu_present(cpu, false);
1036	}
1037	rc = 0;
1038out:
1039	mutex_unlock(&smp_cpu_state_mutex);
1040	put_online_cpus();
1041	if (!cpumask_empty(&newcpus))
1042		topology_schedule_update();
1043	return rc;
1044}
1045
1046static ssize_t __ref rescan_store(struct sysdev_class *class,
1047				  struct sysdev_class_attribute *attr,
1048				  const char *buf,
1049				  size_t count)
1050{
1051	int rc;
1052
1053	rc = smp_rescan_cpus();
1054	return rc ? rc : count;
1055}
1056static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
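/*
 * Editor's note (assumed sysfs path): writing anything to
 *
 *	/sys/devices/system/cpu/rescan
 *
 * runs smp_rescan_cpus(), so standby cpus attached after boot become
 * present without a reboot.
 */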
1057#endif /* CONFIG_HOTPLUG_CPU */
1058
1059static ssize_t dispatching_show(struct sysdev_class *class,
1060				struct sysdev_class_attribute *attr,
1061				char *buf)
1062{
1063	ssize_t count;
1064
1065	mutex_lock(&smp_cpu_state_mutex);
1066	count = sprintf(buf, "%d\n", cpu_management);
1067	mutex_unlock(&smp_cpu_state_mutex);
1068	return count;
1069}
1070
1071static ssize_t dispatching_store(struct sysdev_class *dev,
1072				 struct sysdev_class_attribute *attr,
1073				 const char *buf,
1074				 size_t count)
1075{
1076	int val, rc;
1077	char delim;
1078
1079	if (sscanf(buf, "%d %c", &val, &delim) != 1)
1080		return -EINVAL;
1081	if (val != 0 && val != 1)
1082		return -EINVAL;
1083	rc = 0;
1084	get_online_cpus();
1085	mutex_lock(&smp_cpu_state_mutex);
1086	if (cpu_management == val)
1087		goto out;
1088	rc = topology_set_cpu_management(val);
1089	if (!rc)
1090		cpu_management = val;
1091out:
1092	mutex_unlock(&smp_cpu_state_mutex);
1093	put_online_cpus();
1094	return rc ? rc : count;
1095}
1096static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
1097			 dispatching_store);
1098
1099static int __init topology_init(void)
1100{
1101	int cpu;
1102	int rc;
1103
1104	register_cpu_notifier(&smp_cpu_nb);
1105
1106#ifdef CONFIG_HOTPLUG_CPU
1107	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
1108	if (rc)
1109		return rc;
1110#endif
1111	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
1112	if (rc)
1113		return rc;
1114	for_each_present_cpu(cpu) {
1115		rc = smp_add_present_cpu(cpu);
1116		if (rc)
1117			return rc;
1118	}
1119	return 0;
1120}
1121subsys_initcall(topology_init);
v4.6 (arch/s390/kernel/smp.c)
   1/*
   2 *  SMP related functions
   3 *
   4 *    Copyright IBM Corp. 1999, 2012
   5 *    Author(s): Denis Joseph Barrow,
   6 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
   7 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
   8 *
   9 *  based on other smp stuff by
  10 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
  11 *    (c) 1998 Ingo Molnar
  12 *
  13 * The code outside of smp.c uses logical cpu numbers, only smp.c does
  14 * the translation of logical to physical cpu ids. All new code that
  15 * operates on physical cpu numbers needs to go into smp.c.
  16 */
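/*
 * Editor's note (sketch, not part of the original listing): in this
 * version the logical-to-physical mapping lives in the pcpu_devices[]
 * array rather than in __cpu_logical_map[]; resolving a logical cpu
 * before signalling it looks like:
 *
 *	u16 phys = pcpu_devices[cpu].address;
 *	__pcpu_sigp(phys, order, parm, NULL);
 */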
  17
  18#define KMSG_COMPONENT "cpu"
  19#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  20
  21#include <linux/workqueue.h>
  22#include <linux/module.h>
  23#include <linux/init.h>
  24#include <linux/mm.h>
  25#include <linux/err.h>
  26#include <linux/spinlock.h>
  27#include <linux/kernel_stat.h>
  28#include <linux/delay.h>
  29#include <linux/interrupt.h>
  30#include <linux/irqflags.h>
  31#include <linux/cpu.h>
  32#include <linux/slab.h>
  33#include <linux/crash_dump.h>
  34#include <linux/memblock.h>
  35#include <asm/asm-offsets.h>
  36#include <asm/diag.h>
  37#include <asm/switch_to.h>
  38#include <asm/facility.h>
  39#include <asm/ipl.h>
  40#include <asm/setup.h>
  41#include <asm/irq.h>
  42#include <asm/tlbflush.h>
  43#include <asm/vtimer.h>
  44#include <asm/lowcore.h>
  45#include <asm/sclp.h>
  46#include <asm/vdso.h>
  47#include <asm/debug.h>
  48#include <asm/os_info.h>
  49#include <asm/sigp.h>
  50#include <asm/idle.h>
  51#include "entry.h"
  52
  53enum {
  54	ec_schedule = 0,
  55	ec_call_function_single,
  56	ec_stop_cpu,
  57};
  58
  59enum {
  60	CPU_STATE_STANDBY,
  61	CPU_STATE_CONFIGURED,
  62};
  63
  64static DEFINE_PER_CPU(struct cpu *, cpu_device);
  65
  66struct pcpu {
  67	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
  68	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
  69	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
  70	signed char state;		/* physical cpu state */
  71	signed char polarization;	/* physical polarization */
  72	u16 address;			/* physical cpu address */
  73};
  74
  75static u8 boot_core_type;
  76static struct pcpu pcpu_devices[NR_CPUS];
  77
  78unsigned int smp_cpu_mt_shift;
  79EXPORT_SYMBOL(smp_cpu_mt_shift);
  80
  81unsigned int smp_cpu_mtid;
  82EXPORT_SYMBOL(smp_cpu_mtid);
  83
  84#ifdef CONFIG_CRASH_DUMP
  85__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
  86#endif
  87
  88static unsigned int smp_max_threads __initdata = -1U;
  89
  90static int __init early_nosmt(char *s)
  91{
  92	smp_max_threads = 1;
  93	return 0;
  94}
  95early_param("nosmt", early_nosmt);
  96
  97static int __init early_smt(char *s)
  98{
  99	get_option(&s, &smp_max_threads);
 100	return 0;
 101}
 102early_param("smt", early_smt);
 103
 104/*
 105 * The smp_cpu_state_mutex must be held when changing the state or polarization
  106 * member of a pcpu data structure within the pcpu_devices array.
 107 */
 108DEFINE_MUTEX(smp_cpu_state_mutex);
 109
 110/*
 111 * Signal processor helper functions.
 112 */
 113static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
 114{
 115	int cc;
 116
 117	while (1) {
 118		cc = __pcpu_sigp(addr, order, parm, NULL);
 119		if (cc != SIGP_CC_BUSY)
 120			return cc;
 121		cpu_relax();
 122	}
 123}
 124
 125static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
 126{
 127	int cc, retry;
 128
 129	for (retry = 0; ; retry++) {
 130		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
 131		if (cc != SIGP_CC_BUSY)
 132			break;
 133		if (retry >= 3)
 134			udelay(10);
 135	}
 136	return cc;
 137}
 138
 139static inline int pcpu_stopped(struct pcpu *pcpu)
 140{
 141	u32 uninitialized_var(status);
 142
 143	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
 144			0, &status) != SIGP_CC_STATUS_STORED)
 145		return 0;
 146	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
 147}
 148
 149static inline int pcpu_running(struct pcpu *pcpu)
 150{
 151	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
 152			0, NULL) != SIGP_CC_STATUS_STORED)
 153		return 1;
 154	/* Status stored condition code is equivalent to cpu not running. */
 155	return 0;
 156}
 157
 158/*
 159 * Find struct pcpu by cpu address.
 160 */
 161static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
 162{
 163	int cpu;
 164
 165	for_each_cpu(cpu, mask)
 166		if (pcpu_devices[cpu].address == address)
 167			return pcpu_devices + cpu;
 168	return NULL;
 169}
 170
 171static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
 172{
 173	int order;
 174
 175	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
 176		return;
 177	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
 178	pcpu->ec_clk = get_tod_clock_fast();
 179	pcpu_sigp_retry(pcpu, order, 0);
 180}
 181
 182#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
 183#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
 184
 185static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 186{
 187	unsigned long async_stack, panic_stack;
 188	struct lowcore *lc;
 189
 190	if (pcpu != &pcpu_devices[0]) {
 191		pcpu->lowcore =	(struct lowcore *)
 192			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
 193		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
 194		panic_stack = __get_free_page(GFP_KERNEL);
 195		if (!pcpu->lowcore || !panic_stack || !async_stack)
 196			goto out;
 197	} else {
 198		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
 199		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
 200	}
 201	lc = pcpu->lowcore;
 202	memcpy(lc, &S390_lowcore, 512);
 203	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
 204	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
 205	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
 206	lc->cpu_nr = cpu;
 207	lc->spinlock_lockval = arch_spin_lockval(cpu);
 208	if (MACHINE_HAS_VX)
 209		lc->vector_save_area_addr =
 210			(unsigned long) &lc->vector_save_area;
 211	if (vdso_alloc_per_cpu(lc))
 212		goto out;
 213	lowcore_ptr[cpu] = lc;
 214	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
 215	return 0;
 216out:
 217	if (pcpu != &pcpu_devices[0]) {
 218		free_page(panic_stack);
 219		free_pages(async_stack, ASYNC_ORDER);
 220		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
 221	}
 222	return -ENOMEM;
 223}
 224
 225#ifdef CONFIG_HOTPLUG_CPU
 226
 227static void pcpu_free_lowcore(struct pcpu *pcpu)
 228{
 229	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
 230	lowcore_ptr[pcpu - pcpu_devices] = NULL;
 231	vdso_free_per_cpu(pcpu->lowcore);
 232	if (pcpu == &pcpu_devices[0])
 233		return;
 234	free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
 235	free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
 236	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
 237}
 238
 239#endif /* CONFIG_HOTPLUG_CPU */
 240
 241static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
 242{
 243	struct lowcore *lc = pcpu->lowcore;
 244
 245	if (MACHINE_HAS_TLB_LC)
 246		cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
 247	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
 248	atomic_inc(&init_mm.context.attach_count);
 249	lc->cpu_nr = cpu;
 250	lc->spinlock_lockval = arch_spin_lockval(cpu);
 251	lc->percpu_offset = __per_cpu_offset[cpu];
 252	lc->kernel_asce = S390_lowcore.kernel_asce;
 253	lc->machine_flags = S390_lowcore.machine_flags;
 254	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
 255	__ctl_store(lc->cregs_save_area, 0, 15);
 256	save_access_regs((unsigned int *) lc->access_regs_save_area);
 257	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
 258	       MAX_FACILITY_BIT/8);
 259}
 260
 261static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
 262{
 263	struct lowcore *lc = pcpu->lowcore;
 264	struct thread_info *ti = task_thread_info(tsk);
 265
 266	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
 267		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 268	lc->thread_info = (unsigned long) task_thread_info(tsk);
 269	lc->current_task = (unsigned long) tsk;
 270	lc->lpp = LPP_MAGIC;
 271	lc->current_pid = tsk->pid;
 272	lc->user_timer = ti->user_timer;
 273	lc->system_timer = ti->system_timer;
 274	lc->steal_timer = 0;
 275}
 276
 277static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
 278{
 279	struct lowcore *lc = pcpu->lowcore;
 280
 281	lc->restart_stack = lc->kernel_stack;
 282	lc->restart_fn = (unsigned long) func;
 283	lc->restart_data = (unsigned long) data;
 284	lc->restart_source = -1UL;
 285	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
 286}
 287
 288/*
 289 * Call function via PSW restart on pcpu and stop the current cpu.
 290 */
 291static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
 292			  void *data, unsigned long stack)
 293{
 294	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
 295	unsigned long source_cpu = stap();
 296
 297	__load_psw_mask(PSW_KERNEL_BITS);
 298	if (pcpu->address == source_cpu)
 299		func(data);	/* should not return */
 300	/* Stop target cpu (if func returns this stops the current cpu). */
 301	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
 302	/* Restart func on the target cpu and stop the current cpu. */
 303	mem_assign_absolute(lc->restart_stack, stack);
 304	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
 305	mem_assign_absolute(lc->restart_data, (unsigned long) data);
 306	mem_assign_absolute(lc->restart_source, source_cpu);
 307	asm volatile(
 308		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
 309		"	brc	2,0b	# busy, try again\n"
 310		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
 311		"	brc	2,1b	# busy, try again\n"
 312		: : "d" (pcpu->address), "d" (source_cpu),
 313		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
 314		: "0", "1", "cc");
 315	for (;;) ;
 316}
 317
 318/*
 319 * Enable additional logical cpus for multi-threading.
 320 */
 321static int pcpu_set_smt(unsigned int mtid)
 322{
 323	register unsigned long reg1 asm ("1") = (unsigned long) mtid;
 324	int cc;
 325
 326	if (smp_cpu_mtid == mtid)
 327		return 0;
 328	asm volatile(
 329		"	sigp	%1,0,%2	# sigp set multi-threading\n"
 330		"	ipm	%0\n"
 331		"	srl	%0,28\n"
 332		: "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
 333		: "cc");
 334	if (cc == 0) {
 335		smp_cpu_mtid = mtid;
 336		smp_cpu_mt_shift = 0;
 337		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
 338			smp_cpu_mt_shift++;
 339		pcpu_devices[0].address = stap();
 340	}
 341	return cc;
 342}
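/*
 * Editor's note, worked example: with mtid = 1 (two threads per core)
 * the loop above yields smp_cpu_mt_shift = 1, so a core with core_id 5
 * owns the physical cpu addresses (5 << 1) + 0 and (5 << 1) + 1, i.e.
 * 10 and 11. This matches the "core_id << smp_cpu_mt_shift" plus "+ j"
 * enumeration used by smp_get_core_info() and __smp_rescan_cpus()
 * below.
 */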
 343
 344/*
 345 * Call function on an online CPU.
 346 */
 347void smp_call_online_cpu(void (*func)(void *), void *data)
 348{
 349	struct pcpu *pcpu;
 350
 351	/* Use the current cpu if it is online. */
 352	pcpu = pcpu_find_address(cpu_online_mask, stap());
 353	if (!pcpu)
 354		/* Use the first online cpu. */
 355		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
 356	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
 357}
 358
 359/*
 360 * Call function on the ipl CPU.
 361 */
 362void smp_call_ipl_cpu(void (*func)(void *), void *data)
 363{
 364	pcpu_delegate(&pcpu_devices[0], func, data,
 365		      pcpu_devices->lowcore->panic_stack -
 366		      PANIC_FRAME_OFFSET + PAGE_SIZE);
 367}
 368
 369int smp_find_processor_id(u16 address)
 370{
 371	int cpu;
 372
 373	for_each_present_cpu(cpu)
 374		if (pcpu_devices[cpu].address == address)
 375			return cpu;
 376	return -1;
 377}
 378
 379int smp_vcpu_scheduled(int cpu)
 380{
 381	return pcpu_running(pcpu_devices + cpu);
 382}
 383
 384void smp_yield_cpu(int cpu)
 385{
 386	if (MACHINE_HAS_DIAG9C) {
 387		diag_stat_inc_norecursion(DIAG_STAT_X09C);
 388		asm volatile("diag %0,0,0x9c"
 389			     : : "d" (pcpu_devices[cpu].address));
 390	} else if (MACHINE_HAS_DIAG44) {
 391		diag_stat_inc_norecursion(DIAG_STAT_X044);
 392		asm volatile("diag 0,0,0x44");
 393	}
 394}
 395
 396/*
 397 * Send cpus emergency shutdown signal. This gives the cpus the
 398 * opportunity to complete outstanding interrupts.
 399 */
 400static void smp_emergency_stop(cpumask_t *cpumask)
 401{
 402	u64 end;
 403	int cpu;
 404
 405	end = get_tod_clock() + (1000000UL << 12);
 406	for_each_cpu(cpu, cpumask) {
 407		struct pcpu *pcpu = pcpu_devices + cpu;
 408		set_bit(ec_stop_cpu, &pcpu->ec_mask);
 409		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
 410				   0, NULL) == SIGP_CC_BUSY &&
 411		       get_tod_clock() < end)
 412			cpu_relax();
 413	}
 414	while (get_tod_clock() < end) {
 415		for_each_cpu(cpu, cpumask)
 416			if (pcpu_stopped(pcpu_devices + cpu))
 417				cpumask_clear_cpu(cpu, cpumask);
 418		if (cpumask_empty(cpumask))
 419			break;
 420		cpu_relax();
 421	}
 422}
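/*
 * Editor's note: "1000000UL << 12" above is one second expressed in TOD
 * clock units (TOD bit 51 ticks once per microsecond, so 1 microsecond
 * equals 4096 units); the function therefore gives the other cpus at
 * most about a second to reach the stopped state before giving up.
 */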
 423
 424/*
 425 * Stop all cpus but the current one.
 426 */
 427void smp_send_stop(void)
 428{
 429	cpumask_t cpumask;
 430	int cpu;
 431
 432	/* Disable all interrupts/machine checks */
 433	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
 434	trace_hardirqs_off();
 435
 436	debug_set_critical();
 437	cpumask_copy(&cpumask, cpu_online_mask);
 438	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 439
 440	if (oops_in_progress)
 441		smp_emergency_stop(&cpumask);
 442
 443	/* stop all processors */
 444	for_each_cpu(cpu, &cpumask) {
 445		struct pcpu *pcpu = pcpu_devices + cpu;
 446		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
 447		while (!pcpu_stopped(pcpu))
 448			cpu_relax();
 449	}
 450}
 451
 452/*
 453 * This is the main routine where commands issued by other
 454 * cpus are handled.
 455 */
 456static void smp_handle_ext_call(void)
 457{
 458	unsigned long bits;
 459
 460	/* handle bit signal external calls */
 461	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
 462	if (test_bit(ec_stop_cpu, &bits))
 463		smp_stop_cpu();
 464	if (test_bit(ec_schedule, &bits))
 465		scheduler_ipi();
 466	if (test_bit(ec_call_function_single, &bits))
 467		generic_smp_call_function_single_interrupt();
 468}
 469
 470static void do_ext_call_interrupt(struct ext_code ext_code,
 471				  unsigned int param32, unsigned long param64)
 472{
 473	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
 474	smp_handle_ext_call();
 475}
 476
 477void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 478{
 479	int cpu;
 480
 481	for_each_cpu(cpu, mask)
 482		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
 483}
 484
 485void arch_send_call_function_single_ipi(int cpu)
 486{
 487	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
 488}
 489
 490/*
 491 * this function sends a 'reschedule' IPI to another CPU.
 492 * it goes straight through and wastes no time serializing
 493 * anything. Worst case is that we lose a reschedule ...
 494 */
 495void smp_send_reschedule(int cpu)
 496{
 497	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
 498}
 499
 500/*
 501 * parameter area for the set/clear control bit callbacks
 502 */
 503struct ec_creg_mask_parms {
 504	unsigned long orval;
 505	unsigned long andval;
 506	int cr;
 507};
 508
 509/*
 510 * callback for setting/clearing control bits
 511 */
 512static void smp_ctl_bit_callback(void *info)
 513{
 514	struct ec_creg_mask_parms *pp = info;
 515	unsigned long cregs[16];
 516
 517	__ctl_store(cregs, 0, 15);
 518	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
 519	__ctl_load(cregs, 0, 15);
 520}
 521
 522/*
 523 * Set a bit in a control register of all cpus
 524 */
 525void smp_ctl_set_bit(int cr, int bit)
 526{
 527	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
 528
 529	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 530}
 531EXPORT_SYMBOL(smp_ctl_set_bit);
 532
 533/*
 534 * Clear a bit in a control register of all cpus
 535 */
 536void smp_ctl_clear_bit(int cr, int bit)
 537{
 538	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
 539
 540	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
 541}
 542EXPORT_SYMBOL(smp_ctl_clear_bit);
 543
 544#ifdef CONFIG_CRASH_DUMP
 545
 546int smp_store_status(int cpu)
 547{
 548	struct pcpu *pcpu = pcpu_devices + cpu;
 549	unsigned long pa;
 550
 551	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
 552	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
 553			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
 554		return -EIO;
 555	if (!MACHINE_HAS_VX)
 556		return 0;
 557	pa = __pa(pcpu->lowcore->vector_save_area_addr);
 558	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
 559			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
 560		return -EIO;
 561	return 0;
 562}
 563
 564/*
 565 * Collect CPU state of the previous, crashed system.
 566 * There are four cases:
 567 * 1) standard zfcp dump
 568 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 569 *    The state for all CPUs except the boot CPU needs to be collected
 570 *    with sigp stop-and-store-status. The boot CPU state is located in
 571 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 572 *    will copy the boot CPU state from the HSA.
 573 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 574 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 575 *    The state for all CPUs except the boot CPU needs to be collected
 576 *    with sigp stop-and-store-status. The firmware or the boot-loader
 577 *    stored the registers of the boot CPU in the absolute lowcore in the
 578 *    memory of the old system.
 579 * 3) kdump and the old kernel did not store the CPU state,
 580 *    or stand-alone kdump for DASD
 581 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 582 *    The state for all CPUs except the boot CPU needs to be collected
 583 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 584 *    stored the registers of the boot CPU in the memory of the old system.
 585 * 4) kdump and the old kernel stored the CPU state
 586 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 587 *    This case does not exist for s390 anymore, setup_arch explicitly
 588 *    deactivates the elfcorehdr= kernel parameter
 589 */
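/*
 * Editor's summary of the four cases above (no new information):
 *
 *	OLDMEM_BASE	dump type	boot CPU registers come from
 *	NULL		zfcp dump	HSA, copied later by zcore
 *	set		zfcp/SCSI	absolute lowcore of the old system
 *	set		kdump/DASD	absolute lowcore of the old system
 *	set		kdump w/ state	cannot happen, elfcorehdr= is
 *					disabled by setup_arch on s390
 */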
 590static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
 591				     bool is_boot_cpu, unsigned long page)
 592{
 593	__vector128 *vxrs = (__vector128 *) page;
 594
 595	if (is_boot_cpu)
 596		vxrs = boot_cpu_vector_save_area;
 597	else
 598		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
 599	save_area_add_vxrs(sa, vxrs);
 600}
 601
 602static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
 603				     bool is_boot_cpu, unsigned long page)
 604{
 605	void *regs = (void *) page;
 606
 607	if (is_boot_cpu)
 608		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
 609	else
 610		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
 611	save_area_add_regs(sa, regs);
 612}
 613
 614void __init smp_save_dump_cpus(void)
 615{
 616	int addr, boot_cpu_addr, max_cpu_addr;
 617	struct save_area *sa;
 618	unsigned long page;
 619	bool is_boot_cpu;
 620
 621	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
 622		/* No previous system present, normal boot. */
 623		return;
 624	/* Allocate a page as dumping area for the store status sigps */
 625	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
 626	/* Set multi-threading state to the previous system. */
 627	pcpu_set_smt(sclp.mtid_prev);
 628	boot_cpu_addr = stap();
 629	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
 630	for (addr = 0; addr <= max_cpu_addr; addr++) {
 631		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
 632		    SIGP_CC_NOT_OPERATIONAL)
 633			continue;
 634		is_boot_cpu = (addr == boot_cpu_addr);
 635		/* Allocate save area */
 636		sa = save_area_alloc(is_boot_cpu);
 637		if (!sa)
 638			panic("could not allocate memory for save area\n");
 639		if (MACHINE_HAS_VX)
 640			/* Get the vector registers */
 641			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
 642		/*
 643		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
 644		 * of the boot CPU are stored in the HSA. To retrieve
 645		 * these registers an SCLP request is required which is
 646		 * done by drivers/s390/char/zcore.c:init_cpu_info()
 647		 */
 648		if (!is_boot_cpu || OLDMEM_BASE)
 649			/* Get the CPU registers */
 650			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
 651	}
 652	memblock_free(page, PAGE_SIZE);
 653	diag308_reset();
 654	pcpu_set_smt(0);
 655}
 656#endif /* CONFIG_CRASH_DUMP */
 657
 658void smp_cpu_set_polarization(int cpu, int val)
 659{
 660	pcpu_devices[cpu].polarization = val;
 661}
 662
 663int smp_cpu_get_polarization(int cpu)
 664{
 665	return pcpu_devices[cpu].polarization;
 666}
 667
 668static struct sclp_core_info *smp_get_core_info(void)
 669{
 670	static int use_sigp_detection;
 671	struct sclp_core_info *info;
 672	int address;
 673
 674	info = kzalloc(sizeof(*info), GFP_KERNEL);
 675	if (info && (use_sigp_detection || sclp_get_core_info(info))) {
 676		use_sigp_detection = 1;
 677		for (address = 0;
 678		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
 679		     address += (1U << smp_cpu_mt_shift)) {
 680			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
 681			    SIGP_CC_NOT_OPERATIONAL)
 682				continue;
 683			info->core[info->configured].core_id =
 684				address >> smp_cpu_mt_shift;
 685			info->configured++;
 686		}
 687		info->combined = info->configured;
 688	}
 689	return info;
 690}
 691
 692static int smp_add_present_cpu(int cpu);
 693
 694static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
 695{
 696	struct pcpu *pcpu;
 697	cpumask_t avail;
 698	int cpu, nr, i, j;
 699	u16 address;
 700
 701	nr = 0;
 702	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
 703	cpu = cpumask_first(&avail);
 704	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
 705		if (sclp.has_core_type && info->core[i].type != boot_core_type)
 706			continue;
 707		address = info->core[i].core_id << smp_cpu_mt_shift;
 708		for (j = 0; j <= smp_cpu_mtid; j++) {
 709			if (pcpu_find_address(cpu_present_mask, address + j))
 710				continue;
 711			pcpu = pcpu_devices + cpu;
 712			pcpu->address = address + j;
 713			pcpu->state =
 714				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
 715				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
 716			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
 717			set_cpu_present(cpu, true);
 718			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
 719				set_cpu_present(cpu, false);
 720			else
 721				nr++;
 722			cpu = cpumask_next(cpu, &avail);
 723			if (cpu >= nr_cpu_ids)
 724				break;
 725		}
 726	}
 727	return nr;
 728}
 729
 730static void __init smp_detect_cpus(void)
 731{
 732	unsigned int cpu, mtid, c_cpus, s_cpus;
 733	struct sclp_core_info *info;
 734	u16 address;
 735
 736	/* Get CPU information */
 737	info = smp_get_core_info();
 738	if (!info)
 739		panic("smp_detect_cpus failed to allocate memory\n");
 740
 741	/* Find boot CPU type */
 742	if (sclp.has_core_type) {
 743		address = stap();
 744		for (cpu = 0; cpu < info->combined; cpu++)
 745			if (info->core[cpu].core_id == address) {
 746				/* The boot cpu dictates the cpu type. */
 747				boot_core_type = info->core[cpu].type;
 748				break;
 749			}
 750		if (cpu >= info->combined)
 751			panic("Could not find boot CPU type");
 752	}
 753
 754	/* Set multi-threading state for the current system */
 755	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
 756	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
 757	pcpu_set_smt(mtid);
 758
 759	/* Print number of CPUs */
 760	c_cpus = s_cpus = 0;
 761	for (cpu = 0; cpu < info->combined; cpu++) {
 762		if (sclp.has_core_type &&
 763		    info->core[cpu].type != boot_core_type)
 764			continue;
 765		if (cpu < info->configured)
 766			c_cpus += smp_cpu_mtid + 1;
 767		else
 768			s_cpus += smp_cpu_mtid + 1;
 769	}
 770	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
 771
 772	/* Add CPUs present at boot */
 773	get_online_cpus();
 774	__smp_rescan_cpus(info, 0);
 775	put_online_cpus();
 776	kfree(info);
 777}
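
/*
 * Worked example of the MT clamp above (hypothetical values): if the
 * machine reports sclp.mtid = 3 (four threads per core) but
 * smp_max_threads is 2, mtid is clamped to 2 - 1 = 1, so only two
 * threads per core are brought into use and each core counts as
 * smp_cpu_mtid + 1 = 2 CPUs in the configured/standby totals.
 */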
 778
 779/*
 780 *	Activate a secondary processor.
 781 */
 782static void smp_start_secondary(void *cpuvoid)
 783{
 784	S390_lowcore.last_update_clock = get_tod_clock();
 785	S390_lowcore.restart_stack = (unsigned long) restart_stack;
 786	S390_lowcore.restart_fn = (unsigned long) do_restart;
 787	S390_lowcore.restart_data = 0;
 788	S390_lowcore.restart_source = -1UL;
 789	restore_access_regs(S390_lowcore.access_regs_save_area);
 790	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
 791	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
 792	cpu_init();
 793	preempt_disable();
 794	init_cpu_timer();
 795	vtime_init();
 796	pfault_init();
 797	notify_cpu_starting(smp_processor_id());
 798	set_cpu_online(smp_processor_id(), true);
 799	inc_irq_stat(CPU_RST);
 800	local_irq_enable();
 801	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 802}
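
/*
 * Note on the ordering above: the restart_* lowcore fields are set up
 * first so the fresh CPU can already take restart requests, and the CPU
 * marks itself online before enabling interrupts, so it is visible in
 * the online mask polled by __cpu_up() before the first external call
 * can be delivered to it.
 */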
 803
 804/* Upping and downing of CPUs */
 805int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 806{
 807	struct pcpu *pcpu;
 808	int base, i, rc;
 809
 810	pcpu = pcpu_devices + cpu;
 811	if (pcpu->state != CPU_STATE_CONFIGURED)
 812		return -EIO;
 813	base = cpu - (cpu % (smp_cpu_mtid + 1));
 814	for (i = 0; i <= smp_cpu_mtid; i++) {
 815		if (base + i < nr_cpu_ids)
 816			if (cpu_online(base + i))
 817				break;
 818	}
 819	/*
 820	 * If this is the first CPU of the core to get online,
 821	 * do an initial CPU reset.
 822	 */
 823	if (i > smp_cpu_mtid &&
 824	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
 825	    SIGP_CC_ORDER_CODE_ACCEPTED)
 826		return -EIO;
 827
 828	rc = pcpu_alloc_lowcore(pcpu, cpu);
 829	if (rc)
 830		return rc;
 831	pcpu_prepare_secondary(pcpu, cpu);
 832	pcpu_attach_task(pcpu, tidle);
 833	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
 834	/* Wait until cpu puts itself in the online & active maps */
 835	while (!cpu_online(cpu) || !cpu_active(cpu))
 836		cpu_relax();
 837	return 0;
 838}
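
/*
 * Worked example for the reset logic above (hypothetical numbers): with
 * smp_cpu_mtid = 1, bringing up logical CPU 5 computes base = 4; only
 * if neither CPU 4 nor CPU 5 is online yet does the core behind
 * pcpu_devices[4] receive a SIGP_INITIAL_CPU_RESET.
 */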
 839
 840static unsigned int setup_possible_cpus __initdata;
 841
 842static int __init _setup_possible_cpus(char *s)
 843{
 844	get_option(&s, &setup_possible_cpus);
 845	return 0;
 846}
 847early_param("possible_cpus", _setup_possible_cpus);
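
/*
 * Usage sketch: booting with "possible_cpus=4" on the kernel command
 * line caps the possible mask built by smp_fill_possible_mask() at four
 * CPUs; the SCLP-derived maximum can still lower that value further.
 */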
 848
 849#ifdef CONFIG_HOTPLUG_CPU
 850
 851int __cpu_disable(void)
 852{
 853	unsigned long cregs[16];
 854
 855	/* Handle possible pending IPIs */
 856	smp_handle_ext_call();
 857	set_cpu_online(smp_processor_id(), false);
 858	/* Disable pseudo page faults on this cpu. */
 859	pfault_fini();
 860	/* Disable interrupt sources via control register. */
 861	__ctl_store(cregs, 0, 15);
 862	cregs[0]  &= ~0x0000ee70UL;	/* disable all external interrupts */
 863	cregs[6]  &= ~0xff000000UL;	/* disable all I/O interrupts */
 864	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
 865	__ctl_load(cregs, 0, 15);
 866	clear_cpu_flag(CIF_NOHZ_DELAY);
 867	return 0;
 868}
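
/*
 * Sketch of the shutdown sequence above: __ctl_store() snapshots
 * control registers 0-15, the masks clear the subclass-mask bits for
 * external and I/O interrupts and most machine checks, and __ctl_load()
 * writes the registers back, so the departing CPU can no longer be
 * disturbed by asynchronous events.
 */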
 869
 870void __cpu_die(unsigned int cpu)
 871{
 872	struct pcpu *pcpu;
 873
 874	/* Wait until target cpu is down */
 875	pcpu = pcpu_devices + cpu;
 876	while (!pcpu_stopped(pcpu))
 877		cpu_relax();
 878	pcpu_free_lowcore(pcpu);
 879	atomic_dec(&init_mm.context.attach_count);
 880	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
 881	if (MACHINE_HAS_TLB_LC)
 882		cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
 883}
 884
 885void __noreturn cpu_die(void)
 886{
 887	idle_task_exit();
 888	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
 889	for (;;) ;
 890}
 891
 892#endif /* CONFIG_HOTPLUG_CPU */
 893
 894void __init smp_fill_possible_mask(void)
 895{
 896	unsigned int possible, sclp_max, cpu;
 897
 898	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
 899	sclp_max = min(smp_max_threads, sclp_max);
 900	sclp_max = sclp.max_cores * sclp_max ?: nr_cpu_ids;
 901	possible = setup_possible_cpus ?: nr_cpu_ids;
 902	possible = min(possible, sclp_max);
 903	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
 904		set_cpu_possible(cpu, true);
 905}
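
/*
 * Worked example (hypothetical values): with sclp.mtid = sclp.mtid_cp = 1,
 * smp_max_threads = 2 and sclp.max_cores = 8, sclp_max becomes
 * min(2, 1 + 1) * 8 = 16. Note that "x ?: y" (a GNU extension) yields x
 * unless x is zero, so a zero core count falls back to nr_cpu_ids.
 */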
 906
 907void __init smp_prepare_cpus(unsigned int max_cpus)
 908{
 909	/* request the 0x1201 emergency signal external interrupt */
 910	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
 911		panic("Couldn't request external interrupt 0x1201");
 912	/* request the 0x1202 external call external interrupt */
 913	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
 914		panic("Couldn't request external interrupt 0x1202");
 915	smp_detect_cpus();
 916}
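
/*
 * The two external interrupts requested above are the s390 IPI sources:
 * 0x1201 is the emergency-signal order and 0x1202 the external-call
 * order; both are routed to do_ext_call_interrupt(), which processes
 * the pending cross-CPU work.
 */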
 917
 918void __init smp_prepare_boot_cpu(void)
 919{
 920	struct pcpu *pcpu = pcpu_devices;
 921
 922	pcpu->state = CPU_STATE_CONFIGURED;
 923	pcpu->address = stap();
 924	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
 925	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 926	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
 927	set_cpu_present(0, true);
 928	set_cpu_online(0, true);
 929}
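
/*
 * stap() and store_prefix() above read the CPU address and the prefix
 * register of the running CPU, so the boot CPU's pcpu entry ends up
 * pointing at the lowcore that was set up during early boot.
 */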
 930
 931void __init smp_cpus_done(unsigned int max_cpus)
 932{
 933}
 934
 935void __init smp_setup_processor_id(void)
 936{
 937	S390_lowcore.cpu_nr = 0;
 938	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
 939}
 940
 941/*
 942 * The frequency of the profiling timer can be changed
 943 * by writing a multiplier value into /proc/profile.
 944 *
 945 * Usually you want to run this on all CPUs ;)
 946 */
 947int setup_profiling_timer(unsigned int multiplier)
 948{
 949	return 0;
 950}
 951
 952#ifdef CONFIG_HOTPLUG_CPU
 953static ssize_t cpu_configure_show(struct device *dev,
 954				  struct device_attribute *attr, char *buf)
 955{
 956	ssize_t count;
 957
 958	mutex_lock(&smp_cpu_state_mutex);
 959	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
 960	mutex_unlock(&smp_cpu_state_mutex);
 961	return count;
 962}
 963
 964static ssize_t cpu_configure_store(struct device *dev,
 965				   struct device_attribute *attr,
 966				   const char *buf, size_t count)
 967{
 968	struct pcpu *pcpu;
 969	int cpu, val, rc, i;
 970	char delim;
 971
 972	if (sscanf(buf, "%d %c", &val, &delim) != 1)
 973		return -EINVAL;
 974	if (val != 0 && val != 1)
 975		return -EINVAL;
 976	get_online_cpus();
 977	mutex_lock(&smp_cpu_state_mutex);
 978	rc = -EBUSY;
 979	/* disallow configuration changes of online cpus and cpu 0 */
 980	cpu = dev->id;
 981	cpu -= cpu % (smp_cpu_mtid + 1);
 982	if (cpu == 0)
 983		goto out;
 984	for (i = 0; i <= smp_cpu_mtid; i++)
 985		if (cpu_online(cpu + i))
 986			goto out;
 987	pcpu = pcpu_devices + cpu;
 988	rc = 0;
 989	switch (val) {
 990	case 0:
 991		if (pcpu->state != CPU_STATE_CONFIGURED)
 992			break;
 993		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
 994		if (rc)
 995			break;
 996		for (i = 0; i <= smp_cpu_mtid; i++) {
 997			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
 998				continue;
 999			pcpu[i].state = CPU_STATE_STANDBY;
1000			smp_cpu_set_polarization(cpu + i,
1001						 POLARIZATION_UNKNOWN);
1002		}
1003		topology_expect_change();
1004		break;
1005	case 1:
1006		if (pcpu->state != CPU_STATE_STANDBY)
1007			break;
1008		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
1009		if (rc)
1010			break;
1011		for (i = 0; i <= smp_cpu_mtid; i++) {
1012			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
1013				continue;
1014			pcpu[i].state = CPU_STATE_CONFIGURED;
1015			smp_cpu_set_polarization(cpu + i,
1016						 POLARIZATION_UNKNOWN);
1017		}
1018		topology_expect_change();
1019		break;
1020	default:
1021		break;
1022	}
1023out:
1024	mutex_unlock(&smp_cpu_state_mutex);
1025	put_online_cpus();
1026	return rc ? rc : count;
1027}
1028static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
1029#endif /* CONFIG_HOTPLUG_CPU */
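
/*
 * Usage sketch for the attribute above (path assumed from the standard
 * sysfs CPU layout): writing "0" to /sys/devices/system/cpu/cpu2/configure
 * deconfigures the core containing cpu2 and writing "1" configures it
 * again; writes targeting an online core, or the core containing cpu0,
 * fail with -EBUSY.
 */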
1030
1031static ssize_t show_cpu_address(struct device *dev,
1032				struct device_attribute *attr, char *buf)
1033{
1034	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
1035}
1036static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
1037
1038static struct attribute *cpu_common_attrs[] = {
1039#ifdef CONFIG_HOTPLUG_CPU
1040	&dev_attr_configure.attr,
1041#endif
1042	&dev_attr_address.attr,
1043	NULL,
1044};
1045
1046static struct attribute_group cpu_common_attr_group = {
1047	.attrs = cpu_common_attrs,
1048};
1049
1050static struct attribute *cpu_online_attrs[] = {
1051	&dev_attr_idle_count.attr,
1052	&dev_attr_idle_time_us.attr,
1053	NULL,
1054};
1055
1056static struct attribute_group cpu_online_attr_group = {
1057	.attrs = cpu_online_attrs,
1058};
1059
1060static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
1061			  void *hcpu)
1062{
1063	unsigned int cpu = (unsigned int)(long)hcpu;
1064	struct device *s = &per_cpu(cpu_device, cpu)->dev;
1065	int err = 0;
1066
1067	switch (action & ~CPU_TASKS_FROZEN) {
1068	case CPU_ONLINE:
1069		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
1070		break;
1071	case CPU_DEAD:
1072		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
1073		break;
1074	}
1075	return notifier_from_errno(err);
1076}
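
/*
 * The "& ~CPU_TASKS_FROZEN" above folds the suspend/resume notifier
 * variants into their normal counterparts, e.g. CPU_ONLINE_FROZEN is
 * handled exactly like CPU_ONLINE, so the idle attribute group is also
 * recreated for CPUs that come back during resume.
 */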
1077
1078static int smp_add_present_cpu(int cpu)
1079{
1080	struct device *s;
1081	struct cpu *c;
1082	int rc;
1083
1084	c = kzalloc(sizeof(*c), GFP_KERNEL);
1085	if (!c)
1086		return -ENOMEM;
1087	per_cpu(cpu_device, cpu) = c;
1088	s = &c->dev;
1089	c->hotpluggable = 1;
1090	rc = register_cpu(c, cpu);
1091	if (rc)
1092		goto out;
1093	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
1094	if (rc)
1095		goto out_cpu;
1096	if (cpu_online(cpu)) {
1097		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
1098		if (rc)
1099			goto out_online;
1100	}
1101	rc = topology_cpu_init(c);
1102	if (rc)
1103		goto out_topology;
1104	return 0;
1105
1106out_topology:
1107	if (cpu_online(cpu))
1108		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
1109out_online:
1110	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
1111out_cpu:
1112#ifdef CONFIG_HOTPLUG_CPU
1113	unregister_cpu(c);
1114#endif
1115out:
1116	return rc;
1117}
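
/*
 * Note on the unwind order above: each label undoes the steps that
 * succeeded before the failure, in reverse order; the online attribute
 * group is only removed if the CPU was online when the group was
 * created.
 */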
1118
1119#ifdef CONFIG_HOTPLUG_CPU
1120
1121int __ref smp_rescan_cpus(void)
1122{
1123	struct sclp_core_info *info;
1124	int nr;
1125
1126	info = smp_get_core_info();
1127	if (!info)
1128		return -ENOMEM;
1129	get_online_cpus();
1130	mutex_lock(&smp_cpu_state_mutex);
1131	nr = __smp_rescan_cpus(info, 1);
1132	mutex_unlock(&smp_cpu_state_mutex);
1133	put_online_cpus();
1134	kfree(info);
1135	if (nr)
1136		topology_schedule_update();
1137	return 0;
1138}
1139
1140static ssize_t __ref rescan_store(struct device *dev,
1141				  struct device_attribute *attr,
1142				  const char *buf,
1143				  size_t count)
1144{
1145	int rc;
1146
1147	rc = smp_rescan_cpus();
1148	return rc ? rc : count;
1149}
1150static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
1151#endif /* CONFIG_HOTPLUG_CPU */
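
/*
 * Usage sketch (path assumed from the standard cpu subsystem layout):
 * any write such as "echo 1 > /sys/devices/system/cpu/rescan" triggers
 * smp_rescan_cpus(), which asks the SCLP for the current core list and
 * adds CPUs that appeared since boot as present but still offline
 * devices.
 */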
1152
1153static int __init s390_smp_init(void)
1154{
1155	int cpu, rc = 0;
1156
1157#ifdef CONFIG_HOTPLUG_CPU
1158	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
1159	if (rc)
1160		return rc;
1161#endif
1162	cpu_notifier_register_begin();
1163	for_each_present_cpu(cpu) {
1164		rc = smp_add_present_cpu(cpu);
1165		if (rc)
1166			goto out;
1167	}
1168
1169	__hotcpu_notifier(smp_cpu_notify, 0);
1170
1171out:
1172	cpu_notifier_register_done();
1173	return rc;
1174}
1175subsys_initcall(s390_smp_init);
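
/*
 * s390_smp_init() runs as a subsys initcall, i.e. after the generic cpu
 * subsystem is available for the rescan attribute; the
 * cpu_notifier_register_begin()/done() pair keeps CPU hotplug blocked
 * while the present CPUs get their sysfs directories, so no event can
 * slip in between smp_add_present_cpu() and the notifier registration.
 */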