// SPDX-License-Identifier: GPL-2.0
/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

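/*
 * Like __pcpu_sigp_relax(), but add a small delay between retries once
 * the target cpu has reported busy a few times in a row.
 */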
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

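/*
 * Set an ec_xxx bit for a target cpu and deliver it with a signal:
 * an external call if the cpu is running, an emergency signal if it
 * is not. A bit that is already pending needs no new signal.
 */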
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}

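/*
 * Allocate lowcore, stacks and the per cpu extended save areas for a
 * cpu and set its prefix register. The boot cpu reuses the lowcore and
 * nodat stack it is already running on.
 */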
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack;
	struct lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
		if (!pcpu->lowcore || !nodat_stack)
			goto out;
	} else {
		nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	}
	async_stack = stack_alloc();
	if (!async_stack)
		goto out;
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	if (nmi_alloc_per_cpu(lc))
		goto out_async;
	if (vdso_alloc_per_cpu(lc))
		goto out_mcesa;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;

out_mcesa:
	nmi_free_per_cpu(lc);
out_async:
	stack_free(async_stack);
out:
	if (pcpu != &pcpu_devices[0]) {
		free_pages(nodat_stack, THREAD_SIZE_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

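/*
 * Counterpart of pcpu_alloc_lowcore(): clear the prefix register and
 * release the per cpu resources. Lowcore and nodat stack of the boot
 * cpu are never freed.
 */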
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long async_stack, nodat_stack, lowcore;

	nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
	lowcore = (unsigned long) pcpu->lowcore;

	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	nmi_free_per_cpu(pcpu->lowcore);
	stack_free(async_stack);
	if (pcpu == &pcpu_devices[0])
		return;
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages(lowcore, LC_ORDER);
}

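/*
 * Fill the lowcore of a secondary cpu with the state it needs to run
 * kernel code: per cpu offset, asce, control register contents, access
 * registers and the facility lists of the boot cpu.
 */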
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = pcpu->lowcore;

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer =
		lc->steal_timer = lc->avg_steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(lc->stfle_fac_list));
	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
	       sizeof(lc->alt_stfle_fac_list));
	arch_spin_lock_setup(cpu);
}

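/*
 * Point the lowcore of a cpu at the kernel stack and the cputime
 * accounting fields of the task it is about to run.
 */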
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}

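/*
 * Start a function on a (stopped) cpu: set up the restart parameters
 * in its lowcore and send a sigp restart.
 */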
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->nodat_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(void (*func)(void*), void *data)
{
	func(data);	/* should not return */
}

static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
						void (*func)(void *),
						void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	if (pcpu->address == source_cpu)
		CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data);
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	__bpon();
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu_devices->lowcore;

	if (pcpu_devices[0].address == stap())
		lc = &S390_lowcore;

	pcpu_delegate(&pcpu_devices[0], func, data,
		      lc->nodat_stack);
}

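/*
 * Translate a physical cpu address to a logical cpu number. Returns -1
 * if no present cpu has this address.
 */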
int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

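/*
 * A cpu is considered preempted if it is neither waiting in enabled
 * wait state nor reported as running by sigp sense running.
 */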
bool arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

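/*
 * Give up the remainder of the hypervisor time slice in favor of the
 * target cpu (diag 0x9c), or of any cpu if only the undirected
 * diag 0x44 yield is available.
 */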
void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C) {
		diag_stat_inc_norecursion(DIAG_STAT_X09C);
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	} else if (MACHINE_HAS_DIAG44 && !smp_cpu_mtid) {
		diag_stat_inc_norecursion(DIAG_STAT_X044);
		asm volatile("diag 0,0,0x44");
	}
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	cpumask_t cpumask;
	u64 end;
	int cpu;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
}
NOKPROBE_SYMBOL(smp_emergency_stop);

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu_devices + cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_CRASH_DUMP

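/*
 * Store the register state of a remote cpu into the save areas of its
 * lowcore with sigp store status, including the additional vector or
 * guarded storage state if the machine has it.
 */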
int smp_store_status(int cpu)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	unsigned long pa;

	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_GS)
		pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
	if (!page)
		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
		      PAGE_SIZE, 1UL << 31);

	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag_dma_ops.diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

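/*
 * Accessors for the polarization of a cpu. smp_cpu_state_mutex must be
 * held when the polarization is changed.
 */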
void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

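/*
 * Read the core list from the SCLP. If that fails, fall back to cpu
 * address probing with sigp sense and stick to it from then on.
 */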
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}

static int smp_add_present_cpu(int cpu);

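/*
 * Walk the detected core list and make every cpu that is not yet known
 * present, one logical cpu per thread of a core. Returns the number of
 * new cpus.
 */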
static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i, j;
	u16 address;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (sclp.has_core_type && info->core[i].type != boot_core_type)
			continue;
		address = info->core[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
				continue;
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			else
				nr++;
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
				break;
		}
	}
	return nr;
}

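/*
 * Detect the cpus available at boot time: determine the type of the
 * boot cpu, enable multi-threading if configured and supported, print
 * the cpu counts and populate the present mask.
 */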
void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	if (!info)
		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
		      __func__, sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	memblock_free_early((unsigned long)info, sizeof(*info));
}

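/*
 * Second stage of bringing up a secondary cpu: set up timers, pfault
 * and cpu flags, mark the cpu online and enter the idle loop.
 */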
static void smp_init_secondary(void)
{
	int cpu = smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	restore_access_regs(S390_lowcore.access_regs_save_area);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Activate a secondary processor.
 */
static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = smp_get_base_cpu(cpu);
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

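/*
 * Remove the current cpu from the online mask and shut off its
 * interrupt sources in preparation for taking it down.
 */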
int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

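/*
 * Runs on the cpu that tears down another one: wait until the dying
 * cpu has actually stopped, then free its resources.
 */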
void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	__bpon();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

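/*
 * Derive the number of possible cpus from the sclp limits and the
 * possible_cpus= kernel parameter and initialize the possible mask.
 */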
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
	&dev_attr_configure.attr,
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}
static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}

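/*
 * Register a present cpu with the driver core and create its common
 * sysfs attributes and topology files.
 */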
static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
	unregister_cpu(c);
out:
	return rc;
}

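/*
 * Rescan the cpu configuration, e.g. after standby cpus have been
 * attached, and add any new cpus found; triggered via the "rescan"
 * sysfs attribute below.
 */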
int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus();
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);
1/*
2 * SMP related functions
3 *
4 * Copyright IBM Corp. 1999,2012
5 * Author(s): Denis Joseph Barrow,
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Heiko Carstens <heiko.carstens@de.ibm.com>,
8 *
9 * based on other smp stuff by
10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
11 * (c) 1998 Ingo Molnar
12 *
13 * The code outside of smp.c uses logical cpu numbers, only smp.c does
14 * the translation of logical to physical cpu ids. All new code that
15 * operates on physical cpu numbers needs to go into smp.c.
16 */
17
18#define KMSG_COMPONENT "cpu"
19#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
20
21#include <linux/workqueue.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/mm.h>
25#include <linux/err.h>
26#include <linux/spinlock.h>
27#include <linux/kernel_stat.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
30#include <linux/irqflags.h>
31#include <linux/cpu.h>
32#include <linux/slab.h>
33#include <linux/crash_dump.h>
34#include <asm/asm-offsets.h>
35#include <asm/switch_to.h>
36#include <asm/facility.h>
37#include <asm/ipl.h>
38#include <asm/setup.h>
39#include <asm/irq.h>
40#include <asm/tlbflush.h>
41#include <asm/timer.h>
42#include <asm/lowcore.h>
43#include <asm/sclp.h>
44#include <asm/vdso.h>
45#include <asm/debug.h>
46#include <asm/os_info.h>
47#include "entry.h"
48
49enum {
50 sigp_sense = 1,
51 sigp_external_call = 2,
52 sigp_emergency_signal = 3,
53 sigp_start = 4,
54 sigp_stop = 5,
55 sigp_restart = 6,
56 sigp_stop_and_store_status = 9,
57 sigp_initial_cpu_reset = 11,
58 sigp_cpu_reset = 12,
59 sigp_set_prefix = 13,
60 sigp_store_status_at_address = 14,
61 sigp_store_extended_status_at_address = 15,
62 sigp_set_architecture = 18,
63 sigp_conditional_emergency_signal = 19,
64 sigp_sense_running = 21,
65};
66
67enum {
68 sigp_order_code_accepted = 0,
69 sigp_status_stored = 1,
70 sigp_busy = 2,
71 sigp_not_operational = 3,
72};
73
74enum {
75 ec_schedule = 0,
76 ec_call_function,
77 ec_call_function_single,
78 ec_stop_cpu,
79};
80
81enum {
82 CPU_STATE_STANDBY,
83 CPU_STATE_CONFIGURED,
84};
85
86struct pcpu {
87 struct cpu cpu;
88 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
89 unsigned long async_stack; /* async stack for the cpu */
90 unsigned long panic_stack; /* panic stack for the cpu */
91 unsigned long ec_mask; /* bit mask for ec_xxx functions */
92 int state; /* physical cpu state */
93 u32 status; /* last status received via sigp */
94 u16 address; /* physical cpu address */
95};
96
97static u8 boot_cpu_type;
98static u16 boot_cpu_address;
99static struct pcpu pcpu_devices[NR_CPUS];
100
101DEFINE_MUTEX(smp_cpu_state_mutex);
102
103/*
104 * Signal processor helper functions.
105 */
106static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
107{
108 register unsigned int reg1 asm ("1") = parm;
109 int cc;
110
111 asm volatile(
112 " sigp %1,%2,0(%3)\n"
113 " ipm %0\n"
114 " srl %0,28\n"
115 : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
116 if (status && cc == 1)
117 *status = reg1;
118 return cc;
119}
120
121static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
122{
123 int cc;
124
125 while (1) {
126 cc = __pcpu_sigp(addr, order, parm, status);
127 if (cc != sigp_busy)
128 return cc;
129 cpu_relax();
130 }
131}
132
133static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
134{
135 int cc, retry;
136
137 for (retry = 0; ; retry++) {
138 cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
139 if (cc != sigp_busy)
140 break;
141 if (retry >= 3)
142 udelay(10);
143 }
144 return cc;
145}
146
147static inline int pcpu_stopped(struct pcpu *pcpu)
148{
149 if (__pcpu_sigp(pcpu->address, sigp_sense,
150 0, &pcpu->status) != sigp_status_stored)
151 return 0;
152 /* Check for stopped and check stop state */
153 return !!(pcpu->status & 0x50);
154}
155
156static inline int pcpu_running(struct pcpu *pcpu)
157{
158 if (__pcpu_sigp(pcpu->address, sigp_sense_running,
159 0, &pcpu->status) != sigp_status_stored)
160 return 1;
161 /* Check for running status */
162 return !(pcpu->status & 0x400);
163}
164
165/*
166 * Find struct pcpu by cpu address.
167 */
168static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
169{
170 int cpu;
171
172 for_each_cpu(cpu, mask)
173 if (pcpu_devices[cpu].address == address)
174 return pcpu_devices + cpu;
175 return NULL;
176}
177
178static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
179{
180 int order;
181
182 set_bit(ec_bit, &pcpu->ec_mask);
183 order = pcpu_running(pcpu) ?
184 sigp_external_call : sigp_emergency_signal;
185 pcpu_sigp_retry(pcpu, order, 0);
186}
187
188static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
189{
190 struct _lowcore *lc;
191
192 if (pcpu != &pcpu_devices[0]) {
193 pcpu->lowcore = (struct _lowcore *)
194 __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
195 pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
196 pcpu->panic_stack = __get_free_page(GFP_KERNEL);
197 if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
198 goto out;
199 }
200 lc = pcpu->lowcore;
201 memcpy(lc, &S390_lowcore, 512);
202 memset((char *) lc + 512, 0, sizeof(*lc) - 512);
203 lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
204 lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
205 lc->cpu_nr = cpu;
206#ifndef CONFIG_64BIT
207 if (MACHINE_HAS_IEEE) {
208 lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
209 if (!lc->extended_save_area_addr)
210 goto out;
211 }
212#else
213 if (vdso_alloc_per_cpu(lc))
214 goto out;
215#endif
216 lowcore_ptr[cpu] = lc;
217 pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc);
218 return 0;
219out:
220 if (pcpu != &pcpu_devices[0]) {
221 free_page(pcpu->panic_stack);
222 free_pages(pcpu->async_stack, ASYNC_ORDER);
223 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
224 }
225 return -ENOMEM;
226}
227
228#ifdef CONFIG_HOTPLUG_CPU
229
230static void pcpu_free_lowcore(struct pcpu *pcpu)
231{
232 pcpu_sigp_retry(pcpu, sigp_set_prefix, 0);
233 lowcore_ptr[pcpu - pcpu_devices] = NULL;
234#ifndef CONFIG_64BIT
235 if (MACHINE_HAS_IEEE) {
236 struct _lowcore *lc = pcpu->lowcore;
237
238 free_page((unsigned long) lc->extended_save_area_addr);
239 lc->extended_save_area_addr = 0;
240 }
241#else
242 vdso_free_per_cpu(pcpu->lowcore);
243#endif
244 if (pcpu != &pcpu_devices[0]) {
245 free_page(pcpu->panic_stack);
246 free_pages(pcpu->async_stack, ASYNC_ORDER);
247 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
248 }
249}
250
251#endif /* CONFIG_HOTPLUG_CPU */
252
253static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
254{
255 struct _lowcore *lc = pcpu->lowcore;
256
257 atomic_inc(&init_mm.context.attach_count);
258 lc->cpu_nr = cpu;
259 lc->percpu_offset = __per_cpu_offset[cpu];
260 lc->kernel_asce = S390_lowcore.kernel_asce;
261 lc->machine_flags = S390_lowcore.machine_flags;
262 lc->ftrace_func = S390_lowcore.ftrace_func;
263 lc->user_timer = lc->system_timer = lc->steal_timer = 0;
264 __ctl_store(lc->cregs_save_area, 0, 15);
265 save_access_regs((unsigned int *) lc->access_regs_save_area);
266 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
267 MAX_FACILITY_BIT/8);
268}
269
270static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
271{
272 struct _lowcore *lc = pcpu->lowcore;
273 struct thread_info *ti = task_thread_info(tsk);
274
275 lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
276 lc->thread_info = (unsigned long) task_thread_info(tsk);
277 lc->current_task = (unsigned long) tsk;
278 lc->user_timer = ti->user_timer;
279 lc->system_timer = ti->system_timer;
280 lc->steal_timer = 0;
281}
282
283static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
284{
285 struct _lowcore *lc = pcpu->lowcore;
286
287 lc->restart_stack = lc->kernel_stack;
288 lc->restart_fn = (unsigned long) func;
289 lc->restart_data = (unsigned long) data;
290 lc->restart_source = -1UL;
291 pcpu_sigp_retry(pcpu, sigp_restart, 0);
292}
293
294/*
295 * Call function via PSW restart on pcpu and stop the current cpu.
296 */
297static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
298 void *data, unsigned long stack)
299{
300 struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
301 struct {
302 unsigned long stack;
303 void *func;
304 void *data;
305 unsigned long source;
306 } restart = { stack, func, data, stap() };
307
308 __load_psw_mask(psw_kernel_bits);
309 if (pcpu->address == restart.source)
310 func(data); /* should not return */
311 /* Stop target cpu (if func returns this stops the current cpu). */
312 pcpu_sigp_retry(pcpu, sigp_stop, 0);
313 /* Restart func on the target cpu and stop the current cpu. */
314 memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart));
315 asm volatile(
316 "0: sigp 0,%0,6 # sigp restart to target cpu\n"
317 " brc 2,0b # busy, try again\n"
318 "1: sigp 0,%1,5 # sigp stop to current cpu\n"
319 " brc 2,1b # busy, try again\n"
320 : : "d" (pcpu->address), "d" (restart.source) : "0", "1", "cc");
321 for (;;) ;
322}
323
324/*
325 * Call function on an online CPU.
326 */
327void smp_call_online_cpu(void (*func)(void *), void *data)
328{
329 struct pcpu *pcpu;
330
331 /* Use the current cpu if it is online. */
332 pcpu = pcpu_find_address(cpu_online_mask, stap());
333 if (!pcpu)
334 /* Use the first online cpu. */
335 pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
336 pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
337}
338
339/*
340 * Call function on the ipl CPU.
341 */
342void smp_call_ipl_cpu(void (*func)(void *), void *data)
343{
344 pcpu_delegate(&pcpu_devices[0], func, data,
345 pcpu_devices->panic_stack + PAGE_SIZE);
346}
347
348int smp_find_processor_id(u16 address)
349{
350 int cpu;
351
352 for_each_present_cpu(cpu)
353 if (pcpu_devices[cpu].address == address)
354 return cpu;
355 return -1;
356}
357
358int smp_vcpu_scheduled(int cpu)
359{
360 return pcpu_running(pcpu_devices + cpu);
361}
362
363void smp_yield(void)
364{
365 if (MACHINE_HAS_DIAG44)
366 asm volatile("diag 0,0,0x44");
367}
368
369void smp_yield_cpu(int cpu)
370{
371 if (MACHINE_HAS_DIAG9C)
372 asm volatile("diag %0,0,0x9c"
373 : : "d" (pcpu_devices[cpu].address));
374 else if (MACHINE_HAS_DIAG44)
375 asm volatile("diag 0,0,0x44");
376}
377
378/*
379 * Send cpus emergency shutdown signal. This gives the cpus the
380 * opportunity to complete outstanding interrupts.
381 */
382void smp_emergency_stop(cpumask_t *cpumask)
383{
384 u64 end;
385 int cpu;
386
387 end = get_clock() + (1000000UL << 12);
388 for_each_cpu(cpu, cpumask) {
389 struct pcpu *pcpu = pcpu_devices + cpu;
390 set_bit(ec_stop_cpu, &pcpu->ec_mask);
391 while (__pcpu_sigp(pcpu->address, sigp_emergency_signal,
392 0, NULL) == sigp_busy &&
393 get_clock() < end)
394 cpu_relax();
395 }
396 while (get_clock() < end) {
397 for_each_cpu(cpu, cpumask)
398 if (pcpu_stopped(pcpu_devices + cpu))
399 cpumask_clear_cpu(cpu, cpumask);
400 if (cpumask_empty(cpumask))
401 break;
402 cpu_relax();
403 }
404}
405
406/*
407 * Stop all cpus but the current one.
408 */
409void smp_send_stop(void)
410{
411 cpumask_t cpumask;
412 int cpu;
413
414 /* Disable all interrupts/machine checks */
415 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
416 trace_hardirqs_off();
417
418 debug_set_critical();
419 cpumask_copy(&cpumask, cpu_online_mask);
420 cpumask_clear_cpu(smp_processor_id(), &cpumask);
421
422 if (oops_in_progress)
423 smp_emergency_stop(&cpumask);
424
425 /* stop all processors */
426 for_each_cpu(cpu, &cpumask) {
427 struct pcpu *pcpu = pcpu_devices + cpu;
428 pcpu_sigp_retry(pcpu, sigp_stop, 0);
429 while (!pcpu_stopped(pcpu))
430 cpu_relax();
431 }
432}
433
434/*
435 * Stop the current cpu.
436 */
437void smp_stop_cpu(void)
438{
439 pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
440 for (;;) ;
441}
442
443/*
444 * This is the main routine where commands issued by other
445 * cpus are handled.
446 */
447static void do_ext_call_interrupt(struct ext_code ext_code,
448 unsigned int param32, unsigned long param64)
449{
450 unsigned long bits;
451 int cpu;
452
453 cpu = smp_processor_id();
454 if (ext_code.code == 0x1202)
455 kstat_cpu(cpu).irqs[EXTINT_EXC]++;
456 else
457 kstat_cpu(cpu).irqs[EXTINT_EMS]++;
458 /*
459 * handle bit signal external calls
460 */
461 bits = xchg(&pcpu_devices[cpu].ec_mask, 0);
462
463 if (test_bit(ec_stop_cpu, &bits))
464 smp_stop_cpu();
465
466 if (test_bit(ec_schedule, &bits))
467 scheduler_ipi();
468
469 if (test_bit(ec_call_function, &bits))
470 generic_smp_call_function_interrupt();
471
472 if (test_bit(ec_call_function_single, &bits))
473 generic_smp_call_function_single_interrupt();
474
475}
476
477void arch_send_call_function_ipi_mask(const struct cpumask *mask)
478{
479 int cpu;
480
481 for_each_cpu(cpu, mask)
482 pcpu_ec_call(pcpu_devices + cpu, ec_call_function);
483}
484
485void arch_send_call_function_single_ipi(int cpu)
486{
487 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
488}
489
490#ifndef CONFIG_64BIT
491/*
492 * this function sends a 'purge tlb' signal to another CPU.
493 */
494static void smp_ptlb_callback(void *info)
495{
496 __tlb_flush_local();
497}
498
499void smp_ptlb_all(void)
500{
501 on_each_cpu(smp_ptlb_callback, NULL, 1);
502}
503EXPORT_SYMBOL(smp_ptlb_all);
504#endif /* ! CONFIG_64BIT */
505
506/*
507 * this function sends a 'reschedule' IPI to another CPU.
508 * it goes straight through and wastes no time serializing
509 * anything. Worst case is that we lose a reschedule ...
510 */
511void smp_send_reschedule(int cpu)
512{
513 pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
514}
515
516/*
517 * parameter area for the set/clear control bit callbacks
518 */
519struct ec_creg_mask_parms {
520 unsigned long orval;
521 unsigned long andval;
522 int cr;
523};
524
525/*
526 * callback for setting/clearing control bits
527 */
528static void smp_ctl_bit_callback(void *info)
529{
530 struct ec_creg_mask_parms *pp = info;
531 unsigned long cregs[16];
532
533 __ctl_store(cregs, 0, 15);
534 cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
535 __ctl_load(cregs, 0, 15);
536}
537
538/*
539 * Set a bit in a control register of all cpus
540 */
541void smp_ctl_set_bit(int cr, int bit)
542{
543 struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
544
545 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
546}
547EXPORT_SYMBOL(smp_ctl_set_bit);
548
549/*
550 * Clear a bit in a control register of all cpus
551 */
552void smp_ctl_clear_bit(int cr, int bit)
553{
554 struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
555
556 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
557}
558EXPORT_SYMBOL(smp_ctl_clear_bit);
559
560#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
561
562struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
563EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
564
565static void __init smp_get_save_area(int cpu, u16 address)
566{
567 void *lc = pcpu_devices[0].lowcore;
568 struct save_area *save_area;
569
570 if (is_kdump_kernel())
571 return;
572 if (!OLDMEM_BASE && (address == boot_cpu_address ||
573 ipl_info.type != IPL_TYPE_FCP_DUMP))
574 return;
575 if (cpu >= NR_CPUS) {
576 pr_warning("CPU %i exceeds the maximum %i and is excluded "
577 "from the dump\n", cpu, NR_CPUS - 1);
578 return;
579 }
580 save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
581 if (!save_area)
582 panic("could not allocate memory for save area\n");
583 zfcpdump_save_areas[cpu] = save_area;
584#ifdef CONFIG_CRASH_DUMP
585 if (address == boot_cpu_address) {
586 /* Copy the registers of the boot cpu. */
587 copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
588 SAVE_AREA_BASE - PAGE_SIZE, 0);
589 return;
590 }
591#endif
592 /* Get the registers of a non-boot cpu. */
593 __pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL);
594 memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
595}
596
597int smp_store_status(int cpu)
598{
599 struct pcpu *pcpu;
600
601 pcpu = pcpu_devices + cpu;
602 if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status,
603 0, NULL) != sigp_order_code_accepted)
604 return -EIO;
605 return 0;
606}
607
608#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
609
610static inline void smp_get_save_area(int cpu, u16 address) { }
611
612#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
613
614static struct sclp_cpu_info *smp_get_cpu_info(void)
615{
616 static int use_sigp_detection;
617 struct sclp_cpu_info *info;
618 int address;
619
620 info = kzalloc(sizeof(*info), GFP_KERNEL);
621 if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
622 use_sigp_detection = 1;
623 for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
624 if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) ==
625 sigp_not_operational)
626 continue;
627 info->cpu[info->configured].address = address;
628 info->configured++;
629 }
630 info->combined = info->configured;
631 }
632 return info;
633}
634
635static int __devinit smp_add_present_cpu(int cpu);
636
637static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
638 int sysfs_add)
639{
640 struct pcpu *pcpu;
641 cpumask_t avail;
642 int cpu, nr, i;
643
644 nr = 0;
645 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
646 cpu = cpumask_first(&avail);
647 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
648 if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
649 continue;
650 if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
651 continue;
652 pcpu = pcpu_devices + cpu;
653 pcpu->address = info->cpu[i].address;
654 pcpu->state = (cpu >= info->configured) ?
655 CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
656 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
657 set_cpu_present(cpu, true);
658 if (sysfs_add && smp_add_present_cpu(cpu) != 0)
659 set_cpu_present(cpu, false);
660 else
661 nr++;
662 cpu = cpumask_next(cpu, &avail);
663 }
664 return nr;
665}
666
667static void __init smp_detect_cpus(void)
668{
669 unsigned int cpu, c_cpus, s_cpus;
670 struct sclp_cpu_info *info;
671
672 info = smp_get_cpu_info();
673 if (!info)
674 panic("smp_detect_cpus failed to allocate memory\n");
675 if (info->has_cpu_type) {
676 for (cpu = 0; cpu < info->combined; cpu++) {
677 if (info->cpu[cpu].address != boot_cpu_address)
678 continue;
679 /* The boot cpu dictates the cpu type. */
680 boot_cpu_type = info->cpu[cpu].type;
681 break;
682 }
683 }
684 c_cpus = s_cpus = 0;
685 for (cpu = 0; cpu < info->combined; cpu++) {
686 if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
687 continue;
688 if (cpu < info->configured) {
689 smp_get_save_area(c_cpus, info->cpu[cpu].address);
690 c_cpus++;
691 } else
692 s_cpus++;
693 }
694 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
695 get_online_cpus();
696 __smp_rescan_cpus(info, 0);
697 put_online_cpus();
698 kfree(info);
699}
700
701/*
702 * Activate a secondary processor.
703 */
704static void __cpuinit smp_start_secondary(void *cpuvoid)
705{
706 S390_lowcore.last_update_clock = get_clock();
707 S390_lowcore.restart_stack = (unsigned long) restart_stack;
708 S390_lowcore.restart_fn = (unsigned long) do_restart;
709 S390_lowcore.restart_data = 0;
710 S390_lowcore.restart_source = -1UL;
711 restore_access_regs(S390_lowcore.access_regs_save_area);
712 __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
713 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
714 cpu_init();
715 preempt_disable();
716 init_cpu_timer();
717 init_cpu_vtimer();
718 pfault_init();
719 notify_cpu_starting(smp_processor_id());
720 ipi_call_lock();
721 set_cpu_online(smp_processor_id(), true);
722 ipi_call_unlock();
723 local_irq_enable();
724 /* cpu_idle will call schedule for us */
725 cpu_idle();
726}
727
728/* Upping and downing of CPUs */
729int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
730{
731 struct pcpu *pcpu;
732 int rc;
733
734 pcpu = pcpu_devices + cpu;
735 if (pcpu->state != CPU_STATE_CONFIGURED)
736 return -EIO;
737 if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
738 sigp_order_code_accepted)
739 return -EIO;
740
741 rc = pcpu_alloc_lowcore(pcpu, cpu);
742 if (rc)
743 return rc;
744 pcpu_prepare_secondary(pcpu, cpu);
745 pcpu_attach_task(pcpu, tidle);
746 pcpu_start_fn(pcpu, smp_start_secondary, NULL);
747 while (!cpu_online(cpu))
748 cpu_relax();
749 return 0;
750}
751
752static int __init setup_possible_cpus(char *s)
753{
754 int max, cpu;
755
756 if (kstrtoint(s, 0, &max) < 0)
757 return 0;
758 init_cpu_possible(cpumask_of(0));
759 for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
760 set_cpu_possible(cpu, true);
761 return 0;
762}
763early_param("possible_cpus", setup_possible_cpus);
764
765#ifdef CONFIG_HOTPLUG_CPU
766
767int __cpu_disable(void)
768{
769 unsigned long cregs[16];
770
771 set_cpu_online(smp_processor_id(), false);
772 /* Disable pseudo page faults on this cpu. */
773 pfault_fini();
774 /* Disable interrupt sources via control register. */
775 __ctl_store(cregs, 0, 15);
776 cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */
777 cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
778 cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
779 __ctl_load(cregs, 0, 15);
780 return 0;
781}
782
783void __cpu_die(unsigned int cpu)
784{
785 struct pcpu *pcpu;
786
787 /* Wait until target cpu is down */
788 pcpu = pcpu_devices + cpu;
789 while (!pcpu_stopped(pcpu))
790 cpu_relax();
791 pcpu_free_lowcore(pcpu);
792 atomic_dec(&init_mm.context.attach_count);
793}
794
795void __noreturn cpu_die(void)
796{
797 idle_task_exit();
798 pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
799 for (;;) ;
800}
801
802#endif /* CONFIG_HOTPLUG_CPU */
803
804void __init smp_prepare_cpus(unsigned int max_cpus)
805{
806 /* request the 0x1201 emergency signal external interrupt */
807 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
808 panic("Couldn't request external interrupt 0x1201");
809 /* request the 0x1202 external call external interrupt */
810 if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
811 panic("Couldn't request external interrupt 0x1202");
812 smp_detect_cpus();
813}
814
void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	boot_cpu_address = stap();
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = boot_cpu_address;
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
	pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

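/* The boot cpu always runs as logical cpu 0. */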
void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
}

/*
 * The frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * Usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
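/*
 * The "configure" sysfs attribute reflects and changes the physical cpu
 * state, e.g. from a shell:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu2/configure	  # deconfigure
 *	echo 1 > /sys/devices/system/cpu/cpu2/configure	  # configure
 *
 * Only offline cpus other than cpu 0 may change state.
 */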
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	if (cpu_online(cpu) || cpu == 0)
		goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_cpu_deconfigure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_STANDBY;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_cpu_configure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_CONFIGURED;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
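/* Expose the physical cpu address as /sys/devices/system/cpu/cpuN/address. */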
static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

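/*
 * The idle statistics are updated locklessly; idle->sequence is odd while
 * an update is in progress, so readers retry until they see an even and
 * unchanged sequence around their reads.
 */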
static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long idle_count;
	unsigned int sequence;

	do {
		sequence = ACCESS_ONCE(idle->sequence);
		idle_count = ACCESS_ONCE(idle->idle_count);
		if (ACCESS_ONCE(idle->idle_enter))
			idle_count++;	/* count the idle period in progress */
	} while ((sequence & 1) || (idle->sequence != sequence));
	return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long now, idle_time, idle_enter, idle_exit;
	unsigned int sequence;

	do {
		now = get_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		idle_time = ACCESS_ONCE(idle->idle_time);
		idle_enter = ACCESS_ONCE(idle->idle_enter);
		idle_exit = ACCESS_ONCE(idle->idle_exit);
	} while ((sequence & 1) || (idle->sequence != sequence));
	/* Add the still running idle period, if any. */
	idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
	/* TOD clock units: shifting right by 12 yields microseconds. */
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

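/*
 * Hotplug notifier: create the online-only sysfs attributes when a cpu
 * comes online and remove them again when it goes away.
 */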
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct device *s = &per_cpu(cpu_device, cpu)->dev;
	int err = 0;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

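/*
 * Register a present cpu with the driver core and create its sysfs
 * attribute groups. The struct cpu is allocated here and remembered in
 * the cpu_device per-cpu pointer for the hotplug notifier above.
 */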
static int __devinit smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

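/*
 * Rescan the cpus known to the system: newly discovered cpus show up in
 * the standby state and can then be configured and brought online.
 */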
int __ref smp_rescan_cpus(void)
{
	struct sclp_cpu_info *info;
	int nr;

	info = smp_get_cpu_info();
	if (!info)
		return -ENOMEM;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}
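/*
 * Trigger a rescan from user space, e.g.:
 *
 *	echo 1 > /sys/devices/system/cpu/rescan
 */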
static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
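/*
 * Late setup: register the hotplug notifier, the rescan attribute and a
 * device for each cpu that is already present.
 */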
static int __init s390_smp_init(void)
{
	int cpu, rc;

	register_cpu_notifier(&smp_cpu_nb);
#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(s390_smp_init);