// SPDX-License-Identifier: GPL-2.0
/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *            Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/ctlreg.h>
#include <asm/pfault.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/abs_lowcore.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include <asm/vdso.h>
#include <asm/maccess.h>
#include "entry.h"

enum {
        ec_schedule = 0,
        ec_call_function_single,
        ec_stop_cpu,
        ec_mcck_pending,
        ec_irq_work,
};

enum {
        CPU_STATE_STANDBY,
        CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
        unsigned long ec_mask;          /* bit mask for ec_xxx functions */
        unsigned long ec_clk;           /* sigp timestamp for ec_xxx */
        signed char state;              /* physical cpu state */
        signed char polarization;       /* physical polarization */
        u16 address;                    /* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;
cpumask_t cpu_setup_mask;

static int __init early_nosmt(char *s)
{
        smp_max_threads = 1;
        return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
        get_option(&s, &smp_max_threads);
        return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
        int cc;

        while (1) {
                cc = __pcpu_sigp(addr, order, parm, NULL);
                if (cc != SIGP_CC_BUSY)
                        return cc;
                cpu_relax();
        }
}

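/* Issue a sigp order to a pcpu and retry while the target signals busy. */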
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
        int cc, retry;

        for (retry = 0; ; retry++) {
                cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
                if (cc != SIGP_CC_BUSY)
                        break;
                if (retry >= 3)
                        udelay(10);
        }
        return cc;
}

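/* Return non-zero if the CPU is in the stopped or check-stop state. */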
static inline int pcpu_stopped(struct pcpu *pcpu)
{
        u32 status;

        if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
                        0, &status) != SIGP_CC_STATUS_STORED)
                return 0;
        return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
        if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
                        0, NULL) != SIGP_CC_STATUS_STORED)
                return 1;
        /* Status stored condition code is equivalent to cpu not running. */
        return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
        int cpu;

        for_each_cpu(cpu, mask)
                if (pcpu_devices[cpu].address == address)
                        return pcpu_devices + cpu;
        return NULL;
}

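/*
 * Post an ec_xxx request bit and kick the target CPU: a running CPU is
 * signalled with an external call, a waiting one with an emergency signal.
 */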
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
        int order;

        if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
                return;
        order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
        pcpu->ec_clk = get_tod_clock_fast();
        pcpu_sigp_retry(pcpu, order, 0);
}

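/*
 * Allocate and initialize the lowcore, the async, nodat and machine check
 * stacks and the machine check extended save area for a secondary CPU,
 * then set the CPU's prefix register to the new lowcore.
 */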
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
        unsigned long async_stack, nodat_stack, mcck_stack;
        struct lowcore *lc;

        lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
        nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
        async_stack = stack_alloc();
        mcck_stack = stack_alloc();
        if (!lc || !nodat_stack || !async_stack || !mcck_stack)
                goto out;
        memcpy(lc, &S390_lowcore, 512);
        memset((char *) lc + 512, 0, sizeof(*lc) - 512);
        lc->async_stack = async_stack + STACK_INIT_OFFSET;
        lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
        lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
        lc->cpu_nr = cpu;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        lc->spinlock_index = 0;
        lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
        lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
        lc->preempt_count = PREEMPT_DISABLED;
        if (nmi_alloc_mcesa(&lc->mcesad))
                goto out;
        if (abs_lowcore_map(cpu, lc, true))
                goto out_mcesa;
        lowcore_ptr[cpu] = lc;
        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, __pa(lc));
        return 0;

out_mcesa:
        nmi_free_mcesa(&lc->mcesad);
out:
        stack_free(mcck_stack);
        stack_free(async_stack);
        free_pages(nodat_stack, THREAD_SIZE_ORDER);
        free_pages((unsigned long) lc, LC_ORDER);
        return -ENOMEM;
}

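/* Undo pcpu_alloc_lowcore(): reset the prefix and free lowcore and stacks. */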
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
        unsigned long async_stack, nodat_stack, mcck_stack;
        struct lowcore *lc;
        int cpu;

        cpu = pcpu - pcpu_devices;
        lc = lowcore_ptr[cpu];
        nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
        async_stack = lc->async_stack - STACK_INIT_OFFSET;
        mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
        lowcore_ptr[cpu] = NULL;
        abs_lowcore_unmap(cpu);
        nmi_free_mcesa(&lc->mcesad);
        stack_free(async_stack);
        stack_free(mcck_stack);
        free_pages(nodat_stack, THREAD_SIZE_ORDER);
        free_pages((unsigned long) lc, LC_ORDER);
}

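/*
 * Finish lowcore setup for a secondary CPU: per-cpu offset, ASCEs and a
 * copy of the control register save area taken from the absolute lowcore.
 */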
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
        struct lowcore *lc, *abs_lc;

        lc = lowcore_ptr[cpu];
        cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
        cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
        lc->cpu_nr = cpu;
        lc->restart_flags = RESTART_FLAG_CTLREGS;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        lc->spinlock_index = 0;
        lc->percpu_offset = __per_cpu_offset[cpu];
        lc->kernel_asce = S390_lowcore.kernel_asce;
        lc->user_asce = s390_invalid_asce;
        lc->machine_flags = S390_lowcore.machine_flags;
        lc->user_timer = lc->system_timer =
                lc->steal_timer = lc->avg_steal_timer = 0;
        abs_lc = get_abs_lowcore();
        memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area));
        put_abs_lowcore(abs_lc);
        lc->cregs_save_area[1] = lc->kernel_asce;
        lc->cregs_save_area[7] = lc->user_asce;
        save_access_regs((unsigned int *) lc->access_regs_save_area);
        arch_spin_lock_setup(cpu);
}

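/* Attach a task to a CPU: kernel stack, current task pointer and timers. */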
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
        struct lowcore *lc;
        int cpu;

        cpu = pcpu - pcpu_devices;
        lc = lowcore_ptr[cpu];
        lc->kernel_stack = (unsigned long)task_stack_page(tsk) + STACK_INIT_OFFSET;
        lc->current_task = (unsigned long)tsk;
        lc->lpp = LPP_MAGIC;
        lc->current_pid = tsk->pid;
        lc->user_timer = tsk->thread.user_timer;
        lc->guest_timer = tsk->thread.guest_timer;
        lc->system_timer = tsk->thread.system_timer;
        lc->hardirq_timer = tsk->thread.hardirq_timer;
        lc->softirq_timer = tsk->thread.softirq_timer;
        lc->steal_timer = 0;
}

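/* Start func on a stopped CPU by means of a sigp restart. */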
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
        struct lowcore *lc;
        int cpu;

        cpu = pcpu - pcpu_devices;
        lc = lowcore_ptr[cpu];
        lc->restart_stack = lc->kernel_stack;
        lc->restart_fn = (unsigned long) func;
        lc->restart_data = (unsigned long) data;
        lc->restart_source = -1U;
        pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

typedef void (pcpu_delegate_fn)(void *);

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
{
        func(data);     /* should not return */
}

static void pcpu_delegate(struct pcpu *pcpu,
                          pcpu_delegate_fn *func,
                          void *data, unsigned long stack)
{
        struct lowcore *lc, *abs_lc;
        unsigned int source_cpu;

        lc = lowcore_ptr[pcpu - pcpu_devices];
        source_cpu = stap();

        if (pcpu->address == source_cpu) {
                call_on_stack(2, stack, void, __pcpu_delegate,
                              pcpu_delegate_fn *, func, void *, data);
        }
        /* Stop target cpu (if func returns this stops the current cpu). */
        pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
        pcpu_sigp_retry(pcpu, SIGP_CPU_RESET, 0);
        /* Restart func on the target cpu and stop the current cpu. */
        if (lc) {
                lc->restart_stack = stack;
                lc->restart_fn = (unsigned long)func;
                lc->restart_data = (unsigned long)data;
                lc->restart_source = source_cpu;
        } else {
                abs_lc = get_abs_lowcore();
                abs_lc->restart_stack = stack;
                abs_lc->restart_fn = (unsigned long)func;
                abs_lc->restart_data = (unsigned long)data;
                abs_lc->restart_source = source_cpu;
                put_abs_lowcore(abs_lc);
        }
        asm volatile(
                "0:     sigp    0,%0,%2 # sigp restart to target cpu\n"
                "       brc     2,0b    # busy, try again\n"
                "1:     sigp    0,%1,%3 # sigp stop to current cpu\n"
                "       brc     2,1b    # busy, try again\n"
                : : "d" (pcpu->address), "d" (source_cpu),
                    "K" (SIGP_RESTART), "K" (SIGP_STOP)
                : "0", "1", "cc");
        for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
        int cc;

        if (smp_cpu_mtid == mtid)
                return 0;
        cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
        if (cc == 0) {
                smp_cpu_mtid = mtid;
                smp_cpu_mt_shift = 0;
                while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
                        smp_cpu_mt_shift++;
                pcpu_devices[0].address = stap();
        }
        return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
        struct pcpu *pcpu;

        /* Use the current cpu if it is online. */
        pcpu = pcpu_find_address(cpu_online_mask, stap());
        if (!pcpu)
                /* Use the first online cpu. */
                pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
        pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
        struct lowcore *lc = lowcore_ptr[0];

        if (pcpu_devices[0].address == stap())
                lc = &S390_lowcore;

        pcpu_delegate(&pcpu_devices[0], func, data,
                      lc->nodat_stack);
}

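/* Map a physical cpu address to its logical cpu number, or -1 if unknown. */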
int smp_find_processor_id(u16 address)
{
        int cpu;

        for_each_present_cpu(cpu)
                if (pcpu_devices[cpu].address == address)
                        return cpu;
        return -1;
}

void schedule_mcck_handler(void)
{
        pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
}

bool notrace arch_vcpu_is_preempted(int cpu)
{
        if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
                return false;
        if (pcpu_running(pcpu_devices + cpu))
                return false;
        return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

void notrace smp_yield_cpu(int cpu)
{
        if (!MACHINE_HAS_DIAG9C)
                return;
        diag_stat_inc_norecursion(DIAG_STAT_X09C);
        asm volatile("diag %0,0,0x9c"
                     : : "d" (pcpu_devices[cpu].address));
}
EXPORT_SYMBOL_GPL(smp_yield_cpu);

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
        static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
        static cpumask_t cpumask;
        u64 end;
        int cpu;

        arch_spin_lock(&lock);
        cpumask_copy(&cpumask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &cpumask);

        end = get_tod_clock() + (1000000UL << 12);
        for_each_cpu(cpu, &cpumask) {
                struct pcpu *pcpu = pcpu_devices + cpu;
                set_bit(ec_stop_cpu, &pcpu->ec_mask);
                while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
                                   0, NULL) == SIGP_CC_BUSY &&
                       get_tod_clock() < end)
                        cpu_relax();
        }
        while (get_tod_clock() < end) {
                for_each_cpu(cpu, &cpumask)
                        if (pcpu_stopped(pcpu_devices + cpu))
                                cpumask_clear_cpu(cpu, &cpumask);
                if (cpumask_empty(&cpumask))
                        break;
                cpu_relax();
        }
        arch_spin_unlock(&lock);
}
NOKPROBE_SYMBOL(smp_emergency_stop);

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
        int cpu;

        /* Disable all interrupts/machine checks */
        __load_psw_mask(PSW_KERNEL_BITS);
        trace_hardirqs_off();

        debug_set_critical();

        if (oops_in_progress)
                smp_emergency_stop();

        /* stop all processors */
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
                pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
                while (!pcpu_stopped(pcpu_devices + cpu))
                        cpu_relax();
        }
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
        unsigned long bits;

        /* handle bit signal external calls */
        bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
        if (test_bit(ec_stop_cpu, &bits))
                smp_stop_cpu();
        if (test_bit(ec_schedule, &bits))
                scheduler_ipi();
        if (test_bit(ec_call_function_single, &bits))
                generic_smp_call_function_single_interrupt();
        if (test_bit(ec_mcck_pending, &bits))
                s390_handle_mcck();
        if (test_bit(ec_irq_work, &bits))
                irq_work_run();
}

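/*
 * Interrupt handler for both the emergency signal (0x1201) and the
 * external call (0x1202) external interrupts.
 */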
static void do_ext_call_interrupt(struct ext_code ext_code,
                                  unsigned int param32, unsigned long param64)
{
        inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
        smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
        pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void arch_smp_send_reschedule(int cpu)
{
        pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
        pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work);
}
#endif

#ifdef CONFIG_CRASH_DUMP

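/*
 * Store the register state of a remote CPU into its lowcore save areas;
 * vector and guarded storage registers are stored into the machine check
 * extended save area via the store-additional-status sigp when available.
 */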
int smp_store_status(int cpu)
{
        struct lowcore *lc;
        struct pcpu *pcpu;
        unsigned long pa;

        pcpu = pcpu_devices + cpu;
        lc = lowcore_ptr[cpu];
        pa = __pa(&lc->floating_pt_save_area);
        if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
                              pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;
        if (!cpu_has_vx() && !MACHINE_HAS_GS)
                return 0;
        pa = lc->mcesad & MCESA_ORIGIN_MASK;
        if (MACHINE_HAS_GS)
                pa |= lc->mcesad & MCESA_LC_MASK;
        if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
                              pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;
        return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp/nvme dump
 *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static bool dump_available(void)
{
        return oldmem_data.start || is_ipl_type_dump();
}

void __init smp_save_dump_ipl_cpu(void)
{
        struct save_area *sa;
        void *regs;

        if (!dump_available())
                return;
        sa = save_area_alloc(true);
        regs = memblock_alloc(512, 8);
        if (!sa || !regs)
                panic("could not allocate memory for boot CPU save area\n");
        copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
        save_area_add_regs(sa, regs);
        memblock_free(regs, 512);
        if (cpu_has_vx())
                save_area_add_vxrs(sa, boot_cpu_vector_save_area);
}

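/*
 * Save the register state of all secondary CPUs of the previous, crashed
 * system with sigp store-status orders, using a scratch page below 2G as
 * the store target.
 */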
void __init smp_save_dump_secondary_cpus(void)
{
        int addr, boot_cpu_addr, max_cpu_addr;
        struct save_area *sa;
        void *page;

        if (!dump_available())
                return;
        /* Allocate a page as dumping area for the store status sigps */
        page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
        if (!page)
                panic("ERROR: Failed to allocate %lx bytes below %lx\n",
                      PAGE_SIZE, 1UL << 31);

        /* Set multi-threading state to the previous system. */
        pcpu_set_smt(sclp.mtid_prev);
        boot_cpu_addr = stap();
        max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
        for (addr = 0; addr <= max_cpu_addr; addr++) {
                if (addr == boot_cpu_addr)
                        continue;
                if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
                    SIGP_CC_NOT_OPERATIONAL)
                        continue;
                sa = save_area_alloc(false);
                if (!sa)
                        panic("could not allocate memory for save area\n");
                __pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(page));
                save_area_add_regs(sa, page);
                if (cpu_has_vx()) {
                        __pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(page));
                        save_area_add_vxrs(sa, page);
                }
        }
        memblock_free(page, PAGE_SIZE);
        diag_amode31_ops.diag308_reset();
        pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
        pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
        return pcpu_devices[cpu].polarization;
}

int smp_cpu_get_cpu_address(int cpu)
{
        return pcpu_devices[cpu].address;
}

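/*
 * Read the core info via SCLP; fall back to probing each possible cpu
 * address with sigp sense if the SCLP call is unavailable or fails.
 */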
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
        static int use_sigp_detection;
        int address;

        if (use_sigp_detection || sclp_get_core_info(info, early)) {
                use_sigp_detection = 1;
                for (address = 0;
                     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
                     address += (1U << smp_cpu_mt_shift)) {
                        if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
                            SIGP_CC_NOT_OPERATIONAL)
                                continue;
                        info->core[info->configured].core_id =
                                address >> smp_cpu_mt_shift;
                        info->configured++;
                }
                info->combined = info->configured;
        }
}

static int smp_add_present_cpu(int cpu);

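/*
 * Register one core and all of its SMT threads as present cpus and
 * return the number of logical cpus that were added.
 */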
static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
                        bool configured, bool early)
{
        struct pcpu *pcpu;
        int cpu, nr, i;
        u16 address;

        nr = 0;
        if (sclp.has_core_type && core->type != boot_core_type)
                return nr;
        cpu = cpumask_first(avail);
        address = core->core_id << smp_cpu_mt_shift;
        for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
                if (pcpu_find_address(cpu_present_mask, address + i))
                        continue;
                pcpu = pcpu_devices + cpu;
                pcpu->address = address + i;
                if (configured)
                        pcpu->state = CPU_STATE_CONFIGURED;
                else
                        pcpu->state = CPU_STATE_STANDBY;
                smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
                set_cpu_present(cpu, true);
                if (!early && smp_add_present_cpu(cpu) != 0)
                        set_cpu_present(cpu, false);
                else
                        nr++;
                cpumask_clear_cpu(cpu, avail);
                cpu = cpumask_next(cpu, avail);
        }
        return nr;
}

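/*
 * Walk the core info and add all cores that are not yet present,
 * returning the total number of logical cpus added.
 */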
static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
        struct sclp_core_entry *core;
        static cpumask_t avail;
        bool configured;
        u16 core_id;
        int nr, i;

        cpus_read_lock();
        mutex_lock(&smp_cpu_state_mutex);
        nr = 0;
        cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
        /*
         * Add IPL core first (which got logical CPU number 0) to make sure
         * that all SMT threads get subsequent logical CPU numbers.
         */
        if (early) {
                core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
                for (i = 0; i < info->configured; i++) {
                        core = &info->core[i];
                        if (core->core_id == core_id) {
                                nr += smp_add_core(core, &avail, true, early);
                                break;
                        }
                }
        }
        for (i = 0; i < info->combined; i++) {
                configured = i < info->configured;
                nr += smp_add_core(&info->core[i], &avail, configured, early);
        }
        mutex_unlock(&smp_cpu_state_mutex);
        cpus_read_unlock();
        return nr;
}

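/*
 * Detect all cpus at boot time: determine the boot core type, enable
 * multi-threading and register all cores reported by SCLP as present.
 */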
void __init smp_detect_cpus(void)
{
        unsigned int cpu, mtid, c_cpus, s_cpus;
        struct sclp_core_info *info;
        u16 address;

        /* Get CPU information */
        info = memblock_alloc(sizeof(*info), 8);
        if (!info)
                panic("%s: Failed to allocate %zu bytes align=0x%x\n",
                      __func__, sizeof(*info), 8);
        smp_get_core_info(info, 1);
        /* Find boot CPU type */
        if (sclp.has_core_type) {
                address = stap();
                for (cpu = 0; cpu < info->combined; cpu++)
                        if (info->core[cpu].core_id == address) {
                                /* The boot cpu dictates the cpu type. */
                                boot_core_type = info->core[cpu].type;
                                break;
                        }
                if (cpu >= info->combined)
                        panic("Could not find boot CPU type");
        }

        /* Set multi-threading state for the current system */
        mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
        mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
        pcpu_set_smt(mtid);

        /* Print number of CPUs */
        c_cpus = s_cpus = 0;
        for (cpu = 0; cpu < info->combined; cpu++) {
                if (sclp.has_core_type &&
                    info->core[cpu].type != boot_core_type)
                        continue;
                if (cpu < info->configured)
                        c_cpus += smp_cpu_mtid + 1;
                else
                        s_cpus += smp_cpu_mtid + 1;
        }
        pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

        /* Add CPUs present at boot */
        __smp_rescan_cpus(info, true);
        memblock_free(info, sizeof(*info));
}

/*
 * Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
        int cpu = raw_smp_processor_id();

        S390_lowcore.last_update_clock = get_tod_clock();
        S390_lowcore.restart_stack = (unsigned long)restart_stack;
        S390_lowcore.restart_fn = (unsigned long)do_restart;
        S390_lowcore.restart_data = 0;
        S390_lowcore.restart_source = -1U;
        S390_lowcore.restart_flags = 0;
        restore_access_regs(S390_lowcore.access_regs_save_area);
        cpu_init();
        rcutree_report_cpu_starting(cpu);
        init_cpu_timer();
        vtime_init();
        vdso_getcpu_init();
        pfault_init();
        cpumask_set_cpu(cpu, &cpu_setup_mask);
        update_cpu_masks();
        notify_cpu_starting(cpu);
        if (topology_cpu_dedicated(cpu))
                set_cpu_flag(CIF_DEDICATED_CPU);
        else
                clear_cpu_flag(CIF_DEDICATED_CPU);
        set_cpu_online(cpu, true);
        inc_irq_stat(CPU_RST);
        local_irq_enable();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        struct pcpu *pcpu = pcpu_devices + cpu;
        int rc;

        if (pcpu->state != CPU_STATE_CONFIGURED)
                return -EIO;
        if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
            SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;

        rc = pcpu_alloc_lowcore(pcpu, cpu);
        if (rc)
                return rc;
        /*
         * Make sure global control register contents do not change
         * until new CPU has initialized control registers.
         */
        system_ctlreg_lock();
        pcpu_prepare_secondary(pcpu, cpu);
        pcpu_attach_task(pcpu, tidle);
        pcpu_start_fn(pcpu, smp_start_secondary, NULL);
        /* Wait until cpu puts itself in the online & active maps */
        while (!cpu_online(cpu))
                cpu_relax();
        system_ctlreg_unlock();
        return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
        get_option(&s, &setup_possible_cpus);
        return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

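/*
 * Take the calling cpu out of the online masks and shut off its
 * external, I/O and most machine check interrupt sources.
 */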
int __cpu_disable(void)
{
        struct ctlreg cregs[16];
        int cpu;

        /* Handle possible pending IPIs */
        smp_handle_ext_call();
        cpu = smp_processor_id();
        set_cpu_online(cpu, false);
        cpumask_clear_cpu(cpu, &cpu_setup_mask);
        update_cpu_masks();
        /* Disable pseudo page faults on this cpu. */
        pfault_fini();
        /* Disable interrupt sources via control register. */
        __local_ctl_store(0, 15, cregs);
        cregs[0].val &= ~0x0000ee70UL;  /* disable all external interrupts */
        cregs[6].val &= ~0xff000000UL;  /* disable all I/O interrupts */
        cregs[14].val &= ~0x1f000000UL; /* disable most machine checks */
        __local_ctl_load(0, 15, cregs);
        clear_cpu_flag(CIF_NOHZ_DELAY);
        return 0;
}

void __cpu_die(unsigned int cpu)
{
        struct pcpu *pcpu;

        /* Wait until target cpu is down */
        pcpu = pcpu_devices + cpu;
        while (!pcpu_stopped(pcpu))
                cpu_relax();
        pcpu_free_lowcore(pcpu);
        cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
        cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
        idle_task_exit();
        pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
        for (;;) ;
}

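/*
 * Set up the possible cpu mask from the "possible_cpus=" parameter and
 * the maximum number of cpus reported by SCLP.
 */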
void __init smp_fill_possible_mask(void)
{
        unsigned int possible, sclp_max, cpu;

        sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
        sclp_max = min(smp_max_threads, sclp_max);
        sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
        possible = setup_possible_cpus ?: nr_cpu_ids;
        possible = min(possible, sclp_max);
        for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
                set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
                panic("Couldn't request external interrupt 0x1201");
        system_ctl_set_bit(0, 14);
        if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
                panic("Couldn't request external interrupt 0x1202");
        system_ctl_set_bit(0, 13);
}

void __init smp_prepare_boot_cpu(void)
{
        struct pcpu *pcpu = pcpu_devices;

        WARN_ON(!cpu_present(0) || !cpu_online(0));
        pcpu->state = CPU_STATE_CONFIGURED;
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_setup_processor_id(void)
{
        pcpu_devices[0].address = stap();
        S390_lowcore.cpu_nr = 0;
        S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
        S390_lowcore.spinlock_index = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static ssize_t cpu_configure_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}

static ssize_t cpu_configure_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct pcpu *pcpu;
        int cpu, val, rc, i;
        char delim;

        if (sscanf(buf, "%d %c", &val, &delim) != 1)
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
        cpus_read_lock();
        mutex_lock(&smp_cpu_state_mutex);
        rc = -EBUSY;
        /* disallow configuration changes of online cpus */
        cpu = dev->id;
        cpu = smp_get_base_cpu(cpu);
        for (i = 0; i <= smp_cpu_mtid; i++)
                if (cpu_online(cpu + i))
                        goto out;
        pcpu = pcpu_devices + cpu;
        rc = 0;
        switch (val) {
        case 0:
                if (pcpu->state != CPU_STATE_CONFIGURED)
                        break;
                rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
                if (rc)
                        break;
                for (i = 0; i <= smp_cpu_mtid; i++) {
                        if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
                                continue;
                        pcpu[i].state = CPU_STATE_STANDBY;
                        smp_cpu_set_polarization(cpu + i,
                                                 POLARIZATION_UNKNOWN);
                }
                topology_expect_change();
                break;
        case 1:
                if (pcpu->state != CPU_STATE_STANDBY)
                        break;
                rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
                if (rc)
                        break;
                for (i = 0; i <= smp_cpu_mtid; i++) {
                        if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
                                continue;
                        pcpu[i].state = CPU_STATE_CONFIGURED;
                        smp_cpu_set_polarization(cpu + i,
                                                 POLARIZATION_UNKNOWN);
                }
                topology_expect_change();
                break;
        default:
                break;
        }
out:
        mutex_unlock(&smp_cpu_state_mutex);
        cpus_read_unlock();
        return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);

static ssize_t show_cpu_address(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
        &dev_attr_configure.attr,
        &dev_attr_address.attr,
        NULL,
};

static struct attribute_group cpu_common_attr_group = {
        .attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
        &dev_attr_idle_count.attr,
        &dev_attr_idle_time_us.attr,
        NULL,
};

static struct attribute_group cpu_online_attr_group = {
        .attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
        struct device *s = &per_cpu(cpu_device, cpu)->dev;

        return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
        struct device *s = &per_cpu(cpu_device, cpu)->dev;

        sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
        return 0;
}

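/*
 * Register a present cpu with the device core and create its common
 * sysfs attributes and topology entries.
 */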
static int smp_add_present_cpu(int cpu)
{
        struct device *s;
        struct cpu *c;
        int rc;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;
        per_cpu(cpu_device, cpu) = c;
        s = &c->dev;
        c->hotpluggable = !!cpu;
        rc = register_cpu(c, cpu);
        if (rc)
                goto out;
        rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
        if (rc)
                goto out_cpu;
        rc = topology_cpu_init(c);
        if (rc)
                goto out_topology;
        return 0;

out_topology:
        sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
        unregister_cpu(c);
out:
        return rc;
}

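/*
 * Rescan for new cpus, e.g. after a write to the "rescan" sysfs
 * attribute, and schedule a topology update if any were added.
 */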
int __ref smp_rescan_cpus(void)
{
        struct sclp_core_info *info;
        int nr;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
        smp_get_core_info(info, 0);
        nr = __smp_rescan_cpus(info, false);
        kfree(info);
        if (nr)
                topology_schedule_update();
        return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf,
                                  size_t count)
{
        int rc;

        rc = lock_device_hotplug_sysfs();
        if (rc)
                return rc;
        rc = smp_rescan_cpus();
        unlock_device_hotplug();
        return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);

static int __init s390_smp_init(void)
{
        struct device *dev_root;
        int cpu, rc = 0;

        dev_root = bus_get_dev_root(&cpu_subsys);
        if (dev_root) {
                rc = device_create_file(dev_root, &dev_attr_rescan);
                put_device(dev_root);
                if (rc)
                        return rc;
        }

        for_each_present_cpu(cpu) {
                rc = smp_add_present_cpu(cpu);
                if (rc)
                        goto out;
        }

        rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
                               smp_cpu_online, smp_cpu_pre_down);
        rc = rc <= 0 ? rc : 0;
out:
        return rc;
}
subsys_initcall(s390_smp_init);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * SMP related functions
4 *
5 * Copyright IBM Corp. 1999, 2012
6 * Author(s): Denis Joseph Barrow,
7 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
8 *
9 * based on other smp stuff by
10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
11 * (c) 1998 Ingo Molnar
12 *
13 * The code outside of smp.c uses logical cpu numbers, only smp.c does
14 * the translation of logical to physical cpu ids. All new code that
15 * operates on physical cpu numbers needs to go into smp.c.
16 */
17
18#define KMSG_COMPONENT "cpu"
19#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
20
21#include <linux/workqueue.h>
22#include <linux/memblock.h>
23#include <linux/export.h>
24#include <linux/init.h>
25#include <linux/mm.h>
26#include <linux/err.h>
27#include <linux/spinlock.h>
28#include <linux/kernel_stat.h>
29#include <linux/delay.h>
30#include <linux/interrupt.h>
31#include <linux/irqflags.h>
32#include <linux/irq_work.h>
33#include <linux/cpu.h>
34#include <linux/slab.h>
35#include <linux/sched/hotplug.h>
36#include <linux/sched/task_stack.h>
37#include <linux/crash_dump.h>
38#include <linux/kprobes.h>
39#include <asm/access-regs.h>
40#include <asm/asm-offsets.h>
41#include <asm/ctlreg.h>
42#include <asm/pfault.h>
43#include <asm/diag.h>
44#include <asm/facility.h>
45#include <asm/fpu.h>
46#include <asm/ipl.h>
47#include <asm/setup.h>
48#include <asm/irq.h>
49#include <asm/tlbflush.h>
50#include <asm/vtimer.h>
51#include <asm/abs_lowcore.h>
52#include <asm/sclp.h>
53#include <asm/debug.h>
54#include <asm/os_info.h>
55#include <asm/sigp.h>
56#include <asm/idle.h>
57#include <asm/nmi.h>
58#include <asm/stacktrace.h>
59#include <asm/topology.h>
60#include <asm/vdso.h>
61#include <asm/maccess.h>
62#include "entry.h"
63
64enum {
65 ec_schedule = 0,
66 ec_call_function_single,
67 ec_stop_cpu,
68 ec_mcck_pending,
69 ec_irq_work,
70};
71
72enum {
73 CPU_STATE_STANDBY,
74 CPU_STATE_CONFIGURED,
75};
76
77static DEFINE_PER_CPU(struct cpu *, cpu_device);
78
79struct pcpu {
80 unsigned long ec_mask; /* bit mask for ec_xxx functions */
81 unsigned long ec_clk; /* sigp timestamp for ec_xxx */
82 signed char state; /* physical cpu state */
83 signed char polarization; /* physical polarization */
84 u16 address; /* physical cpu address */
85};
86
87static u8 boot_core_type;
88static struct pcpu pcpu_devices[NR_CPUS];
89
90unsigned int smp_cpu_mt_shift;
91EXPORT_SYMBOL(smp_cpu_mt_shift);
92
93unsigned int smp_cpu_mtid;
94EXPORT_SYMBOL(smp_cpu_mtid);
95
96#ifdef CONFIG_CRASH_DUMP
97__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
98#endif
99
100static unsigned int smp_max_threads __initdata = -1U;
101cpumask_t cpu_setup_mask;
102
103static int __init early_nosmt(char *s)
104{
105 smp_max_threads = 1;
106 return 0;
107}
108early_param("nosmt", early_nosmt);
109
110static int __init early_smt(char *s)
111{
112 get_option(&s, &smp_max_threads);
113 return 0;
114}
115early_param("smt", early_smt);
116
117/*
118 * The smp_cpu_state_mutex must be held when changing the state or polarization
119 * member of a pcpu data structure within the pcpu_devices array.
120 */
121DEFINE_MUTEX(smp_cpu_state_mutex);
122
123/*
124 * Signal processor helper functions.
125 */
126static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
127{
128 int cc;
129
130 while (1) {
131 cc = __pcpu_sigp(addr, order, parm, NULL);
132 if (cc != SIGP_CC_BUSY)
133 return cc;
134 cpu_relax();
135 }
136}
137
138static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
139{
140 int cc, retry;
141
142 for (retry = 0; ; retry++) {
143 cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
144 if (cc != SIGP_CC_BUSY)
145 break;
146 if (retry >= 3)
147 udelay(10);
148 }
149 return cc;
150}
151
152static inline int pcpu_stopped(struct pcpu *pcpu)
153{
154 u32 status;
155
156 if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
157 0, &status) != SIGP_CC_STATUS_STORED)
158 return 0;
159 return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
160}
161
162static inline int pcpu_running(struct pcpu *pcpu)
163{
164 if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
165 0, NULL) != SIGP_CC_STATUS_STORED)
166 return 1;
167 /* Status stored condition code is equivalent to cpu not running. */
168 return 0;
169}
170
171/*
172 * Find struct pcpu by cpu address.
173 */
174static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
175{
176 int cpu;
177
178 for_each_cpu(cpu, mask)
179 if (pcpu_devices[cpu].address == address)
180 return pcpu_devices + cpu;
181 return NULL;
182}
183
184static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
185{
186 int order;
187
188 if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
189 return;
190 order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
191 pcpu->ec_clk = get_tod_clock_fast();
192 pcpu_sigp_retry(pcpu, order, 0);
193}
194
195static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
196{
197 unsigned long async_stack, nodat_stack, mcck_stack;
198 struct lowcore *lc;
199
200 lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
201 nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
202 async_stack = stack_alloc();
203 mcck_stack = stack_alloc();
204 if (!lc || !nodat_stack || !async_stack || !mcck_stack)
205 goto out;
206 memcpy(lc, &S390_lowcore, 512);
207 memset((char *) lc + 512, 0, sizeof(*lc) - 512);
208 lc->async_stack = async_stack + STACK_INIT_OFFSET;
209 lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
210 lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
211 lc->cpu_nr = cpu;
212 lc->spinlock_lockval = arch_spin_lockval(cpu);
213 lc->spinlock_index = 0;
214 lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
215 lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
216 lc->preempt_count = PREEMPT_DISABLED;
217 if (nmi_alloc_mcesa(&lc->mcesad))
218 goto out;
219 if (abs_lowcore_map(cpu, lc, true))
220 goto out_mcesa;
221 lowcore_ptr[cpu] = lc;
222 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, __pa(lc));
223 return 0;
224
225out_mcesa:
226 nmi_free_mcesa(&lc->mcesad);
227out:
228 stack_free(mcck_stack);
229 stack_free(async_stack);
230 free_pages(nodat_stack, THREAD_SIZE_ORDER);
231 free_pages((unsigned long) lc, LC_ORDER);
232 return -ENOMEM;
233}
234
235static void pcpu_free_lowcore(struct pcpu *pcpu)
236{
237 unsigned long async_stack, nodat_stack, mcck_stack;
238 struct lowcore *lc;
239 int cpu;
240
241 cpu = pcpu - pcpu_devices;
242 lc = lowcore_ptr[cpu];
243 nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
244 async_stack = lc->async_stack - STACK_INIT_OFFSET;
245 mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
246 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
247 lowcore_ptr[cpu] = NULL;
248 abs_lowcore_unmap(cpu);
249 nmi_free_mcesa(&lc->mcesad);
250 stack_free(async_stack);
251 stack_free(mcck_stack);
252 free_pages(nodat_stack, THREAD_SIZE_ORDER);
253 free_pages((unsigned long) lc, LC_ORDER);
254}
255
256static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
257{
258 struct lowcore *lc, *abs_lc;
259
260 lc = lowcore_ptr[cpu];
261 cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
262 cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
263 lc->cpu_nr = cpu;
264 lc->restart_flags = RESTART_FLAG_CTLREGS;
265 lc->spinlock_lockval = arch_spin_lockval(cpu);
266 lc->spinlock_index = 0;
267 lc->percpu_offset = __per_cpu_offset[cpu];
268 lc->kernel_asce = S390_lowcore.kernel_asce;
269 lc->user_asce = s390_invalid_asce;
270 lc->machine_flags = S390_lowcore.machine_flags;
271 lc->user_timer = lc->system_timer =
272 lc->steal_timer = lc->avg_steal_timer = 0;
273 abs_lc = get_abs_lowcore();
274 memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area));
275 put_abs_lowcore(abs_lc);
276 lc->cregs_save_area[1] = lc->kernel_asce;
277 lc->cregs_save_area[7] = lc->user_asce;
278 save_access_regs((unsigned int *) lc->access_regs_save_area);
279 arch_spin_lock_setup(cpu);
280}
281
282static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
283{
284 struct lowcore *lc;
285 int cpu;
286
287 cpu = pcpu - pcpu_devices;
288 lc = lowcore_ptr[cpu];
289 lc->kernel_stack = (unsigned long)task_stack_page(tsk) + STACK_INIT_OFFSET;
290 lc->current_task = (unsigned long)tsk;
291 lc->lpp = LPP_MAGIC;
292 lc->current_pid = tsk->pid;
293 lc->user_timer = tsk->thread.user_timer;
294 lc->guest_timer = tsk->thread.guest_timer;
295 lc->system_timer = tsk->thread.system_timer;
296 lc->hardirq_timer = tsk->thread.hardirq_timer;
297 lc->softirq_timer = tsk->thread.softirq_timer;
298 lc->steal_timer = 0;
299}
300
301static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
302{
303 struct lowcore *lc;
304 int cpu;
305
306 cpu = pcpu - pcpu_devices;
307 lc = lowcore_ptr[cpu];
308 lc->restart_stack = lc->kernel_stack;
309 lc->restart_fn = (unsigned long) func;
310 lc->restart_data = (unsigned long) data;
311 lc->restart_source = -1U;
312 pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
313}
314
315typedef void (pcpu_delegate_fn)(void *);
316
317/*
318 * Call function via PSW restart on pcpu and stop the current cpu.
319 */
320static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
321{
322 func(data); /* should not return */
323}
324
325static void pcpu_delegate(struct pcpu *pcpu,
326 pcpu_delegate_fn *func,
327 void *data, unsigned long stack)
328{
329 struct lowcore *lc, *abs_lc;
330 unsigned int source_cpu;
331
332 lc = lowcore_ptr[pcpu - pcpu_devices];
333 source_cpu = stap();
334
335 if (pcpu->address == source_cpu) {
336 call_on_stack(2, stack, void, __pcpu_delegate,
337 pcpu_delegate_fn *, func, void *, data);
338 }
339 /* Stop target cpu (if func returns this stops the current cpu). */
340 pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
341 pcpu_sigp_retry(pcpu, SIGP_CPU_RESET, 0);
342 /* Restart func on the target cpu and stop the current cpu. */
343 if (lc) {
344 lc->restart_stack = stack;
345 lc->restart_fn = (unsigned long)func;
346 lc->restart_data = (unsigned long)data;
347 lc->restart_source = source_cpu;
348 } else {
349 abs_lc = get_abs_lowcore();
350 abs_lc->restart_stack = stack;
351 abs_lc->restart_fn = (unsigned long)func;
352 abs_lc->restart_data = (unsigned long)data;
353 abs_lc->restart_source = source_cpu;
354 put_abs_lowcore(abs_lc);
355 }
356 asm volatile(
357 "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
358 " brc 2,0b # busy, try again\n"
359 "1: sigp 0,%1,%3 # sigp stop to current cpu\n"
360 " brc 2,1b # busy, try again\n"
361 : : "d" (pcpu->address), "d" (source_cpu),
362 "K" (SIGP_RESTART), "K" (SIGP_STOP)
363 : "0", "1", "cc");
364 for (;;) ;
365}
366
367/*
368 * Enable additional logical cpus for multi-threading.
369 */
370static int pcpu_set_smt(unsigned int mtid)
371{
372 int cc;
373
374 if (smp_cpu_mtid == mtid)
375 return 0;
376 cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
377 if (cc == 0) {
378 smp_cpu_mtid = mtid;
379 smp_cpu_mt_shift = 0;
380 while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
381 smp_cpu_mt_shift++;
382 pcpu_devices[0].address = stap();
383 }
384 return cc;
385}
386
387/*
388 * Call function on an online CPU.
389 */
390void smp_call_online_cpu(void (*func)(void *), void *data)
391{
392 struct pcpu *pcpu;
393
394 /* Use the current cpu if it is online. */
395 pcpu = pcpu_find_address(cpu_online_mask, stap());
396 if (!pcpu)
397 /* Use the first online cpu. */
398 pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
399 pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
400}
401
402/*
403 * Call function on the ipl CPU.
404 */
405void smp_call_ipl_cpu(void (*func)(void *), void *data)
406{
407 struct lowcore *lc = lowcore_ptr[0];
408
409 if (pcpu_devices[0].address == stap())
410 lc = &S390_lowcore;
411
412 pcpu_delegate(&pcpu_devices[0], func, data,
413 lc->nodat_stack);
414}
415
416int smp_find_processor_id(u16 address)
417{
418 int cpu;
419
420 for_each_present_cpu(cpu)
421 if (pcpu_devices[cpu].address == address)
422 return cpu;
423 return -1;
424}
425
426void schedule_mcck_handler(void)
427{
428 pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
429}
430
431bool notrace arch_vcpu_is_preempted(int cpu)
432{
433 if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
434 return false;
435 if (pcpu_running(pcpu_devices + cpu))
436 return false;
437 return true;
438}
439EXPORT_SYMBOL(arch_vcpu_is_preempted);
440
441void notrace smp_yield_cpu(int cpu)
442{
443 if (!MACHINE_HAS_DIAG9C)
444 return;
445 diag_stat_inc_norecursion(DIAG_STAT_X09C);
446 asm volatile("diag %0,0,0x9c"
447 : : "d" (pcpu_devices[cpu].address));
448}
449EXPORT_SYMBOL_GPL(smp_yield_cpu);
450
451/*
452 * Send cpus emergency shutdown signal. This gives the cpus the
453 * opportunity to complete outstanding interrupts.
454 */
455void notrace smp_emergency_stop(void)
456{
457 static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
458 static cpumask_t cpumask;
459 u64 end;
460 int cpu;
461
462 arch_spin_lock(&lock);
463 cpumask_copy(&cpumask, cpu_online_mask);
464 cpumask_clear_cpu(smp_processor_id(), &cpumask);
465
466 end = get_tod_clock() + (1000000UL << 12);
467 for_each_cpu(cpu, &cpumask) {
468 struct pcpu *pcpu = pcpu_devices + cpu;
469 set_bit(ec_stop_cpu, &pcpu->ec_mask);
470 while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
471 0, NULL) == SIGP_CC_BUSY &&
472 get_tod_clock() < end)
473 cpu_relax();
474 }
475 while (get_tod_clock() < end) {
476 for_each_cpu(cpu, &cpumask)
477 if (pcpu_stopped(pcpu_devices + cpu))
478 cpumask_clear_cpu(cpu, &cpumask);
479 if (cpumask_empty(&cpumask))
480 break;
481 cpu_relax();
482 }
483 arch_spin_unlock(&lock);
484}
485NOKPROBE_SYMBOL(smp_emergency_stop);
486
487/*
488 * Stop all cpus but the current one.
489 */
490void smp_send_stop(void)
491{
492 int cpu;
493
494 /* Disable all interrupts/machine checks */
495 __load_psw_mask(PSW_KERNEL_BITS);
496 trace_hardirqs_off();
497
498 debug_set_critical();
499
500 if (oops_in_progress)
501 smp_emergency_stop();
502
503 /* stop all processors */
504 for_each_online_cpu(cpu) {
505 if (cpu == smp_processor_id())
506 continue;
507 pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
508 while (!pcpu_stopped(pcpu_devices + cpu))
509 cpu_relax();
510 }
511}
512
513/*
514 * This is the main routine where commands issued by other
515 * cpus are handled.
516 */
517static void smp_handle_ext_call(void)
518{
519 unsigned long bits;
520
521 /* handle bit signal external calls */
522 bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
523 if (test_bit(ec_stop_cpu, &bits))
524 smp_stop_cpu();
525 if (test_bit(ec_schedule, &bits))
526 scheduler_ipi();
527 if (test_bit(ec_call_function_single, &bits))
528 generic_smp_call_function_single_interrupt();
529 if (test_bit(ec_mcck_pending, &bits))
530 s390_handle_mcck();
531 if (test_bit(ec_irq_work, &bits))
532 irq_work_run();
533}
534
535static void do_ext_call_interrupt(struct ext_code ext_code,
536 unsigned int param32, unsigned long param64)
537{
538 inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
539 smp_handle_ext_call();
540}
541
542void arch_send_call_function_ipi_mask(const struct cpumask *mask)
543{
544 int cpu;
545
546 for_each_cpu(cpu, mask)
547 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
548}
549
550void arch_send_call_function_single_ipi(int cpu)
551{
552 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
553}
554
555/*
556 * this function sends a 'reschedule' IPI to another CPU.
557 * it goes straight through and wastes no time serializing
558 * anything. Worst case is that we lose a reschedule ...
559 */
560void arch_smp_send_reschedule(int cpu)
561{
562 pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
563}
564
565#ifdef CONFIG_IRQ_WORK
566void arch_irq_work_raise(void)
567{
568 pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work);
569}
570#endif
571
572#ifdef CONFIG_CRASH_DUMP
573
574int smp_store_status(int cpu)
575{
576 struct lowcore *lc;
577 struct pcpu *pcpu;
578 unsigned long pa;
579
580 pcpu = pcpu_devices + cpu;
581 lc = lowcore_ptr[cpu];
582 pa = __pa(&lc->floating_pt_save_area);
583 if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
584 pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
585 return -EIO;
586 if (!cpu_has_vx() && !MACHINE_HAS_GS)
587 return 0;
588 pa = lc->mcesad & MCESA_ORIGIN_MASK;
589 if (MACHINE_HAS_GS)
590 pa |= lc->mcesad & MCESA_LC_MASK;
591 if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
592 pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
593 return -EIO;
594 return 0;
595}
596
597/*
598 * Collect CPU state of the previous, crashed system.
599 * There are four cases:
600 * 1) standard zfcp/nvme dump
601 * condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
602 * The state for all CPUs except the boot CPU needs to be collected
603 * with sigp stop-and-store-status. The boot CPU state is located in
604 * the absolute lowcore of the memory stored in the HSA. The zcore code
605 * will copy the boot CPU state from the HSA.
606 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
607 * condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
608 * The state for all CPUs except the boot CPU needs to be collected
609 * with sigp stop-and-store-status. The firmware or the boot-loader
610 * stored the registers of the boot CPU in the absolute lowcore in the
611 * memory of the old system.
612 * 3) kdump and the old kernel did not store the CPU state,
613 * or stand-alone kdump for DASD
614 * condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
615 * The state for all CPUs except the boot CPU needs to be collected
616 * with sigp stop-and-store-status. The kexec code or the boot-loader
617 * stored the registers of the boot CPU in the memory of the old system.
618 * 4) kdump and the old kernel stored the CPU state
619 * condition: OLDMEM_BASE != NULL && is_kdump_kernel()
620 * This case does not exist for s390 anymore, setup_arch explicitly
621 * deactivates the elfcorehdr= kernel parameter
622 */
623static bool dump_available(void)
624{
625 return oldmem_data.start || is_ipl_type_dump();
626}
627
628void __init smp_save_dump_ipl_cpu(void)
629{
630 struct save_area *sa;
631 void *regs;
632
633 if (!dump_available())
634 return;
635 sa = save_area_alloc(true);
636 regs = memblock_alloc(512, 8);
637 if (!sa || !regs)
638 panic("could not allocate memory for boot CPU save area\n");
639 copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
640 save_area_add_regs(sa, regs);
641 memblock_free(regs, 512);
642 if (cpu_has_vx())
643 save_area_add_vxrs(sa, boot_cpu_vector_save_area);
644}
645
646void __init smp_save_dump_secondary_cpus(void)
647{
648 int addr, boot_cpu_addr, max_cpu_addr;
649 struct save_area *sa;
650 void *page;
651
652 if (!dump_available())
653 return;
654 /* Allocate a page as dumping area for the store status sigps */
655 page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
656 if (!page)
657 panic("ERROR: Failed to allocate %lx bytes below %lx\n",
658 PAGE_SIZE, 1UL << 31);
659
660 /* Set multi-threading state to the previous system. */
661 pcpu_set_smt(sclp.mtid_prev);
662 boot_cpu_addr = stap();
663 max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
664 for (addr = 0; addr <= max_cpu_addr; addr++) {
665 if (addr == boot_cpu_addr)
666 continue;
667 if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
668 SIGP_CC_NOT_OPERATIONAL)
669 continue;
670 sa = save_area_alloc(false);
671 if (!sa)
672 panic("could not allocate memory for save area\n");
673 __pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(page));
674 save_area_add_regs(sa, page);
675 if (cpu_has_vx()) {
676 __pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(page));
677 save_area_add_vxrs(sa, page);
678 }
679 }
680 memblock_free(page, PAGE_SIZE);
681 diag_amode31_ops.diag308_reset();
682 pcpu_set_smt(0);
683}
684#endif /* CONFIG_CRASH_DUMP */
685
686void smp_cpu_set_polarization(int cpu, int val)
687{
688 pcpu_devices[cpu].polarization = val;
689}
690
691int smp_cpu_get_polarization(int cpu)
692{
693 return pcpu_devices[cpu].polarization;
694}
695
696int smp_cpu_get_cpu_address(int cpu)
697{
698 return pcpu_devices[cpu].address;
699}
700
701static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
702{
703 static int use_sigp_detection;
704 int address;
705
706 if (use_sigp_detection || sclp_get_core_info(info, early)) {
707 use_sigp_detection = 1;
708 for (address = 0;
709 address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
710 address += (1U << smp_cpu_mt_shift)) {
711 if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
712 SIGP_CC_NOT_OPERATIONAL)
713 continue;
714 info->core[info->configured].core_id =
715 address >> smp_cpu_mt_shift;
716 info->configured++;
717 }
718 info->combined = info->configured;
719 }
720}
721
722static int smp_add_present_cpu(int cpu);
723
724static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
725 bool configured, bool early)
726{
727 struct pcpu *pcpu;
728 int cpu, nr, i;
729 u16 address;
730
731 nr = 0;
732 if (sclp.has_core_type && core->type != boot_core_type)
733 return nr;
734 cpu = cpumask_first(avail);
735 address = core->core_id << smp_cpu_mt_shift;
736 for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
737 if (pcpu_find_address(cpu_present_mask, address + i))
738 continue;
739 pcpu = pcpu_devices + cpu;
740 pcpu->address = address + i;
741 if (configured)
742 pcpu->state = CPU_STATE_CONFIGURED;
743 else
744 pcpu->state = CPU_STATE_STANDBY;
745 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
746 set_cpu_present(cpu, true);
747 if (!early && smp_add_present_cpu(cpu) != 0)
748 set_cpu_present(cpu, false);
749 else
750 nr++;
751 cpumask_clear_cpu(cpu, avail);
752 cpu = cpumask_next(cpu, avail);
753 }
754 return nr;
755}
756
static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
	struct sclp_core_entry *core;
	static cpumask_t avail;
	bool configured;
	u16 core_id;
	int nr, i;

	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	/*
	 * Add IPL core first (which got logical CPU number 0) to make sure
	 * that all SMT threads get subsequent logical CPU numbers.
	 */
	if (early) {
		core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
		for (i = 0; i < info->configured; i++) {
			core = &info->core[i];
			if (core->core_id == core_id) {
				nr += smp_add_core(core, &avail, true, early);
				break;
			}
		}
	}
	for (i = 0; i < info->combined; i++) {
		configured = i < info->configured;
		nr += smp_add_core(&info->core[i], &avail, configured, early);
	}
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return nr;
}

void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	if (!info)
		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
		      __func__, sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	__smp_rescan_cpus(info, true);
	memblock_free(info, sizeof(*info));
}

/*
 * Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	int cpu = raw_smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long)restart_stack;
	S390_lowcore.restart_fn = (unsigned long)do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1U;
	S390_lowcore.restart_flags = 0;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	cpu_init();
	rcutree_report_cpu_starting(cpu);
	init_cpu_timer();
	vtime_init();
	vdso_getcpu_init();
	pfault_init();
	cpumask_set_cpu(cpu, &cpu_setup_mask);
	update_cpu_masks();
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	int rc;

	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	/*
	 * Make sure global control register contents do not change
	 * until new CPU has initialized control registers.
	 */
	system_ctlreg_lock();
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	system_ctlreg_unlock();
	return 0;
}

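/*
 * The "possible_cpus=" kernel parameter caps the number of possible
 * CPUs, e.g. booting with "possible_cpus=4" limits the system to at
 * most four logical CPUs.
 */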
static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

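/*
 * Runs on the CPU that is going down: drop it from the online and setup
 * masks and shut off external, I/O and most machine check interrupt
 * sources in its control registers.
 */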
int __cpu_disable(void)
{
	struct ctlreg cregs[16];
	int cpu;

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	cpu = smp_processor_id();
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, &cpu_setup_mask);
	update_cpu_masks();
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__local_ctl_store(0, 15, cregs);
	cregs[0].val &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6].val &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14].val &= ~0x1f000000UL;	/* disable most machine checks */
	__local_ctl_load(0, 15, cregs);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

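/*
 * Final code executed by the dying CPU: stop itself with SIGP STOP.
 * The endless loop should never be reached once the stop order has
 * been accepted.
 */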
void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

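/*
 * Illustrative example (values made up): with sclp.max_cores = 8 and
 * two threads per core, sclp_max = 8 * 2 = 16, so with no
 * "possible_cpus=" parameter CPUs 0-15 become possible, provided
 * nr_cpu_ids allows it.
 */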
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
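	/* request the 0x1201 emergency signal external interrupt */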
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	system_ctl_set_bit(0, 14);
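	/* request the 0x1202 external call external interrupt */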
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	system_ctl_set_bit(0, 13);
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}

/*
 * The frequency of the profiling timer can be changed by writing a
 * multiplier value into /proc/profile. Usually you want to run this
 * on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

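/*
 * Deconfigure (0) or configure (1) a core via sysfs, e.g.:
 *	echo 0 > /sys/devices/system/cpu/cpu2/configure
 * All SMT threads of the core change state together; none of them may
 * be online while the state is being changed.
 */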
static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
	&dev_attr_configure.attr,
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

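/*
 * cpuhp callbacks: create the idle counter attributes when a CPU comes
 * online and remove them again before it goes down.
 */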
static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = !!cpu;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
	unregister_cpu(c);
out:
	return rc;
}

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	nr = __smp_rescan_cpus(info, false);
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

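/*
 * Trigger a rescan for new (standby) CPUs via sysfs, e.g.:
 *	echo 1 > /sys/devices/system/cpu/rescan
 */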
static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus();
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);

static int __init s390_smp_init(void)
{
	struct device *dev_root;
	int cpu, rc = 0;

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		rc = device_create_file(dev_root, &dev_attr_rescan);
		put_device(dev_root);
		if (rc)
			return rc;
	}

	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);