// SPDX-License-Identifier: GPL-2.0
/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/ctlreg.h>
#include <asm/pfault.h>
#include <asm/diag.h>
#include <asm/facility.h>
#include <asm/fpu.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/abs_lowcore.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include <asm/vdso.h>
#include <asm/maccess.h>
#include "entry.h"

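/*
 * Bits in the ec_mask of a pcpu: each bit requests one action on the
 * target CPU. pcpu_ec_call() sets a bit and signals the target via sigp
 * external call or emergency signal; smp_handle_ext_call() consumes them.
 */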
enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
	ec_mcck_pending,
	ec_irq_work,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static u8 boot_core_type;
DEFINE_PER_CPU(struct pcpu, pcpu_devices);
/*
 * Pointer to the pcpu area of the boot CPU. This is required when a restart
 * interrupt is triggered on an offline CPU. For that case accessing percpu
 * data with the common primitives does not work, since the percpu offset is
 * stored in a non-existent lowcore.
 */
static struct pcpu *ipl_pcpu;

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;
cpumask_t cpu_setup_mask;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
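/*
 * __pcpu_sigp_relax() busy-waits until the order is no longer rejected
 * as busy, while pcpu_sigp_retry() backs off with a small delay once an
 * order has been busy several times in a row.
 */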
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 status;

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (per_cpu(pcpu_devices, cpu).address == address)
			return &per_cpu(pcpu_devices, cpu);
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}

static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;

	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	async_stack = stack_alloc();
	mcck_stack = stack_alloc();
	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
		goto out;
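	/*
	 * Seed the new lowcore with the first 512 bytes of the current
	 * CPU's lowcore and clear the remainder.
	 */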
	memcpy(lc, get_lowcore(), 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;
	if (nmi_alloc_mcesa(&lc->mcesad))
		goto out;
	if (abs_lowcore_map(cpu, lc, true))
		goto out_mcesa;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, __pa(lc));
	return 0;

out_mcesa:
	nmi_free_mcesa(&lc->mcesad);
out:
	stack_free(mcck_stack);
	stack_free(async_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
	return -ENOMEM;
}

static void pcpu_free_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;

	lc = lowcore_ptr[cpu];
	nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
	async_stack = lc->async_stack - STACK_INIT_OFFSET;
	mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[cpu] = NULL;
	abs_lowcore_unmap(cpu);
	nmi_free_mcesa(&lc->mcesad);
	stack_free(async_stack);
	stack_free(mcck_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
}

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc, *abs_lc;

	lc = lowcore_ptr[cpu];
	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->pcpu = (unsigned long)pcpu;
	lc->restart_flags = RESTART_FLAG_CTLREGS;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = get_lowcore()->kernel_asce;
	lc->user_asce = s390_invalid_asce;
	lc->machine_flags = get_lowcore()->machine_flags;
	lc->user_timer = lc->system_timer =
		lc->steal_timer = lc->avg_steal_timer = 0;
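	/*
	 * Inherit the global control register contents from the absolute
	 * lowcore and patch in this CPU's kernel and user ASCEs below.
	 */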
	abs_lc = get_abs_lowcore();
	memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area));
	put_abs_lowcore(abs_lc);
	lc->cregs_save_area[1] = lc->kernel_asce;
	lc->cregs_save_area[7] = lc->user_asce;
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	arch_spin_lock_setup(cpu);
}

static void pcpu_attach_task(int cpu, struct task_struct *tsk)
{
	struct lowcore *lc;

	lc = lowcore_ptr[cpu];
	lc->kernel_stack = (unsigned long)task_stack_page(tsk) + STACK_INIT_OFFSET;
	lc->current_task = (unsigned long)tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(int cpu, void (*func)(void *), void *data)
{
	struct lowcore *lc;

	lc = lowcore_ptr[cpu];
	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1U;
	pcpu_sigp_retry(per_cpu_ptr(&pcpu_devices, cpu), SIGP_RESTART, 0);
}

typedef void (pcpu_delegate_fn)(void *);

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
{
	func(data);	/* should not return */
}

static void pcpu_delegate(struct pcpu *pcpu, int cpu,
			  pcpu_delegate_fn *func,
			  void *data, unsigned long stack)
{
	struct lowcore *lc, *abs_lc;
	unsigned int source_cpu;

	lc = lowcore_ptr[cpu];
	source_cpu = stap();

	if (pcpu->address == source_cpu) {
		call_on_stack(2, stack, void, __pcpu_delegate,
			      pcpu_delegate_fn *, func, void *, data);
	}
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	pcpu_sigp_retry(pcpu, SIGP_CPU_RESET, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	if (lc) {
		lc->restart_stack = stack;
		lc->restart_fn = (unsigned long)func;
		lc->restart_data = (unsigned long)data;
		lc->restart_source = source_cpu;
	} else {
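		/*
		 * The target CPU has no lowcore mapped, e.g. because it is
		 * offline; stage the restart parameters in the absolute
		 * lowcore instead.
		 */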
		abs_lc = get_abs_lowcore();
		abs_lc->restart_stack = stack;
		abs_lc->restart_fn = (unsigned long)func;
		abs_lc->restart_data = (unsigned long)data;
		abs_lc->restart_source = source_cpu;
		put_abs_lowcore(abs_lc);
	}
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		per_cpu(pcpu_devices, 0).address = stap();
	}
	return cc;
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = lowcore_ptr[0];

	if (ipl_pcpu->address == stap())
		lc = get_lowcore();

	pcpu_delegate(ipl_pcpu, 0, func, data, lc->nodat_stack);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (per_cpu(pcpu_devices, cpu).address == address)
			return cpu;
	return -1;
}

void schedule_mcck_handler(void)
{
	pcpu_ec_call(this_cpu_ptr(&pcpu_devices), ec_mcck_pending);
}

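/*
 * A vCPU is considered preempted when it is neither waiting voluntarily
 * in an enabled wait nor currently backed by a physical CPU.
 */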
bool notrace arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(per_cpu_ptr(&pcpu_devices, cpu)))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

void notrace smp_yield_cpu(int cpu)
{
	if (!MACHINE_HAS_DIAG9C)
		return;
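	/* diag 0x9c: yield this CPU's time slice in favor of the target CPU */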
	diag_stat_inc_norecursion(DIAG_STAT_X09C);
	asm volatile("diag %0,0,0x9c"
		     : : "d" (per_cpu(pcpu_devices, cpu).address));
}
EXPORT_SYMBOL_GPL(smp_yield_cpu);

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
	static cpumask_t cpumask;
	u64 end;
	int cpu;

	arch_spin_lock(&lock);
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

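	/* Give the CPUs up to one second; 1 microsecond is 1 << 12 TOD clock units. */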
	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(per_cpu_ptr(&pcpu_devices, cpu)))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
	arch_spin_unlock(&lock);
}
NOKPROBE_SYMBOL(smp_emergency_stop);

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	struct pcpu *pcpu;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu = per_cpu_ptr(&pcpu_devices, cpu);
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = this_cpu_xchg(pcpu_devices.ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
	if (test_bit(ec_mcck_pending, &bits))
		s390_handle_mcck();
	if (test_bit(ec_irq_work, &bits))
		irq_work_run();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void arch_smp_send_reschedule(int cpu)
{
	pcpu_ec_call(per_cpu_ptr(&pcpu_devices, cpu), ec_schedule);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	pcpu_ec_call(this_cpu_ptr(&pcpu_devices), ec_irq_work);
}
#endif

#ifdef CONFIG_CRASH_DUMP

int smp_store_status(int cpu)
{
	struct lowcore *lc;
	struct pcpu *pcpu;
	unsigned long pa;

	pcpu = per_cpu_ptr(&pcpu_devices, cpu);
	lc = lowcore_ptr[cpu];
	pa = __pa(&lc->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!cpu_has_vx() && !MACHINE_HAS_GS)
		return 0;
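	/*
	 * Store the additional status (vector and/or guarded-storage
	 * registers) at the machine-check extended save area; the low bits
	 * of the address carry the length code when guarded storage exists.
	 */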
	pa = lc->mcesad & MCESA_ORIGIN_MASK;
	if (MACHINE_HAS_GS)
		pa |= lc->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are three cases:
 * 1) standard zfcp/nvme dump
 *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == false
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 *
 * Note that the legacy kdump mode where the old kernel stored the CPU states
 * no longer exists: setup_arch() explicitly deactivates the elfcorehdr=
 * kernel parameter. The is_kdump_kernel() implementation on s390 is independent
 * of the elfcorehdr= parameter.
 */
static bool dump_available(void)
{
	return oldmem_data.start || is_ipl_type_dump();
}

void __init smp_save_dump_ipl_cpu(void)
{
	struct save_area *sa;
	void *regs;

	if (!dump_available())
		return;
	sa = save_area_alloc(true);
	regs = memblock_alloc(512, 8);
	if (!sa || !regs)
		panic("could not allocate memory for boot CPU save area\n");
	copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
	save_area_add_regs(sa, regs);
	memblock_free(regs, 512);
	if (cpu_has_vx())
		save_area_add_vxrs(sa, boot_cpu_vector_save_area);
}

void __init smp_save_dump_secondary_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	void *page;

	if (!dump_available())
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!page)
		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
		      PAGE_SIZE, 1UL << 31);

	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (addr == boot_cpu_addr)
			continue;
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		sa = save_area_alloc(false);
		if (!sa)
			panic("could not allocate memory for save area\n");
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(page));
		save_area_add_regs(sa, page);
		if (cpu_has_vx()) {
			__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(page));
			save_area_add_vxrs(sa, page);
		}
	}
	memblock_free(page, PAGE_SIZE);
	diag_amode31_ops.diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	per_cpu(pcpu_devices, cpu).polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return per_cpu(pcpu_devices, cpu).polarization;
}

void smp_cpu_set_capacity(int cpu, unsigned long val)
{
	per_cpu(pcpu_devices, cpu).capacity = val;
}

unsigned long smp_cpu_get_capacity(int cpu)
{
	return per_cpu(pcpu_devices, cpu).capacity;
}

void smp_set_core_capacity(int cpu, unsigned long val)
{
	int i;

	cpu = smp_get_base_cpu(cpu);
	for (i = cpu; (i <= cpu + smp_cpu_mtid) && (i < nr_cpu_ids); i++)
		smp_cpu_set_capacity(i, val);
}

int smp_cpu_get_cpu_address(int cpu)
{
	return per_cpu(pcpu_devices, cpu).address;
}

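/*
 * Read the core info via SCLP. If that fails, fall back to probing all
 * possible CPU addresses with sigp sense, now and on every later call.
 */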
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}

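/*
 * A physical core provides smp_cpu_mtid + 1 hardware threads; thread i of
 * a core has the CPU address (core_id << smp_cpu_mt_shift) + i. Add all
 * threads of one core to the present mask.
 */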
static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
			bool configured, bool early)
{
	struct pcpu *pcpu;
	int cpu, nr, i;
	u16 address;

	nr = 0;
	if (sclp.has_core_type && core->type != boot_core_type)
		return nr;
	cpu = cpumask_first(avail);
	address = core->core_id << smp_cpu_mt_shift;
	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
		if (pcpu_find_address(cpu_present_mask, address + i))
			continue;
		pcpu = per_cpu_ptr(&pcpu_devices, cpu);
		pcpu->address = address + i;
		if (configured)
			pcpu->state = CPU_STATE_CONFIGURED;
		else
			pcpu->state = CPU_STATE_STANDBY;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
		set_cpu_present(cpu, true);
		if (!early && arch_register_cpu(cpu))
			set_cpu_present(cpu, false);
		else
			nr++;
		cpumask_clear_cpu(cpu, avail);
		cpu = cpumask_next(cpu, avail);
	}
	return nr;
}

static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
	struct sclp_core_entry *core;
	static cpumask_t avail;
	bool configured;
	u16 core_id;
	int nr, i;

	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	/*
	 * Add IPL core first (which got logical CPU number 0) to make sure
	 * that all SMT threads get subsequent logical CPU numbers.
	 */
	if (early) {
		core_id = per_cpu(pcpu_devices, 0).address >> smp_cpu_mt_shift;
		for (i = 0; i < info->configured; i++) {
			core = &info->core[i];
			if (core->core_id == core_id) {
				nr += smp_add_core(core, &avail, true, early);
				break;
			}
		}
	}
	for (i = 0; i < info->combined; i++) {
		configured = i < info->configured;
		nr += smp_add_core(&info->core[i], &avail, configured, early);
	}
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return nr;
}

void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	if (!info)
		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
		      __func__, sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	memblock_free(info, sizeof(*info));
}

/*
 * Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	struct lowcore *lc = get_lowcore();
	int cpu = raw_smp_processor_id();

	lc->last_update_clock = get_tod_clock();
	lc->restart_stack = (unsigned long)restart_stack;
	lc->restart_fn = (unsigned long)do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1U;
	lc->restart_flags = 0;
	restore_access_regs(lc->access_regs_save_area);
	cpu_init();
	rcutree_report_cpu_starting(cpu);
	init_cpu_timer();
	vtime_init();
	vdso_getcpu_init();
	pfault_init();
	cpumask_set_cpu(cpu, &cpu_setup_mask);
	update_cpu_masks();
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu = per_cpu_ptr(&pcpu_devices, cpu);
	int rc;

	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	/*
	 * Make sure global control register contents do not change
	 * until new CPU has initialized control registers.
	 */
	system_ctlreg_lock();
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(cpu, tidle);
	pcpu_start_fn(cpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	system_ctlreg_unlock();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

int __cpu_disable(void)
{
	struct ctlreg cregs[16];
	int cpu;

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	cpu = smp_processor_id();
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, &cpu_setup_mask);
	update_cpu_masks();
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__local_ctl_store(0, 15, cregs);
	cregs[0].val &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6].val &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14].val &= ~0x1f000000UL;	/* disable most machine checks */
	__local_ctl_load(0, 15, cregs);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = per_cpu_ptr(&pcpu_devices, cpu);
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu, cpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
	pcpu->flags = 0;
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(this_cpu_ptr(&pcpu_devices), SIGP_STOP, 0);
	for (;;) ;
}

void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
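	/* Fall back to nr_cpu_ids if the SCLP did not report a core limit. */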
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
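	/* CR0 bit 14: enable the emergency-signal external interrupt subclass. */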
	system_ctl_set_bit(0, 14);
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
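	/* CR0 bit 13: enable the external-call external interrupt subclass. */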
	system_ctl_set_bit(0, 13);
	smp_rescan_cpus(true);
}

void __init smp_prepare_boot_cpu(void)
{
	struct lowcore *lc = get_lowcore();

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	lc->percpu_offset = __per_cpu_offset[0];
	ipl_pcpu = per_cpu_ptr(&pcpu_devices, 0);
	ipl_pcpu->state = CPU_STATE_CONFIGURED;
	lc->pcpu = (unsigned long)ipl_pcpu;
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	smp_cpu_set_capacity(0, CPU_CAPACITY_HIGH);
}

void __init smp_setup_processor_id(void)
{
	struct lowcore *lc = get_lowcore();

	lc->cpu_nr = 0;
	per_cpu(pcpu_devices, 0).address = stap();
	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sysfs_emit(buf, "%d\n", per_cpu(pcpu_devices, dev->id).state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

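	/* Accept exactly one integer; any trailing character makes sscanf() return 2. */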
	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = per_cpu_ptr(&pcpu_devices, cpu);
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			per_cpu(pcpu_devices, cpu + i).state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", per_cpu(pcpu_devices, dev->id).address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
	&dev_attr_configure.attr,
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
	struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);

	return sysfs_create_group(&c->dev.kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
	struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);

	sysfs_remove_group(&c->dev.kobj, &cpu_online_attr_group);
	return 0;
}

bool arch_cpu_is_hotpluggable(int cpu)
{
	return !!cpu;
}

int arch_register_cpu(int cpu)
{
	struct cpu *c = per_cpu_ptr(&cpu_devices, cpu);
	int rc;

	c->hotpluggable = arch_cpu_is_hotpluggable(cpu);
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&c->dev.kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&c->dev.kobj, &cpu_common_attr_group);
out_cpu:
	unregister_cpu(c);
out:
	return rc;
}

int __ref smp_rescan_cpus(bool early)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	nr = __smp_rescan_cpus(info, early);
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus(false);
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);

static int __init s390_smp_init(void)
{
	struct device *dev_root;
	int rc;

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		rc = device_create_file(dev_root, &dev_attr_rescan);
		put_device(dev_root);
		if (rc)
			return rc;
	}
	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
	return rc;
}
subsys_initcall(s390_smp_init);
1/*
2 * SMP related functions
3 *
4 * Copyright IBM Corp. 1999, 2012
5 * Author(s): Denis Joseph Barrow,
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Heiko Carstens <heiko.carstens@de.ibm.com>,
8 *
9 * based on other smp stuff by
10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
11 * (c) 1998 Ingo Molnar
12 *
13 * The code outside of smp.c uses logical cpu numbers, only smp.c does
14 * the translation of logical to physical cpu ids. All new code that
15 * operates on physical cpu numbers needs to go into smp.c.
16 */
17
18#define KMSG_COMPONENT "cpu"
19#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
20
21#include <linux/workqueue.h>
22#include <linux/bootmem.h>
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/mm.h>
26#include <linux/err.h>
27#include <linux/spinlock.h>
28#include <linux/kernel_stat.h>
29#include <linux/delay.h>
30#include <linux/interrupt.h>
31#include <linux/irqflags.h>
32#include <linux/cpu.h>
33#include <linux/slab.h>
34#include <linux/crash_dump.h>
35#include <linux/memblock.h>
36#include <asm/asm-offsets.h>
37#include <asm/diag.h>
38#include <asm/switch_to.h>
39#include <asm/facility.h>
40#include <asm/ipl.h>
41#include <asm/setup.h>
42#include <asm/irq.h>
43#include <asm/tlbflush.h>
44#include <asm/vtimer.h>
45#include <asm/lowcore.h>
46#include <asm/sclp.h>
47#include <asm/vdso.h>
48#include <asm/debug.h>
49#include <asm/os_info.h>
50#include <asm/sigp.h>
51#include <asm/idle.h>
52#include "entry.h"
53
54enum {
55 ec_schedule = 0,
56 ec_call_function_single,
57 ec_stop_cpu,
58};
59
60enum {
61 CPU_STATE_STANDBY,
62 CPU_STATE_CONFIGURED,
63};
64
65static DEFINE_PER_CPU(struct cpu *, cpu_device);
66
67struct pcpu {
68 struct lowcore *lowcore; /* lowcore page(s) for the cpu */
69 unsigned long ec_mask; /* bit mask for ec_xxx functions */
70 unsigned long ec_clk; /* sigp timestamp for ec_xxx */
71 signed char state; /* physical cpu state */
72 signed char polarization; /* physical polarization */
73 u16 address; /* physical cpu address */
74};
75
76static u8 boot_core_type;
77static struct pcpu pcpu_devices[NR_CPUS];
78
79unsigned int smp_cpu_mt_shift;
80EXPORT_SYMBOL(smp_cpu_mt_shift);
81
82unsigned int smp_cpu_mtid;
83EXPORT_SYMBOL(smp_cpu_mtid);
84
85#ifdef CONFIG_CRASH_DUMP
86__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
87#endif
88
89static unsigned int smp_max_threads __initdata = -1U;
90
91static int __init early_nosmt(char *s)
92{
93 smp_max_threads = 1;
94 return 0;
95}
96early_param("nosmt", early_nosmt);
97
98static int __init early_smt(char *s)
99{
100 get_option(&s, &smp_max_threads);
101 return 0;
102}
103early_param("smt", early_smt);
104
105/*
106 * The smp_cpu_state_mutex must be held when changing the state or polarization
107 * member of a pcpu data structure within the pcpu_devices arreay.
108 */
109DEFINE_MUTEX(smp_cpu_state_mutex);
110
111/*
112 * Signal processor helper functions.
113 */
114static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
115{
116 int cc;
117
118 while (1) {
119 cc = __pcpu_sigp(addr, order, parm, NULL);
120 if (cc != SIGP_CC_BUSY)
121 return cc;
122 cpu_relax();
123 }
124}
125
126static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
127{
128 int cc, retry;
129
130 for (retry = 0; ; retry++) {
131 cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
132 if (cc != SIGP_CC_BUSY)
133 break;
134 if (retry >= 3)
135 udelay(10);
136 }
137 return cc;
138}
139
140static inline int pcpu_stopped(struct pcpu *pcpu)
141{
142 u32 uninitialized_var(status);
143
144 if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
145 0, &status) != SIGP_CC_STATUS_STORED)
146 return 0;
147 return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
148}
149
150static inline int pcpu_running(struct pcpu *pcpu)
151{
152 if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
153 0, NULL) != SIGP_CC_STATUS_STORED)
154 return 1;
155 /* Status stored condition code is equivalent to cpu not running. */
156 return 0;
157}
158
159/*
160 * Find struct pcpu by cpu address.
161 */
162static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
163{
164 int cpu;
165
166 for_each_cpu(cpu, mask)
167 if (pcpu_devices[cpu].address == address)
168 return pcpu_devices + cpu;
169 return NULL;
170}
171
172static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
173{
174 int order;
175
176 if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
177 return;
178 order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
179 pcpu->ec_clk = get_tod_clock_fast();
180 pcpu_sigp_retry(pcpu, order, 0);
181}
182
183#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
184#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
185
186static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
187{
188 unsigned long async_stack, panic_stack;
189 struct lowcore *lc;
190
191 if (pcpu != &pcpu_devices[0]) {
192 pcpu->lowcore = (struct lowcore *)
193 __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
194 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
195 panic_stack = __get_free_page(GFP_KERNEL);
196 if (!pcpu->lowcore || !panic_stack || !async_stack)
197 goto out;
198 } else {
199 async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
200 panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
201 }
202 lc = pcpu->lowcore;
203 memcpy(lc, &S390_lowcore, 512);
204 memset((char *) lc + 512, 0, sizeof(*lc) - 512);
205 lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
206 lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
207 lc->cpu_nr = cpu;
208 lc->spinlock_lockval = arch_spin_lockval(cpu);
209 if (MACHINE_HAS_VX)
210 lc->vector_save_area_addr =
211 (unsigned long) &lc->vector_save_area;
212 if (vdso_alloc_per_cpu(lc))
213 goto out;
214 lowcore_ptr[cpu] = lc;
215 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
216 return 0;
217out:
218 if (pcpu != &pcpu_devices[0]) {
219 free_page(panic_stack);
220 free_pages(async_stack, ASYNC_ORDER);
221 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
222 }
223 return -ENOMEM;
224}
225
226#ifdef CONFIG_HOTPLUG_CPU
227
228static void pcpu_free_lowcore(struct pcpu *pcpu)
229{
230 pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
231 lowcore_ptr[pcpu - pcpu_devices] = NULL;
232 vdso_free_per_cpu(pcpu->lowcore);
233 if (pcpu == &pcpu_devices[0])
234 return;
235 free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
236 free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
237 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
238}
239
240#endif /* CONFIG_HOTPLUG_CPU */
241
242static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
243{
244 struct lowcore *lc = pcpu->lowcore;
245
246 cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
247 cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
248 lc->cpu_nr = cpu;
249 lc->spinlock_lockval = arch_spin_lockval(cpu);
250 lc->percpu_offset = __per_cpu_offset[cpu];
251 lc->kernel_asce = S390_lowcore.kernel_asce;
252 lc->machine_flags = S390_lowcore.machine_flags;
253 lc->user_timer = lc->system_timer = lc->steal_timer = 0;
254 __ctl_store(lc->cregs_save_area, 0, 15);
255 save_access_regs((unsigned int *) lc->access_regs_save_area);
256 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
257 MAX_FACILITY_BIT/8);
258}
259
260static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
261{
262 struct lowcore *lc = pcpu->lowcore;
263
264 lc->kernel_stack = (unsigned long) task_stack_page(tsk)
265 + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
266 lc->current_task = (unsigned long) tsk;
267 lc->lpp = LPP_MAGIC;
268 lc->current_pid = tsk->pid;
269 lc->user_timer = tsk->thread.user_timer;
270 lc->system_timer = tsk->thread.system_timer;
271 lc->steal_timer = 0;
272}
273
274static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
275{
276 struct lowcore *lc = pcpu->lowcore;
277
278 lc->restart_stack = lc->kernel_stack;
279 lc->restart_fn = (unsigned long) func;
280 lc->restart_data = (unsigned long) data;
281 lc->restart_source = -1UL;
282 pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
283}
284
285/*
286 * Call function via PSW restart on pcpu and stop the current cpu.
287 */
288static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
289 void *data, unsigned long stack)
290{
291 struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
292 unsigned long source_cpu = stap();
293
294 __load_psw_mask(PSW_KERNEL_BITS);
295 if (pcpu->address == source_cpu)
296 func(data); /* should not return */
297 /* Stop target cpu (if func returns this stops the current cpu). */
298 pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
299 /* Restart func on the target cpu and stop the current cpu. */
300 mem_assign_absolute(lc->restart_stack, stack);
301 mem_assign_absolute(lc->restart_fn, (unsigned long) func);
302 mem_assign_absolute(lc->restart_data, (unsigned long) data);
303 mem_assign_absolute(lc->restart_source, source_cpu);
304 asm volatile(
305 "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
306 " brc 2,0b # busy, try again\n"
307 "1: sigp 0,%1,%3 # sigp stop to current cpu\n"
308 " brc 2,1b # busy, try again\n"
309 : : "d" (pcpu->address), "d" (source_cpu),
310 "K" (SIGP_RESTART), "K" (SIGP_STOP)
311 : "0", "1", "cc");
312 for (;;) ;
313}
314
315/*
316 * Enable additional logical cpus for multi-threading.
317 */
318static int pcpu_set_smt(unsigned int mtid)
319{
320 int cc;
321
322 if (smp_cpu_mtid == mtid)
323 return 0;
324 cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
325 if (cc == 0) {
326 smp_cpu_mtid = mtid;
327 smp_cpu_mt_shift = 0;
328 while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
329 smp_cpu_mt_shift++;
330 pcpu_devices[0].address = stap();
331 }
332 return cc;
333}
334
335/*
336 * Call function on an online CPU.
337 */
338void smp_call_online_cpu(void (*func)(void *), void *data)
339{
340 struct pcpu *pcpu;
341
342 /* Use the current cpu if it is online. */
343 pcpu = pcpu_find_address(cpu_online_mask, stap());
344 if (!pcpu)
345 /* Use the first online cpu. */
346 pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
347 pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
348}
349
350/*
351 * Call function on the ipl CPU.
352 */
353void smp_call_ipl_cpu(void (*func)(void *), void *data)
354{
355 pcpu_delegate(&pcpu_devices[0], func, data,
356 pcpu_devices->lowcore->panic_stack -
357 PANIC_FRAME_OFFSET + PAGE_SIZE);
358}
359
360int smp_find_processor_id(u16 address)
361{
362 int cpu;
363
364 for_each_present_cpu(cpu)
365 if (pcpu_devices[cpu].address == address)
366 return cpu;
367 return -1;
368}
369
370bool arch_vcpu_is_preempted(int cpu)
371{
372 if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
373 return false;
374 if (pcpu_running(pcpu_devices + cpu))
375 return false;
376 return true;
377}
378EXPORT_SYMBOL(arch_vcpu_is_preempted);
379
380void smp_yield_cpu(int cpu)
381{
382 if (MACHINE_HAS_DIAG9C) {
383 diag_stat_inc_norecursion(DIAG_STAT_X09C);
384 asm volatile("diag %0,0,0x9c"
385 : : "d" (pcpu_devices[cpu].address));
386 } else if (MACHINE_HAS_DIAG44) {
387 diag_stat_inc_norecursion(DIAG_STAT_X044);
388 asm volatile("diag 0,0,0x44");
389 }
390}
391
392/*
393 * Send cpus emergency shutdown signal. This gives the cpus the
394 * opportunity to complete outstanding interrupts.
395 */
396static void smp_emergency_stop(cpumask_t *cpumask)
397{
398 u64 end;
399 int cpu;
400
401 end = get_tod_clock() + (1000000UL << 12);
402 for_each_cpu(cpu, cpumask) {
403 struct pcpu *pcpu = pcpu_devices + cpu;
404 set_bit(ec_stop_cpu, &pcpu->ec_mask);
405 while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
406 0, NULL) == SIGP_CC_BUSY &&
407 get_tod_clock() < end)
408 cpu_relax();
409 }
410 while (get_tod_clock() < end) {
411 for_each_cpu(cpu, cpumask)
412 if (pcpu_stopped(pcpu_devices + cpu))
413 cpumask_clear_cpu(cpu, cpumask);
414 if (cpumask_empty(cpumask))
415 break;
416 cpu_relax();
417 }
418}
419
420/*
421 * Stop all cpus but the current one.
422 */
423void smp_send_stop(void)
424{
425 cpumask_t cpumask;
426 int cpu;
427
428 /* Disable all interrupts/machine checks */
429 __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
430 trace_hardirqs_off();
431
432 debug_set_critical();
433 cpumask_copy(&cpumask, cpu_online_mask);
434 cpumask_clear_cpu(smp_processor_id(), &cpumask);
435
436 if (oops_in_progress)
437 smp_emergency_stop(&cpumask);
438
439 /* stop all processors */
440 for_each_cpu(cpu, &cpumask) {
441 struct pcpu *pcpu = pcpu_devices + cpu;
442 pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
443 while (!pcpu_stopped(pcpu))
444 cpu_relax();
445 }
446}
447
448/*
449 * This is the main routine where commands issued by other
450 * cpus are handled.
451 */
452static void smp_handle_ext_call(void)
453{
454 unsigned long bits;
455
456 /* handle bit signal external calls */
457 bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
458 if (test_bit(ec_stop_cpu, &bits))
459 smp_stop_cpu();
460 if (test_bit(ec_schedule, &bits))
461 scheduler_ipi();
462 if (test_bit(ec_call_function_single, &bits))
463 generic_smp_call_function_single_interrupt();
464}
465
466static void do_ext_call_interrupt(struct ext_code ext_code,
467 unsigned int param32, unsigned long param64)
468{
469 inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
470 smp_handle_ext_call();
471}
472
473void arch_send_call_function_ipi_mask(const struct cpumask *mask)
474{
475 int cpu;
476
477 for_each_cpu(cpu, mask)
478 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
479}
480
481void arch_send_call_function_single_ipi(int cpu)
482{
483 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
484}
485
486/*
487 * this function sends a 'reschedule' IPI to another CPU.
488 * it goes straight through and wastes no time serializing
489 * anything. Worst case is that we lose a reschedule ...
490 */
491void smp_send_reschedule(int cpu)
492{
493 pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
494}
495
496/*
497 * parameter area for the set/clear control bit callbacks
498 */
499struct ec_creg_mask_parms {
500 unsigned long orval;
501 unsigned long andval;
502 int cr;
503};
504
505/*
506 * callback for setting/clearing control bits
507 */
508static void smp_ctl_bit_callback(void *info)
509{
510 struct ec_creg_mask_parms *pp = info;
511 unsigned long cregs[16];
512
513 __ctl_store(cregs, 0, 15);
514 cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
515 __ctl_load(cregs, 0, 15);
516}
517
518/*
519 * Set a bit in a control register of all cpus
520 */
521void smp_ctl_set_bit(int cr, int bit)
522{
523 struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
524
525 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
526}
527EXPORT_SYMBOL(smp_ctl_set_bit);
528
529/*
530 * Clear a bit in a control register of all cpus
531 */
532void smp_ctl_clear_bit(int cr, int bit)
533{
534 struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
535
536 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
537}
538EXPORT_SYMBOL(smp_ctl_clear_bit);
539
540#ifdef CONFIG_CRASH_DUMP
541
542int smp_store_status(int cpu)
543{
544 struct pcpu *pcpu = pcpu_devices + cpu;
545 unsigned long pa;
546
547 pa = __pa(&pcpu->lowcore->floating_pt_save_area);
548 if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
549 pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
550 return -EIO;
551 if (!MACHINE_HAS_VX)
552 return 0;
553 pa = __pa(pcpu->lowcore->vector_save_area_addr);
554 if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
555 pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
556 return -EIO;
557 return 0;
558}
559
560/*
561 * Collect CPU state of the previous, crashed system.
562 * There are four cases:
563 * 1) standard zfcp dump
564 * condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
565 * The state for all CPUs except the boot CPU needs to be collected
566 * with sigp stop-and-store-status. The boot CPU state is located in
567 * the absolute lowcore of the memory stored in the HSA. The zcore code
568 * will copy the boot CPU state from the HSA.
569 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
570 * condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
571 * The state for all CPUs except the boot CPU needs to be collected
572 * with sigp stop-and-store-status. The firmware or the boot-loader
573 * stored the registers of the boot CPU in the absolute lowcore in the
574 * memory of the old system.
575 * 3) kdump and the old kernel did not store the CPU state,
576 * or stand-alone kdump for DASD
577 * condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
578 * The state for all CPUs except the boot CPU needs to be collected
579 * with sigp stop-and-store-status. The kexec code or the boot-loader
580 * stored the registers of the boot CPU in the memory of the old system.
581 * 4) kdump and the old kernel stored the CPU state
582 * condition: OLDMEM_BASE != NULL && is_kdump_kernel()
583 * This case does not exist for s390 anymore, setup_arch explicitly
584 * deactivates the elfcorehdr= kernel parameter
585 */
586static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
587 bool is_boot_cpu, unsigned long page)
588{
589 __vector128 *vxrs = (__vector128 *) page;
590
591 if (is_boot_cpu)
592 vxrs = boot_cpu_vector_save_area;
593 else
594 __pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
595 save_area_add_vxrs(sa, vxrs);
596}
597
598static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
599 bool is_boot_cpu, unsigned long page)
600{
601 void *regs = (void *) page;
602
603 if (is_boot_cpu)
604 copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
605 else
606 __pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
607 save_area_add_regs(sa, regs);
608}
609
610void __init smp_save_dump_cpus(void)
611{
612 int addr, boot_cpu_addr, max_cpu_addr;
613 struct save_area *sa;
614 unsigned long page;
615 bool is_boot_cpu;
616
617 if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
618 /* No previous system present, normal boot. */
619 return;
620 /* Allocate a page as dumping area for the store status sigps */
621 page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
622 /* Set multi-threading state to the previous system. */
623 pcpu_set_smt(sclp.mtid_prev);
624 boot_cpu_addr = stap();
625 max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
626 for (addr = 0; addr <= max_cpu_addr; addr++) {
627 if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
628 SIGP_CC_NOT_OPERATIONAL)
629 continue;
630 is_boot_cpu = (addr == boot_cpu_addr);
631 /* Allocate save area */
632 sa = save_area_alloc(is_boot_cpu);
633 if (!sa)
634 panic("could not allocate memory for save area\n");
635 if (MACHINE_HAS_VX)
636 /* Get the vector registers */
637 smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
638 /*
639 * For a zfcp dump OLDMEM_BASE == NULL and the registers
640 * of the boot CPU are stored in the HSA. To retrieve
641 * these registers an SCLP request is required which is
642 * done by drivers/s390/char/zcore.c:init_cpu_info()
643 */
644 if (!is_boot_cpu || OLDMEM_BASE)
645 /* Get the CPU registers */
646 smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
647 }
648 memblock_free(page, PAGE_SIZE);
649 diag308_reset();
650 pcpu_set_smt(0);
651}
652#endif /* CONFIG_CRASH_DUMP */
653
654void smp_cpu_set_polarization(int cpu, int val)
655{
656 pcpu_devices[cpu].polarization = val;
657}
658
659int smp_cpu_get_polarization(int cpu)
660{
661 return pcpu_devices[cpu].polarization;
662}
663
664static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
665{
666 static int use_sigp_detection;
667 int address;
668
669 if (use_sigp_detection || sclp_get_core_info(info, early)) {
670 use_sigp_detection = 1;
671 for (address = 0;
672 address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
673 address += (1U << smp_cpu_mt_shift)) {
674 if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
675 SIGP_CC_NOT_OPERATIONAL)
676 continue;
677 info->core[info->configured].core_id =
678 address >> smp_cpu_mt_shift;
679 info->configured++;
680 }
681 info->combined = info->configured;
682 }
683}
684
685static int smp_add_present_cpu(int cpu);
686
687static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
688{
689 struct pcpu *pcpu;
690 cpumask_t avail;
691 int cpu, nr, i, j;
692 u16 address;
693
694 nr = 0;
695 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
696 cpu = cpumask_first(&avail);
697 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
698 if (sclp.has_core_type && info->core[i].type != boot_core_type)
699 continue;
700 address = info->core[i].core_id << smp_cpu_mt_shift;
701 for (j = 0; j <= smp_cpu_mtid; j++) {
702 if (pcpu_find_address(cpu_present_mask, address + j))
703 continue;
704 pcpu = pcpu_devices + cpu;
705 pcpu->address = address + j;
706 pcpu->state =
707 (cpu >= info->configured*(smp_cpu_mtid + 1)) ?
708 CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
709 smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
710 set_cpu_present(cpu, true);
711 if (sysfs_add && smp_add_present_cpu(cpu) != 0)
712 set_cpu_present(cpu, false);
713 else
714 nr++;
715 cpu = cpumask_next(cpu, &avail);
716 if (cpu >= nr_cpu_ids)
717 break;
718 }
719 }
720 return nr;
721}
722
void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_virt_alloc(sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	memblock_free_early((unsigned long)info, sizeof(*info));
}

/*
 * Activate a secondary processor.
 */
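/*
 * This is the first C code executed on the new CPU, entered via the
 * restart mechanism used by pcpu_start_fn() with interrupts disabled.
 * The restart entry in the lowcore is re-armed first so that later
 * restart interrupts are handled by do_restart(), then the control
 * registers and PSW mask are set up before the normal early per CPU
 * initialization runs.
 */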
static void smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Upping and downing of CPUs */
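/*
 * __cpu_up() brings one logical CPU online. If it is the first thread
 * of its core to come up, the whole core is given an initial CPU
 * reset first. After lowcore allocation and preparation the CPU is
 * started in smp_start_secondary(), and the caller busy waits until
 * the new CPU has marked itself online.
 */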
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = smp_get_base_cpu(cpu);
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

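/*
 * __cpu_disable() runs on the CPU that is about to go down: handle
 * pending IPIs, remove the CPU from the online mask, disable pfault
 * handling and shut off external, I/O and most machine check
 * interrupt sources via control registers 0, 6 and 14.
 */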
int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

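/*
 * The possible mask is the minimum of the "possible_cpus=" kernel
 * parameter and what the hardware supports: sclp.max_cores times the
 * usable threads per core (capped by smp_max_threads). A value of
 * zero from either source means "no limit" and falls back to
 * nr_cpu_ids.
 */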
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
}

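/*
 * The boot CPU never goes through __cpu_up(), so its pcpu entry is
 * filled in here: the lowcore address is derived from the prefix
 * register and the CPU is marked present and online directly.
 */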
void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = stap();
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}

/*
 * The frequency of the profiling timer can be changed by writing a
 * multiplier value into /proc/profile.
 *
 * On s390 the multiplier is accepted but ignored - the profiling
 * timer frequency cannot be changed.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
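/*
 * The per CPU sysfs attribute "configure" reports and changes the
 * configuration state of a CPU: writing 0 deconfigures, writing 1
 * configures. The operation applies to the complete core, is rejected
 * for CPU 0 and for cores with an online thread, and only succeeds if
 * the corresponding SCLP request succeeds, e.g. (hypothetical CPU
 * number):
 *	echo 0 > /sys/devices/system/cpu/cpu2/configure
 */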
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}
static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}

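/*
 * Register a present CPU with the driver core: allocate the cpu
 * device, create the common sysfs attributes and the topology
 * entries. On failure everything set up so far is unwound again.
 */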
static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

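/*
 * Re-read the core info from the SCLP and register any cores that
 * appeared since the last scan, e.g. after CPUs were attached to the
 * LPAR or z/VM guest. Triggered from user space via:
 *	echo 1 > /sys/devices/system/cpu/rescan
 */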
int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

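/*
 * Late initialization: create the global "rescan" attribute, add the
 * sysfs files for all CPUs that are already present and install the
 * hotplug callbacks that manage the online-only attributes.
 */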
static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
out:
	return rc;
}
subsys_initcall(s390_smp_init);