1/*
2 * arch/s390/kernel/smp.c
3 *
4 * Copyright IBM Corp. 1999, 2009
5 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
7 * Heiko Carstens (heiko.carstens@de.ibm.com)
8 *
9 * based on other smp stuff by
10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
11 * (c) 1998 Ingo Molnar
12 *
13 * We work with logical cpu numbering everywhere we can. The only
14 * functions using the real cpu address (obtained via STAP) are the sigp
15 * functions. For all other functions we use the identity mapping.
16 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
17 * used e.g. to find the idle task belonging to a logical cpu. Every array
18 * in the kernel is sorted by the logical cpu number and not by the physical
19 * one, which is what causes all the confusion with __cpu_logical_map and
20 * cpu_number_map in other architectures.
21 */
22
23#define KMSG_COMPONENT "cpu"
24#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
25
26#include <linux/workqueue.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/mm.h>
30#include <linux/err.h>
31#include <linux/spinlock.h>
32#include <linux/kernel_stat.h>
33#include <linux/delay.h>
34#include <linux/cache.h>
35#include <linux/interrupt.h>
36#include <linux/irqflags.h>
37#include <linux/cpu.h>
38#include <linux/timex.h>
39#include <linux/bootmem.h>
40#include <linux/slab.h>
41#include <asm/asm-offsets.h>
42#include <asm/ipl.h>
43#include <asm/setup.h>
44#include <asm/sigp.h>
45#include <asm/pgalloc.h>
46#include <asm/irq.h>
47#include <asm/cpcmd.h>
48#include <asm/tlbflush.h>
49#include <asm/timer.h>
50#include <asm/lowcore.h>
51#include <asm/sclp.h>
52#include <asm/cputime.h>
53#include <asm/vdso.h>
54#include <asm/cpu.h>
55#include "entry.h"
56
57/* logical cpu to cpu address */
58unsigned short __cpu_logical_map[NR_CPUS];
59
60static struct task_struct *current_set[NR_CPUS];
61
62static u8 smp_cpu_type;
63static int smp_use_sigp_detection;
64
65enum s390_cpu_state {
66 CPU_STATE_STANDBY,
67 CPU_STATE_CONFIGURED,
68};
69
70DEFINE_MUTEX(smp_cpu_state_mutex);
71int smp_cpu_polarization[NR_CPUS];
72static int smp_cpu_state[NR_CPUS];
73static int cpu_management;
74
75static DEFINE_PER_CPU(struct cpu, cpu_devices);
76
77static void smp_ext_bitcall(int, int);
78
79static int raw_cpu_stopped(int cpu)
80{
81 u32 status;
82
83 switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
84 case sigp_status_stored:
85 /* Check for stopped and check stop state */
86 if (status & 0x50)
87 return 1;
88 break;
89 default:
90 break;
91 }
92 return 0;
93}
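/*
 * In the status word stored by sigp sense, 0x40 is the "stopped" bit
 * and 0x10 the "check stop" bit, so the 0x50 mask above accepts either
 * state as "stopped".
 */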
94
95static inline int cpu_stopped(int cpu)
96{
97 return raw_cpu_stopped(cpu_logical_map(cpu));
98}
99
100void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
101{
102 struct _lowcore *lc, *current_lc;
103 struct stack_frame *sf;
104 struct pt_regs *regs;
105 unsigned long sp;
106
107 if (smp_processor_id() == 0)
108 func(data);
109 __load_psw_mask(PSW_BASE_BITS | PSW_DEFAULT_KEY);
110 /* Disable lowcore protection */
111 __ctl_clear_bit(0, 28);
112 current_lc = lowcore_ptr[smp_processor_id()];
113 lc = lowcore_ptr[0];
114 if (!lc)
115 lc = current_lc;
116 lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
117 lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
118 if (!cpu_online(0))
119 smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
120 while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
121 cpu_relax();
122 sp = lc->panic_stack;
123 sp -= sizeof(struct pt_regs);
124 regs = (struct pt_regs *) sp;
125 memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
126 regs->psw = lc->psw_save_area;
127 sp -= STACK_FRAME_OVERHEAD;
128 sf = (struct stack_frame *) sp;
129 sf->back_chain = regs->gprs[15];
130 smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
131}
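/*
 * Sketch of the flow above when the current cpu is not the IPL cpu:
 * stop cpu 0 with stop-and-store-status, rebuild a pt_regs/stack_frame
 * pair from the saved register state on its panic stack, then restart
 * cpu 0 on that stack so that func ends up running on the IPL cpu.
 */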
132
133void smp_send_stop(void)
134{
135 int cpu, rc;
136
137 /* Disable all interrupts/machine checks */
138 __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
139 trace_hardirqs_off();
140
141 /* stop all processors */
142 for_each_online_cpu(cpu) {
143 if (cpu == smp_processor_id())
144 continue;
145 do {
146 rc = sigp(cpu, sigp_stop);
147 } while (rc == sigp_busy);
148
149 while (!cpu_stopped(cpu))
150 cpu_relax();
151 }
152}
153
154/*
155 * This is the main routine where commands issued by other
156 * cpus are handled.
157 */
158
159static void do_ext_call_interrupt(unsigned int ext_int_code,
160 unsigned int param32, unsigned long param64)
161{
162 unsigned long bits;
163
164 kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
165 /*
166 * handle bit signal external calls
167 */
168 bits = xchg(&S390_lowcore.ext_call_fast, 0);
169
170 if (test_bit(ec_schedule, &bits))
171 scheduler_ipi();
172
173 if (test_bit(ec_call_function, &bits))
174 generic_smp_call_function_interrupt();
175
176 if (test_bit(ec_call_function_single, &bits))
177 generic_smp_call_function_single_interrupt();
178}
179
180/*
181 * Send an external call sigp to another cpu and return without waiting
182 * for its completion.
183 */
184static void smp_ext_bitcall(int cpu, int sig)
185{
186 /*
187 * Set signaling bit in lowcore of target cpu and kick it
188 */
189 set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
190 while (sigp(cpu, sigp_emergency_signal) == sigp_busy)
191 udelay(10);
192}
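/*
 * Several ec_xxx bits may accumulate in ext_call_fast before the
 * target reacts; do_ext_call_interrupt() picks them all up with a
 * single xchg(), so one emergency-signal interrupt can serve multiple
 * requests.
 */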
193
194void arch_send_call_function_ipi_mask(const struct cpumask *mask)
195{
196 int cpu;
197
198 for_each_cpu(cpu, mask)
199 smp_ext_bitcall(cpu, ec_call_function);
200}
201
202void arch_send_call_function_single_ipi(int cpu)
203{
204 smp_ext_bitcall(cpu, ec_call_function_single);
205}
206
207#ifndef CONFIG_64BIT
208/*
209 * this function sends a 'purge tlb' signal to another CPU.
210 */
211static void smp_ptlb_callback(void *info)
212{
213 __tlb_flush_local();
214}
215
216void smp_ptlb_all(void)
217{
218 on_each_cpu(smp_ptlb_callback, NULL, 1);
219}
220EXPORT_SYMBOL(smp_ptlb_all);
221#endif /* ! CONFIG_64BIT */
222
223/*
224 * this function sends a 'reschedule' IPI to another CPU.
225 * it goes straight through and wastes no time serializing
226 * anything. Worst case is that we lose a reschedule ...
227 */
228void smp_send_reschedule(int cpu)
229{
230 smp_ext_bitcall(cpu, ec_schedule);
231}
232
233/*
234 * parameter area for the set/clear control bit callbacks
235 */
236struct ec_creg_mask_parms {
237 unsigned long orvals[16];
238 unsigned long andvals[16];
239};
240
241/*
242 * callback for setting/clearing control bits
243 */
244static void smp_ctl_bit_callback(void *info)
245{
246 struct ec_creg_mask_parms *pp = info;
247 unsigned long cregs[16];
248 int i;
249
250 __ctl_store(cregs, 0, 15);
251 for (i = 0; i <= 15; i++)
252 cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
253 __ctl_load(cregs, 0, 15);
254}
255
256/*
257 * Set a bit in a control register of all cpus
258 */
259void smp_ctl_set_bit(int cr, int bit)
260{
261 struct ec_creg_mask_parms parms;
262
263 memset(&parms.orvals, 0, sizeof(parms.orvals));
264 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
265 parms.orvals[cr] = 1UL << bit;
266 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
267}
268EXPORT_SYMBOL(smp_ctl_set_bit);
269
270/*
271 * Clear a bit in a control register of all cpus
272 */
273void smp_ctl_clear_bit(int cr, int bit)
274{
275 struct ec_creg_mask_parms parms;
276
277 memset(&parms.orvals, 0, sizeof(parms.orvals));
278 memset(&parms.andvals, 0xff, sizeof(parms.andvals));
279 parms.andvals[cr] = ~(1UL << bit);
280 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
281}
282EXPORT_SYMBOL(smp_ctl_clear_bit);
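/*
 * Example (sketch): CR0 bit 28 is the lowcore protection bit used
 * elsewhere in this file, so
 *
 *	smp_ctl_set_bit(0, 28);
 *
 * would enable lowcore protection on every online cpu, and
 * smp_ctl_clear_bit(0, 28) would disable it again everywhere.
 */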
283
284#ifdef CONFIG_ZFCPDUMP
285
286static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
287{
288 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
289 return;
290 if (cpu >= NR_CPUS) {
291 pr_warning("CPU %i exceeds the maximum %i and is excluded from "
292 "the dump\n", cpu, NR_CPUS - 1);
293 return;
294 }
295 zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
296 while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
297 cpu_relax();
298 memcpy_real(zfcpdump_save_areas[cpu],
299 (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
300 sizeof(struct save_area));
301}
302
303struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
304EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
305
306#else
307
308static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }
309
310#endif /* CONFIG_ZFCPDUMP */
311
312static int cpu_known(int cpu_id)
313{
314 int cpu;
315
316 for_each_present_cpu(cpu) {
317 if (__cpu_logical_map[cpu] == cpu_id)
318 return 1;
319 }
320 return 0;
321}
322
323static int smp_rescan_cpus_sigp(cpumask_t avail)
324{
325 int cpu_id, logical_cpu;
326
327 logical_cpu = cpumask_first(&avail);
328 if (logical_cpu >= nr_cpu_ids)
329 return 0;
330 for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
331 if (cpu_known(cpu_id))
332 continue;
333 __cpu_logical_map[logical_cpu] = cpu_id;
334 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
335 if (!cpu_stopped(logical_cpu))
336 continue;
337 set_cpu_present(logical_cpu, true);
338 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
339 logical_cpu = cpumask_next(logical_cpu, &avail);
340 if (logical_cpu >= nr_cpu_ids)
341 break;
342 }
343 return 0;
344}
345
346static int smp_rescan_cpus_sclp(cpumask_t avail)
347{
348 struct sclp_cpu_info *info;
349 int cpu_id, logical_cpu, cpu;
350 int rc;
351
352 logical_cpu = cpumask_first(&avail);
353 if (logical_cpu >= nr_cpu_ids)
354 return 0;
355 info = kmalloc(sizeof(*info), GFP_KERNEL);
356 if (!info)
357 return -ENOMEM;
358 rc = sclp_get_cpu_info(info);
359 if (rc)
360 goto out;
361 for (cpu = 0; cpu < info->combined; cpu++) {
362 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
363 continue;
364 cpu_id = info->cpu[cpu].address;
365 if (cpu_known(cpu_id))
366 continue;
367 __cpu_logical_map[logical_cpu] = cpu_id;
368 smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
369 set_cpu_present(logical_cpu, true);
370 if (cpu >= info->configured)
371 smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
372 else
373 smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
374 logical_cpu = cpumask_next(logical_cpu, &avail);
375 if (logical_cpu >= nr_cpu_ids)
376 break;
377 }
378out:
379 kfree(info);
380 return rc;
381}
382
383static int __smp_rescan_cpus(void)
384{
385 cpumask_t avail;
386
387 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
388 if (smp_use_sigp_detection)
389 return smp_rescan_cpus_sigp(avail);
390 else
391 return smp_rescan_cpus_sclp(avail);
392}
393
394static void __init smp_detect_cpus(void)
395{
396 unsigned int cpu, c_cpus, s_cpus;
397 struct sclp_cpu_info *info;
398 u16 boot_cpu_addr, cpu_addr;
399
400 c_cpus = 1;
401 s_cpus = 0;
402 boot_cpu_addr = __cpu_logical_map[0];
403 info = kmalloc(sizeof(*info), GFP_KERNEL);
404 if (!info)
405 panic("smp_detect_cpus failed to allocate memory\n");
406 /* Use sigp detection algorithm if sclp doesn't work. */
407 if (sclp_get_cpu_info(info)) {
408 smp_use_sigp_detection = 1;
409 for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
410 if (cpu == boot_cpu_addr)
411 continue;
412 if (!raw_cpu_stopped(cpu))
413 continue;
414 smp_get_save_area(c_cpus, cpu);
415 c_cpus++;
416 }
417 goto out;
418 }
419
420 if (info->has_cpu_type) {
421 for (cpu = 0; cpu < info->combined; cpu++) {
422 if (info->cpu[cpu].address == boot_cpu_addr) {
423 smp_cpu_type = info->cpu[cpu].type;
424 break;
425 }
426 }
427 }
428
429 for (cpu = 0; cpu < info->combined; cpu++) {
430 if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
431 continue;
432 cpu_addr = info->cpu[cpu].address;
433 if (cpu_addr == boot_cpu_addr)
434 continue;
435 if (!raw_cpu_stopped(cpu_addr)) {
436 s_cpus++;
437 continue;
438 }
439 smp_get_save_area(c_cpus, cpu_addr);
440 c_cpus++;
441 }
442out:
443 kfree(info);
444 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
445 get_online_cpus();
446 __smp_rescan_cpus();
447 put_online_cpus();
448}
449
450/*
451 * Activate a secondary processor.
452 */
453int __cpuinit start_secondary(void *cpuvoid)
454{
455 cpu_init();
456 preempt_disable();
457 init_cpu_timer();
458 init_cpu_vtimer();
459 pfault_init();
460
461 notify_cpu_starting(smp_processor_id());
462 ipi_call_lock();
463 set_cpu_online(smp_processor_id(), true);
464 ipi_call_unlock();
465 __ctl_clear_bit(0, 28); /* Disable lowcore protection */
466 S390_lowcore.restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
467 S390_lowcore.restart_psw.addr =
468 PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
469 __ctl_set_bit(0, 28); /* Enable lowcore protection */
470 /*
471 * Wait until the cpu which brought this one up marked it
472 * active before enabling interrupts.
473 */
474 while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
475 cpu_relax();
476 local_irq_enable();
477 /* cpu_idle will call schedule for us */
478 cpu_idle();
479 return 0;
480}
481
482struct create_idle {
483 struct work_struct work;
484 struct task_struct *idle;
485 struct completion done;
486 int cpu;
487};
488
489static void __cpuinit smp_fork_idle(struct work_struct *work)
490{
491 struct create_idle *c_idle;
492
493 c_idle = container_of(work, struct create_idle, work);
494 c_idle->idle = fork_idle(c_idle->cpu);
495 complete(&c_idle->done);
496}
497
498static int __cpuinit smp_alloc_lowcore(int cpu)
499{
500 unsigned long async_stack, panic_stack;
501 struct _lowcore *lowcore;
502
503 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
504 if (!lowcore)
505 return -ENOMEM;
506 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
507 panic_stack = __get_free_page(GFP_KERNEL);
508 if (!panic_stack || !async_stack)
509 goto out;
510 memcpy(lowcore, &S390_lowcore, 512);
511 memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
512 lowcore->async_stack = async_stack + ASYNC_SIZE;
513 lowcore->panic_stack = panic_stack + PAGE_SIZE;
514 lowcore->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
515 lowcore->restart_psw.addr =
516 PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
517 if (user_mode != HOME_SPACE_MODE)
518 lowcore->restart_psw.mask |= PSW_ASC_HOME;
519#ifndef CONFIG_64BIT
520 if (MACHINE_HAS_IEEE) {
521 unsigned long save_area;
522
523 save_area = get_zeroed_page(GFP_KERNEL);
524 if (!save_area)
525 goto out;
526 lowcore->extended_save_area_addr = (u32) save_area;
527 }
528#else
529 if (vdso_alloc_per_cpu(cpu, lowcore))
530 goto out;
531#endif
532 lowcore_ptr[cpu] = lowcore;
533 return 0;
534
535out:
536 free_page(panic_stack);
537 free_pages(async_stack, ASYNC_ORDER);
538 free_pages((unsigned long) lowcore, LC_ORDER);
539 return -ENOMEM;
540}
541
542static void smp_free_lowcore(int cpu)
543{
544 struct _lowcore *lowcore;
545
546 lowcore = lowcore_ptr[cpu];
547#ifndef CONFIG_64BIT
548 if (MACHINE_HAS_IEEE)
549 free_page((unsigned long) lowcore->extended_save_area_addr);
550#else
551 vdso_free_per_cpu(cpu, lowcore);
552#endif
553 free_page(lowcore->panic_stack - PAGE_SIZE);
554 free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
555 free_pages((unsigned long) lowcore, LC_ORDER);
556 lowcore_ptr[cpu] = NULL;
557}
558
559/* Upping and downing of CPUs */
560int __cpuinit __cpu_up(unsigned int cpu)
561{
562 struct _lowcore *cpu_lowcore;
563 struct create_idle c_idle;
564 struct task_struct *idle;
565 struct stack_frame *sf;
566 u32 lowcore;
567 int ccode;
568
569 if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
570 return -EIO;
571 idle = current_set[cpu];
572 if (!idle) {
573 c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
574 INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
575 c_idle.cpu = cpu;
576 schedule_work(&c_idle.work);
577 wait_for_completion(&c_idle.done);
578 if (IS_ERR(c_idle.idle))
579 return PTR_ERR(c_idle.idle);
580 idle = c_idle.idle;
581 current_set[cpu] = c_idle.idle;
582 }
583 init_idle(idle, cpu);
584 if (smp_alloc_lowcore(cpu))
585 return -ENOMEM;
586 do {
587 ccode = sigp(cpu, sigp_initial_cpu_reset);
588 if (ccode == sigp_busy)
589 udelay(10);
590 if (ccode == sigp_not_operational)
591 goto err_out;
592 } while (ccode == sigp_busy);
593
594 lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
595 while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
596 udelay(10);
597
598 cpu_lowcore = lowcore_ptr[cpu];
599 cpu_lowcore->kernel_stack = (unsigned long)
600 task_stack_page(idle) + THREAD_SIZE;
601 cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
602 sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
603 - sizeof(struct pt_regs)
604 - sizeof(struct stack_frame));
605 memset(sf, 0, sizeof(struct stack_frame));
606 sf->gprs[9] = (unsigned long) sf;
607 cpu_lowcore->save_area[15] = (unsigned long) sf;
608 __ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
609 atomic_inc(&init_mm.context.attach_count);
610 asm volatile(
611 " stam 0,15,0(%0)"
612 : : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
613 cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
614 cpu_lowcore->current_task = (unsigned long) idle;
615 cpu_lowcore->cpu_nr = cpu;
616 cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
617 cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
618 cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
619 memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list,
620 MAX_FACILITY_BIT/8);
621 eieio();
622
623 while (sigp(cpu, sigp_restart) == sigp_busy)
624 udelay(10);
625
626 while (!cpu_online(cpu))
627 cpu_relax();
628 return 0;
629
630err_out:
631 smp_free_lowcore(cpu);
632 return -EIO;
633}
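/*
 * Bring-up sequence above: initial cpu reset, point the prefix
 * register at the new lowcore, seed the lowcore with a hand-crafted
 * stack frame and the idle task, then sigp restart and wait for the
 * new cpu to mark itself online.
 */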
634
635static int __init setup_possible_cpus(char *s)
636{
637 int pcpus, cpu;
638
639 pcpus = simple_strtoul(s, NULL, 0);
640 init_cpu_possible(cpumask_of(0));
641 for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
642 set_cpu_possible(cpu, true);
643 return 0;
644}
645early_param("possible_cpus", setup_possible_cpus);
646
647#ifdef CONFIG_HOTPLUG_CPU
648
649int __cpu_disable(void)
650{
651 struct ec_creg_mask_parms cr_parms;
652 int cpu = smp_processor_id();
653
654 set_cpu_online(cpu, false);
655
656 /* Disable pfault pseudo page faults on this cpu. */
657 pfault_fini();
658
659 memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
660 memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));
661
662 /* disable all external interrupts */
663 cr_parms.orvals[0] = 0;
664 cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
665 1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 |
666 1 << 4);
667 /* disable all I/O interrupts */
668 cr_parms.orvals[6] = 0;
669 cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
670 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
671 /* disable most machine checks */
672 cr_parms.orvals[14] = 0;
673 cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
674 1 << 25 | 1 << 24);
675
676 smp_ctl_bit_callback(&cr_parms);
677
678 return 0;
679}
680
681void __cpu_die(unsigned int cpu)
682{
683 /* Wait until target cpu is down */
684 while (!cpu_stopped(cpu))
685 cpu_relax();
686 while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
687 udelay(10);
688 smp_free_lowcore(cpu);
689 atomic_dec(&init_mm.context.attach_count);
690}
691
692void __noreturn cpu_die(void)
693{
694 idle_task_exit();
695 while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
696 cpu_relax();
697 for (;;);
698}
699
700#endif /* CONFIG_HOTPLUG_CPU */
701
702void __init smp_prepare_cpus(unsigned int max_cpus)
703{
704#ifndef CONFIG_64BIT
705 unsigned long save_area = 0;
706#endif
707 unsigned long async_stack, panic_stack;
708 struct _lowcore *lowcore;
709
710 smp_detect_cpus();
711
712 /* request the 0x1201 emergency signal external interrupt */
713 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
714 panic("Couldn't request external interrupt 0x1201");
715
716 /* Reallocate current lowcore, but keep its contents. */
717 lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
718 panic_stack = __get_free_page(GFP_KERNEL);
719 async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
720 BUG_ON(!lowcore || !panic_stack || !async_stack);
721#ifndef CONFIG_64BIT
722 if (MACHINE_HAS_IEEE)
723 save_area = get_zeroed_page(GFP_KERNEL);
724#endif
725 local_irq_disable();
726 local_mcck_disable();
727 lowcore_ptr[smp_processor_id()] = lowcore;
728 *lowcore = S390_lowcore;
729 lowcore->panic_stack = panic_stack + PAGE_SIZE;
730 lowcore->async_stack = async_stack + ASYNC_SIZE;
731#ifndef CONFIG_64BIT
732 if (MACHINE_HAS_IEEE)
733 lowcore->extended_save_area_addr = (u32) save_area;
734#endif
735 set_prefix((u32)(unsigned long) lowcore);
736 local_mcck_enable();
737 local_irq_enable();
738#ifdef CONFIG_64BIT
739 if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
740 BUG();
741#endif
742}
743
744void __init smp_prepare_boot_cpu(void)
745{
746 BUG_ON(smp_processor_id() != 0);
747
748 current_thread_info()->cpu = 0;
749 set_cpu_present(0, true);
750 set_cpu_online(0, true);
751 S390_lowcore.percpu_offset = __per_cpu_offset[0];
752 current_set[0] = current;
753 smp_cpu_state[0] = CPU_STATE_CONFIGURED;
754 smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
755}
756
757void __init smp_cpus_done(unsigned int max_cpus)
758{
759}
760
761void __init smp_setup_processor_id(void)
762{
763 S390_lowcore.cpu_nr = 0;
764 __cpu_logical_map[0] = stap();
765}
766
767/*
768 * the frequency of the profiling timer can be changed
769 * by writing a multiplier value into /proc/profile.
770 *
771 * usually you want to run this on all CPUs ;)
772 */
773int setup_profiling_timer(unsigned int multiplier)
774{
775 return 0;
776}
777
778#ifdef CONFIG_HOTPLUG_CPU
779static ssize_t cpu_configure_show(struct sys_device *dev,
780 struct sysdev_attribute *attr, char *buf)
781{
782 ssize_t count;
783
784 mutex_lock(&smp_cpu_state_mutex);
785 count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
786 mutex_unlock(&smp_cpu_state_mutex);
787 return count;
788}
789
790static ssize_t cpu_configure_store(struct sys_device *dev,
791 struct sysdev_attribute *attr,
792 const char *buf, size_t count)
793{
794 int cpu = dev->id;
795 int val, rc;
796 char delim;
797
798 if (sscanf(buf, "%d %c", &val, &delim) != 1)
799 return -EINVAL;
800 if (val != 0 && val != 1)
801 return -EINVAL;
802
803 get_online_cpus();
804 mutex_lock(&smp_cpu_state_mutex);
805 rc = -EBUSY;
806 /* disallow configuration changes of online cpus and cpu 0 */
807 if (cpu_online(cpu) || cpu == 0)
808 goto out;
809 rc = 0;
810 switch (val) {
811 case 0:
812 if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
813 rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
814 if (!rc) {
815 smp_cpu_state[cpu] = CPU_STATE_STANDBY;
816 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
817 }
818 }
819 break;
820 case 1:
821 if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
822 rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
823 if (!rc) {
824 smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
825 smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
826 }
827 }
828 break;
829 default:
830 break;
831 }
832out:
833 mutex_unlock(&smp_cpu_state_mutex);
834 put_online_cpus();
835 return rc ? rc : count;
836}
837static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
838#endif /* CONFIG_HOTPLUG_CPU */
839
840static ssize_t cpu_polarization_show(struct sys_device *dev,
841 struct sysdev_attribute *attr, char *buf)
842{
843 int cpu = dev->id;
844 ssize_t count;
845
846 mutex_lock(&smp_cpu_state_mutex);
847 switch (smp_cpu_polarization[cpu]) {
848 case POLARIZATION_HRZ:
849 count = sprintf(buf, "horizontal\n");
850 break;
851 case POLARIZATION_VL:
852 count = sprintf(buf, "vertical:low\n");
853 break;
854 case POLARIZATION_VM:
855 count = sprintf(buf, "vertical:medium\n");
856 break;
857 case POLARIZATION_VH:
858 count = sprintf(buf, "vertical:high\n");
859 break;
860 default:
861 count = sprintf(buf, "unknown\n");
862 break;
863 }
864 mutex_unlock(&smp_cpu_state_mutex);
865 return count;
866}
867static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
868
869static ssize_t show_cpu_address(struct sys_device *dev,
870 struct sysdev_attribute *attr, char *buf)
871{
872 return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
873}
874static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);
875
876
877static struct attribute *cpu_common_attrs[] = {
878#ifdef CONFIG_HOTPLUG_CPU
879 &attr_configure.attr,
880#endif
881 &attr_address.attr,
882 &attr_polarization.attr,
883 NULL,
884};
885
886static struct attribute_group cpu_common_attr_group = {
887 .attrs = cpu_common_attrs,
888};
889
890static ssize_t show_capability(struct sys_device *dev,
891 struct sysdev_attribute *attr, char *buf)
892{
893 unsigned int capability;
894 int rc;
895
896 rc = get_cpu_capability(&capability);
897 if (rc)
898 return rc;
899 return sprintf(buf, "%u\n", capability);
900}
901static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
902
903static ssize_t show_idle_count(struct sys_device *dev,
904 struct sysdev_attribute *attr, char *buf)
905{
906 struct s390_idle_data *idle;
907 unsigned long long idle_count;
908 unsigned int sequence;
909
910 idle = &per_cpu(s390_idle, dev->id);
911repeat:
912 sequence = idle->sequence;
913 smp_rmb();
914 if (sequence & 1)
915 goto repeat;
916 idle_count = idle->idle_count;
917 if (idle->idle_enter)
918 idle_count++;
919 smp_rmb();
920 if (idle->sequence != sequence)
921 goto repeat;
922 return sprintf(buf, "%llu\n", idle_count);
923}
924static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
925
926static ssize_t show_idle_time(struct sys_device *dev,
927 struct sysdev_attribute *attr, char *buf)
928{
929 struct s390_idle_data *idle;
930 unsigned long long now, idle_time, idle_enter;
931 unsigned int sequence;
932
933 idle = &per_cpu(s390_idle, dev->id);
934 now = get_clock();
935repeat:
936 sequence = idle->sequence;
937 smp_rmb();
938 if (sequence & 1)
939 goto repeat;
940 idle_time = idle->idle_time;
941 idle_enter = idle->idle_enter;
942 if (idle_enter != 0ULL && idle_enter < now)
943 idle_time += now - idle_enter;
944 smp_rmb();
945 if (idle->sequence != sequence)
946 goto repeat;
947 return sprintf(buf, "%llu\n", idle_time >> 12);
948}
949static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
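/*
 * idle->sequence works like a seqlock: an odd value means an update is
 * in progress, so the reader starts over. The TOD clock counts 4096
 * units per microsecond (bit 51 is the microsecond bit), hence the
 * ">> 12" to report microseconds.
 */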
950
951static struct attribute *cpu_online_attrs[] = {
952 &attr_capability.attr,
953 &attr_idle_count.attr,
954 &attr_idle_time_us.attr,
955 NULL,
956};
957
958static struct attribute_group cpu_online_attr_group = {
959 .attrs = cpu_online_attrs,
960};
961
962static int __cpuinit smp_cpu_notify(struct notifier_block *self,
963 unsigned long action, void *hcpu)
964{
965 unsigned int cpu = (unsigned int)(long)hcpu;
966 struct cpu *c = &per_cpu(cpu_devices, cpu);
967 struct sys_device *s = &c->sysdev;
968 struct s390_idle_data *idle;
969 int err = 0;
970
971 switch (action) {
972 case CPU_ONLINE:
973 case CPU_ONLINE_FROZEN:
974 idle = &per_cpu(s390_idle, cpu);
975 memset(idle, 0, sizeof(struct s390_idle_data));
976 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
977 break;
978 case CPU_DEAD:
979 case CPU_DEAD_FROZEN:
980 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
981 break;
982 }
983 return notifier_from_errno(err);
984}
985
986static struct notifier_block __cpuinitdata smp_cpu_nb = {
987 .notifier_call = smp_cpu_notify,
988};
989
990static int __devinit smp_add_present_cpu(int cpu)
991{
992 struct cpu *c = &per_cpu(cpu_devices, cpu);
993 struct sys_device *s = &c->sysdev;
994 int rc;
995
996 c->hotpluggable = 1;
997 rc = register_cpu(c, cpu);
998 if (rc)
999 goto out;
1000 rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
1001 if (rc)
1002 goto out_cpu;
1003 if (!cpu_online(cpu))
1004 goto out;
1005 rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
1006 if (!rc)
1007 return 0;
1008 sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
1009out_cpu:
1010#ifdef CONFIG_HOTPLUG_CPU
1011 unregister_cpu(c);
1012#endif
1013out:
1014 return rc;
1015}
1016
1017#ifdef CONFIG_HOTPLUG_CPU
1018
1019int __ref smp_rescan_cpus(void)
1020{
1021 cpumask_t newcpus;
1022 int cpu;
1023 int rc;
1024
1025 get_online_cpus();
1026 mutex_lock(&smp_cpu_state_mutex);
1027 cpumask_copy(&newcpus, cpu_present_mask);
1028 rc = __smp_rescan_cpus();
1029 if (rc)
1030 goto out;
1031 cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
1032 for_each_cpu(cpu, &newcpus) {
1033 rc = smp_add_present_cpu(cpu);
1034 if (rc)
1035 set_cpu_present(cpu, false);
1036 }
1037 rc = 0;
1038out:
1039 mutex_unlock(&smp_cpu_state_mutex);
1040 put_online_cpus();
1041 if (!cpumask_empty(&newcpus))
1042 topology_schedule_update();
1043 return rc;
1044}
1045
1046static ssize_t __ref rescan_store(struct sysdev_class *class,
1047 struct sysdev_class_attribute *attr,
1048 const char *buf,
1049 size_t count)
1050{
1051 int rc;
1052
1053 rc = smp_rescan_cpus();
1054 return rc ? rc : count;
1055}
1056static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
1057#endif /* CONFIG_HOTPLUG_CPU */
1058
1059static ssize_t dispatching_show(struct sysdev_class *class,
1060 struct sysdev_class_attribute *attr,
1061 char *buf)
1062{
1063 ssize_t count;
1064
1065 mutex_lock(&smp_cpu_state_mutex);
1066 count = sprintf(buf, "%d\n", cpu_management);
1067 mutex_unlock(&smp_cpu_state_mutex);
1068 return count;
1069}
1070
1071static ssize_t dispatching_store(struct sysdev_class *dev,
1072 struct sysdev_class_attribute *attr,
1073 const char *buf,
1074 size_t count)
1075{
1076 int val, rc;
1077 char delim;
1078
1079 if (sscanf(buf, "%d %c", &val, &delim) != 1)
1080 return -EINVAL;
1081 if (val != 0 && val != 1)
1082 return -EINVAL;
1083 rc = 0;
1084 get_online_cpus();
1085 mutex_lock(&smp_cpu_state_mutex);
1086 if (cpu_management == val)
1087 goto out;
1088 rc = topology_set_cpu_management(val);
1089 if (!rc)
1090 cpu_management = val;
1091out:
1092 mutex_unlock(&smp_cpu_state_mutex);
1093 put_online_cpus();
1094 return rc ? rc : count;
1095}
1096static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
1097 dispatching_store);
1098
1099static int __init topology_init(void)
1100{
1101 int cpu;
1102 int rc;
1103
1104 register_cpu_notifier(&smp_cpu_nb);
1105
1106#ifdef CONFIG_HOTPLUG_CPU
1107 rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
1108 if (rc)
1109 return rc;
1110#endif
1111 rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
1112 if (rc)
1113 return rc;
1114 for_each_present_cpu(cpu) {
1115 rc = smp_add_present_cpu(cpu);
1116 if (rc)
1117 return rc;
1118 }
1119 return 0;
1120}
1121subsys_initcall(topology_init);
1/*
2 * SMP related functions
3 *
4 * Copyright IBM Corp. 1999,2012
5 * Author(s): Denis Joseph Barrow,
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>,
7 * Heiko Carstens <heiko.carstens@de.ibm.com>,
8 *
9 * based on other smp stuff by
10 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
11 * (c) 1998 Ingo Molnar
12 *
13 * The code outside of smp.c uses logical cpu numbers, only smp.c does
14 * the translation of logical to physical cpu ids. All new code that
15 * operates on physical cpu numbers needs to go into smp.c.
16 */
17
18#define KMSG_COMPONENT "cpu"
19#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
20
21#include <linux/workqueue.h>
22#include <linux/module.h>
23#include <linux/init.h>
24#include <linux/mm.h>
25#include <linux/err.h>
26#include <linux/spinlock.h>
27#include <linux/kernel_stat.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
30#include <linux/irqflags.h>
31#include <linux/cpu.h>
32#include <linux/slab.h>
33#include <linux/crash_dump.h>
34#include <asm/asm-offsets.h>
35#include <asm/switch_to.h>
36#include <asm/facility.h>
37#include <asm/ipl.h>
38#include <asm/setup.h>
39#include <asm/irq.h>
40#include <asm/tlbflush.h>
41#include <asm/timer.h>
42#include <asm/lowcore.h>
43#include <asm/sclp.h>
44#include <asm/vdso.h>
45#include <asm/debug.h>
46#include <asm/os_info.h>
47#include "entry.h"
48
49enum {
50 sigp_sense = 1,
51 sigp_external_call = 2,
52 sigp_emergency_signal = 3,
53 sigp_start = 4,
54 sigp_stop = 5,
55 sigp_restart = 6,
56 sigp_stop_and_store_status = 9,
57 sigp_initial_cpu_reset = 11,
58 sigp_cpu_reset = 12,
59 sigp_set_prefix = 13,
60 sigp_store_status_at_address = 14,
61 sigp_store_extended_status_at_address = 15,
62 sigp_set_architecture = 18,
63 sigp_conditional_emergency_signal = 19,
64 sigp_sense_running = 21,
65};
66
67enum {
68 sigp_order_code_accepted = 0,
69 sigp_status_stored = 1,
70 sigp_busy = 2,
71 sigp_not_operational = 3,
72};
73
74enum {
75 ec_schedule = 0,
76 ec_call_function,
77 ec_call_function_single,
78 ec_stop_cpu,
79};
80
81enum {
82 CPU_STATE_STANDBY,
83 CPU_STATE_CONFIGURED,
84};
85
86struct pcpu {
87 struct cpu cpu;
88 struct _lowcore *lowcore; /* lowcore page(s) for the cpu */
89 unsigned long async_stack; /* async stack for the cpu */
90 unsigned long panic_stack; /* panic stack for the cpu */
91 unsigned long ec_mask; /* bit mask for ec_xxx functions */
92 int state; /* physical cpu state */
93 u32 status; /* last status received via sigp */
94 u16 address; /* physical cpu address */
95};
96
97static u8 boot_cpu_type;
98static u16 boot_cpu_address;
99static struct pcpu pcpu_devices[NR_CPUS];
100
101DEFINE_MUTEX(smp_cpu_state_mutex);
102
103/*
104 * Signal processor helper functions.
105 */
106static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
107{
108 register unsigned int reg1 asm ("1") = parm;
109 int cc;
110
111 asm volatile(
112 " sigp %1,%2,0(%3)\n"
113 " ipm %0\n"
114 " srl %0,28\n"
115 : "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
116 if (status && cc == 1)
117 *status = reg1;
118 return cc;
119}
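/*
 * The condition code returned by sigp follows the enum above: 0 order
 * accepted, 1 status stored (in which case register 1 holds the status
 * word that is copied to *status), 2 busy, 3 target not operational.
 */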
120
121static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
122{
123 int cc;
124
125 while (1) {
126 cc = __pcpu_sigp(addr, order, parm, status);
127 if (cc != sigp_busy)
128 return cc;
129 cpu_relax();
130 }
131}
132
133static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
134{
135 int cc, retry;
136
137 for (retry = 0; ; retry++) {
138 cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
139 if (cc != sigp_busy)
140 break;
141 if (retry >= 3)
142 udelay(10);
143 }
144 return cc;
145}
146
147static inline int pcpu_stopped(struct pcpu *pcpu)
148{
149 if (__pcpu_sigp(pcpu->address, sigp_sense,
150 0, &pcpu->status) != sigp_status_stored)
151 return 0;
152 /* Check for stopped and check stop state */
153 return !!(pcpu->status & 0x50);
154}
155
156static inline int pcpu_running(struct pcpu *pcpu)
157{
158 if (__pcpu_sigp(pcpu->address, sigp_sense_running,
159 0, &pcpu->status) != sigp_status_stored)
160 return 1;
161 /* Check for running status */
162 return !(pcpu->status & 0x400);
163}
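/*
 * 0x400 is the "not running" bit of the sense-running status word; if
 * no status was stored at all, the cpu is reported as running.
 */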
164
165/*
166 * Find struct pcpu by cpu address.
167 */
168static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
169{
170 int cpu;
171
172 for_each_cpu(cpu, mask)
173 if (pcpu_devices[cpu].address == address)
174 return pcpu_devices + cpu;
175 return NULL;
176}
177
178static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
179{
180 int order;
181
182 set_bit(ec_bit, &pcpu->ec_mask);
183 order = pcpu_running(pcpu) ?
184 sigp_external_call : sigp_emergency_signal;
185 pcpu_sigp_retry(pcpu, order, 0);
186}
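/*
 * A running target is kicked with an external call (ext irq 0x1202),
 * a waiting one with an emergency signal (0x1201); both end up in
 * do_ext_call_interrupt(), which smp_prepare_cpus() registers for the
 * two interrupt codes.
 */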
187
188static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
189{
190 struct _lowcore *lc;
191
192 if (pcpu != &pcpu_devices[0]) {
193 pcpu->lowcore = (struct _lowcore *)
194 __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
195 pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
196 pcpu->panic_stack = __get_free_page(GFP_KERNEL);
197 if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
198 goto out;
199 }
200 lc = pcpu->lowcore;
201 memcpy(lc, &S390_lowcore, 512);
202 memset((char *) lc + 512, 0, sizeof(*lc) - 512);
203 lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
204 lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
205 lc->cpu_nr = cpu;
206#ifndef CONFIG_64BIT
207 if (MACHINE_HAS_IEEE) {
208 lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
209 if (!lc->extended_save_area_addr)
210 goto out;
211 }
212#else
213 if (vdso_alloc_per_cpu(lc))
214 goto out;
215#endif
216 lowcore_ptr[cpu] = lc;
217 pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc);
218 return 0;
219out:
220 if (pcpu != &pcpu_devices[0]) {
221 free_page(pcpu->panic_stack);
222 free_pages(pcpu->async_stack, ASYNC_ORDER);
223 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
224 }
225 return -ENOMEM;
226}
227
228#ifdef CONFIG_HOTPLUG_CPU
229
230static void pcpu_free_lowcore(struct pcpu *pcpu)
231{
232 pcpu_sigp_retry(pcpu, sigp_set_prefix, 0);
233 lowcore_ptr[pcpu - pcpu_devices] = NULL;
234#ifndef CONFIG_64BIT
235 if (MACHINE_HAS_IEEE) {
236 struct _lowcore *lc = pcpu->lowcore;
237
238 free_page((unsigned long) lc->extended_save_area_addr);
239 lc->extended_save_area_addr = 0;
240 }
241#else
242 vdso_free_per_cpu(pcpu->lowcore);
243#endif
244 if (pcpu != &pcpu_devices[0]) {
245 free_page(pcpu->panic_stack);
246 free_pages(pcpu->async_stack, ASYNC_ORDER);
247 free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
248 }
249}
250
251#endif /* CONFIG_HOTPLUG_CPU */
252
253static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
254{
255 struct _lowcore *lc = pcpu->lowcore;
256
257 atomic_inc(&init_mm.context.attach_count);
258 lc->cpu_nr = cpu;
259 lc->percpu_offset = __per_cpu_offset[cpu];
260 lc->kernel_asce = S390_lowcore.kernel_asce;
261 lc->machine_flags = S390_lowcore.machine_flags;
262 lc->ftrace_func = S390_lowcore.ftrace_func;
263 lc->user_timer = lc->system_timer = lc->steal_timer = 0;
264 __ctl_store(lc->cregs_save_area, 0, 15);
265 save_access_regs((unsigned int *) lc->access_regs_save_area);
266 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
267 MAX_FACILITY_BIT/8);
268}
269
270static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
271{
272 struct _lowcore *lc = pcpu->lowcore;
273 struct thread_info *ti = task_thread_info(tsk);
274
275 lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
276 lc->thread_info = (unsigned long) task_thread_info(tsk);
277 lc->current_task = (unsigned long) tsk;
278 lc->user_timer = ti->user_timer;
279 lc->system_timer = ti->system_timer;
280 lc->steal_timer = 0;
281}
282
283static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
284{
285 struct _lowcore *lc = pcpu->lowcore;
286
287 lc->restart_stack = lc->kernel_stack;
288 lc->restart_fn = (unsigned long) func;
289 lc->restart_data = (unsigned long) data;
290 lc->restart_source = -1UL;
291 pcpu_sigp_retry(pcpu, sigp_restart, 0);
292}
293
294/*
295 * Call function via PSW restart on pcpu and stop the current cpu.
296 */
297static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
298 void *data, unsigned long stack)
299{
300 struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
301 struct {
302 unsigned long stack;
303 void *func;
304 void *data;
305 unsigned long source;
306 } restart = { stack, func, data, stap() };
307
308 __load_psw_mask(psw_kernel_bits);
309 if (pcpu->address == restart.source)
310 func(data); /* should not return */
311 /* Stop target cpu (if func returns this stops the current cpu). */
312 pcpu_sigp_retry(pcpu, sigp_stop, 0);
313 /* Restart func on the target cpu and stop the current cpu. */
314 memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart));
315 asm volatile(
316 "0: sigp 0,%0,6 # sigp restart to target cpu\n"
317 " brc 2,0b # busy, try again\n"
318 "1: sigp 0,%1,5 # sigp stop to current cpu\n"
319 " brc 2,1b # busy, try again\n"
320 : : "d" (pcpu->address), "d" (restart.source) : "0", "1", "cc");
321 for (;;) ;
322}
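/*
 * The restart parameters are copied with memcpy_absolute() so they end
 * up in the absolute lowcore that the restart interrupt reads, no
 * matter which prefix pages are installed. "brc 2,0b" branches on
 * condition code 2, i.e. retries while the sigp order is busy.
 */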
323
324/*
325 * Call function on an online CPU.
326 */
327void smp_call_online_cpu(void (*func)(void *), void *data)
328{
329 struct pcpu *pcpu;
330
331 /* Use the current cpu if it is online. */
332 pcpu = pcpu_find_address(cpu_online_mask, stap());
333 if (!pcpu)
334 /* Use the first online cpu. */
335 pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
336 pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
337}
338
339/*
340 * Call function on the ipl CPU.
341 */
342void smp_call_ipl_cpu(void (*func)(void *), void *data)
343{
344 pcpu_delegate(&pcpu_devices[0], func, data,
345 pcpu_devices->panic_stack + PAGE_SIZE);
346}
347
348int smp_find_processor_id(u16 address)
349{
350 int cpu;
351
352 for_each_present_cpu(cpu)
353 if (pcpu_devices[cpu].address == address)
354 return cpu;
355 return -1;
356}
357
358int smp_vcpu_scheduled(int cpu)
359{
360 return pcpu_running(pcpu_devices + cpu);
361}
362
363void smp_yield(void)
364{
365 if (MACHINE_HAS_DIAG44)
366 asm volatile("diag 0,0,0x44");
367}
368
369void smp_yield_cpu(int cpu)
370{
371 if (MACHINE_HAS_DIAG9C)
372 asm volatile("diag %0,0,0x9c"
373 : : "d" (pcpu_devices[cpu].address));
374 else if (MACHINE_HAS_DIAG44)
375 asm volatile("diag 0,0,0x44");
376}
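/*
 * diag 0x9c asks the hypervisor for a directed yield of the time slice
 * to the given cpu; diag 0x44 is the plain "voluntary time slice end"
 * fallback used when 0x9c is not available.
 */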
377
378/*
379 * Send cpus emergency shutdown signal. This gives the cpus the
380 * opportunity to complete outstanding interrupts.
381 */
382void smp_emergency_stop(cpumask_t *cpumask)
383{
384 u64 end;
385 int cpu;
386
387 end = get_clock() + (1000000UL << 12);
388 for_each_cpu(cpu, cpumask) {
389 struct pcpu *pcpu = pcpu_devices + cpu;
390 set_bit(ec_stop_cpu, &pcpu->ec_mask);
391 while (__pcpu_sigp(pcpu->address, sigp_emergency_signal,
392 0, NULL) == sigp_busy &&
393 get_clock() < end)
394 cpu_relax();
395 }
396 while (get_clock() < end) {
397 for_each_cpu(cpu, cpumask)
398 if (pcpu_stopped(pcpu_devices + cpu))
399 cpumask_clear_cpu(cpu, cpumask);
400 if (cpumask_empty(cpumask))
401 break;
402 cpu_relax();
403 }
404}
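/*
 * 1000000 << 12 TOD units is one second (4096 TOD units per
 * microsecond), so the cpus get at most a second to reach the stopped
 * state before the loop gives up.
 */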
405
406/*
407 * Stop all cpus but the current one.
408 */
409void smp_send_stop(void)
410{
411 cpumask_t cpumask;
412 int cpu;
413
414 /* Disable all interrupts/machine checks */
415 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
416 trace_hardirqs_off();
417
418 debug_set_critical();
419 cpumask_copy(&cpumask, cpu_online_mask);
420 cpumask_clear_cpu(smp_processor_id(), &cpumask);
421
422 if (oops_in_progress)
423 smp_emergency_stop(&cpumask);
424
425 /* stop all processors */
426 for_each_cpu(cpu, &cpumask) {
427 struct pcpu *pcpu = pcpu_devices + cpu;
428 pcpu_sigp_retry(pcpu, sigp_stop, 0);
429 while (!pcpu_stopped(pcpu))
430 cpu_relax();
431 }
432}
433
434/*
435 * Stop the current cpu.
436 */
437void smp_stop_cpu(void)
438{
439 pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
440 for (;;) ;
441}
442
443/*
444 * This is the main routine where commands issued by other
445 * cpus are handled.
446 */
447static void do_ext_call_interrupt(struct ext_code ext_code,
448 unsigned int param32, unsigned long param64)
449{
450 unsigned long bits;
451 int cpu;
452
453 cpu = smp_processor_id();
454 if (ext_code.code == 0x1202)
455 kstat_cpu(cpu).irqs[EXTINT_EXC]++;
456 else
457 kstat_cpu(cpu).irqs[EXTINT_EMS]++;
458 /*
459 * handle bit signal external calls
460 */
461 bits = xchg(&pcpu_devices[cpu].ec_mask, 0);
462
463 if (test_bit(ec_stop_cpu, &bits))
464 smp_stop_cpu();
465
466 if (test_bit(ec_schedule, &bits))
467 scheduler_ipi();
468
469 if (test_bit(ec_call_function, &bits))
470 generic_smp_call_function_interrupt();
471
472 if (test_bit(ec_call_function_single, &bits))
473 generic_smp_call_function_single_interrupt();
474
475}
476
477void arch_send_call_function_ipi_mask(const struct cpumask *mask)
478{
479 int cpu;
480
481 for_each_cpu(cpu, mask)
482 pcpu_ec_call(pcpu_devices + cpu, ec_call_function);
483}
484
485void arch_send_call_function_single_ipi(int cpu)
486{
487 pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
488}
489
490#ifndef CONFIG_64BIT
491/*
492 * this function sends a 'purge tlb' signal to another CPU.
493 */
494static void smp_ptlb_callback(void *info)
495{
496 __tlb_flush_local();
497}
498
499void smp_ptlb_all(void)
500{
501 on_each_cpu(smp_ptlb_callback, NULL, 1);
502}
503EXPORT_SYMBOL(smp_ptlb_all);
504#endif /* ! CONFIG_64BIT */
505
506/*
507 * this function sends a 'reschedule' IPI to another CPU.
508 * it goes straight through and wastes no time serializing
509 * anything. Worst case is that we lose a reschedule ...
510 */
511void smp_send_reschedule(int cpu)
512{
513 pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
514}
515
516/*
517 * parameter area for the set/clear control bit callbacks
518 */
519struct ec_creg_mask_parms {
520 unsigned long orval;
521 unsigned long andval;
522 int cr;
523};
524
525/*
526 * callback for setting/clearing control bits
527 */
528static void smp_ctl_bit_callback(void *info)
529{
530 struct ec_creg_mask_parms *pp = info;
531 unsigned long cregs[16];
532
533 __ctl_store(cregs, 0, 15);
534 cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
535 __ctl_load(cregs, 0, 15);
536}
537
538/*
539 * Set a bit in a control register of all cpus
540 */
541void smp_ctl_set_bit(int cr, int bit)
542{
543 struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
544
545 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
546}
547EXPORT_SYMBOL(smp_ctl_set_bit);
548
549/*
550 * Clear a bit in a control register of all cpus
551 */
552void smp_ctl_clear_bit(int cr, int bit)
553{
554 struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
555
556 on_each_cpu(smp_ctl_bit_callback, &parms, 1);
557}
558EXPORT_SYMBOL(smp_ctl_clear_bit);
559
560#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
561
562struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
563EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
564
565static void __init smp_get_save_area(int cpu, u16 address)
566{
567 void *lc = pcpu_devices[0].lowcore;
568 struct save_area *save_area;
569
570 if (is_kdump_kernel())
571 return;
572 if (!OLDMEM_BASE && (address == boot_cpu_address ||
573 ipl_info.type != IPL_TYPE_FCP_DUMP))
574 return;
575 if (cpu >= NR_CPUS) {
576 pr_warning("CPU %i exceeds the maximum %i and is excluded "
577 "from the dump\n", cpu, NR_CPUS - 1);
578 return;
579 }
580 save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
581 if (!save_area)
582 panic("could not allocate memory for save area\n");
583 zfcpdump_save_areas[cpu] = save_area;
584#ifdef CONFIG_CRASH_DUMP
585 if (address == boot_cpu_address) {
586 /* Copy the registers of the boot cpu. */
587 copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
588 SAVE_AREA_BASE - PAGE_SIZE, 0);
589 return;
590 }
591#endif
592 /* Get the registers of a non-boot cpu. */
593 __pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL);
594 memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
595}
596
597int smp_store_status(int cpu)
598{
599 struct pcpu *pcpu;
600
601 pcpu = pcpu_devices + cpu;
602 if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status,
603 0, NULL) != sigp_order_code_accepted)
604 return -EIO;
605 return 0;
606}
607
608#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
609
610static inline void smp_get_save_area(int cpu, u16 address) { }
611
612#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */
613
614static struct sclp_cpu_info *smp_get_cpu_info(void)
615{
616 static int use_sigp_detection;
617 struct sclp_cpu_info *info;
618 int address;
619
620 info = kzalloc(sizeof(*info), GFP_KERNEL);
621 if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
622 use_sigp_detection = 1;
623 for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
624 if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) ==
625 sigp_not_operational)
626 continue;
627 info->cpu[info->configured].address = address;
628 info->configured++;
629 }
630 info->combined = info->configured;
631 }
632 return info;
633}
634
635static int __devinit smp_add_present_cpu(int cpu);
636
637static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
638 int sysfs_add)
639{
640 struct pcpu *pcpu;
641 cpumask_t avail;
642 int cpu, nr, i;
643
644 nr = 0;
645 cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
646 cpu = cpumask_first(&avail);
647 for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
648 if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
649 continue;
650 if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
651 continue;
652 pcpu = pcpu_devices + cpu;
653 pcpu->address = info->cpu[i].address;
654 pcpu->state = (cpu >= info->configured) ?
655 CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
656 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
657 set_cpu_present(cpu, true);
658 if (sysfs_add && smp_add_present_cpu(cpu) != 0)
659 set_cpu_present(cpu, false);
660 else
661 nr++;
662 cpu = cpumask_next(cpu, &avail);
663 }
664 return nr;
665}
666
667static void __init smp_detect_cpus(void)
668{
669 unsigned int cpu, c_cpus, s_cpus;
670 struct sclp_cpu_info *info;
671
672 info = smp_get_cpu_info();
673 if (!info)
674 panic("smp_detect_cpus failed to allocate memory\n");
675 if (info->has_cpu_type) {
676 for (cpu = 0; cpu < info->combined; cpu++) {
677 if (info->cpu[cpu].address != boot_cpu_address)
678 continue;
679 /* The boot cpu dictates the cpu type. */
680 boot_cpu_type = info->cpu[cpu].type;
681 break;
682 }
683 }
684 c_cpus = s_cpus = 0;
685 for (cpu = 0; cpu < info->combined; cpu++) {
686 if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
687 continue;
688 if (cpu < info->configured) {
689 smp_get_save_area(c_cpus, info->cpu[cpu].address);
690 c_cpus++;
691 } else
692 s_cpus++;
693 }
694 pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
695 get_online_cpus();
696 __smp_rescan_cpus(info, 0);
697 put_online_cpus();
698 kfree(info);
699}
700
701/*
702 * Activate a secondary processor.
703 */
704static void __cpuinit smp_start_secondary(void *cpuvoid)
705{
706 S390_lowcore.last_update_clock = get_clock();
707 S390_lowcore.restart_stack = (unsigned long) restart_stack;
708 S390_lowcore.restart_fn = (unsigned long) do_restart;
709 S390_lowcore.restart_data = 0;
710 S390_lowcore.restart_source = -1UL;
711 restore_access_regs(S390_lowcore.access_regs_save_area);
712 __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
713 __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
714 cpu_init();
715 preempt_disable();
716 init_cpu_timer();
717 init_cpu_vtimer();
718 pfault_init();
719 notify_cpu_starting(smp_processor_id());
720 ipi_call_lock();
721 set_cpu_online(smp_processor_id(), true);
722 ipi_call_unlock();
723 local_irq_enable();
724 /* cpu_idle will call schedule for us */
725 cpu_idle();
726}
727
728/* Upping and downing of CPUs */
729int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
730{
731 struct pcpu *pcpu;
732 int rc;
733
734 pcpu = pcpu_devices + cpu;
735 if (pcpu->state != CPU_STATE_CONFIGURED)
736 return -EIO;
737 if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
738 sigp_order_code_accepted)
739 return -EIO;
740
741 rc = pcpu_alloc_lowcore(pcpu, cpu);
742 if (rc)
743 return rc;
744 pcpu_prepare_secondary(pcpu, cpu);
745 pcpu_attach_task(pcpu, tidle);
746 pcpu_start_fn(pcpu, smp_start_secondary, NULL);
747 while (!cpu_online(cpu))
748 cpu_relax();
749 return 0;
750}
751
752static int __init setup_possible_cpus(char *s)
753{
754 int max, cpu;
755
756 if (kstrtoint(s, 0, &max) < 0)
757 return 0;
758 init_cpu_possible(cpumask_of(0));
759 for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
760 set_cpu_possible(cpu, true);
761 return 0;
762}
763early_param("possible_cpus", setup_possible_cpus);
764
765#ifdef CONFIG_HOTPLUG_CPU
766
767int __cpu_disable(void)
768{
769 unsigned long cregs[16];
770
771 set_cpu_online(smp_processor_id(), false);
772 /* Disable pseudo page faults on this cpu. */
773 pfault_fini();
774 /* Disable interrupt sources via control register. */
775 __ctl_store(cregs, 0, 15);
776 cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */
777 cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
778 cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
779 __ctl_load(cregs, 0, 15);
780 return 0;
781}
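/*
 * The masks above are the compact form of the bit lists spelled out in
 * the older version of this function: 0x0000ee70 = bits 15-13, 11-9
 * and 6-4 of CR0, 0xff000000 = bits 31-24 of CR6, 0x1f000000 = bits
 * 28-24 of CR14 (in 1 << n numbering).
 */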
782
783void __cpu_die(unsigned int cpu)
784{
785 struct pcpu *pcpu;
786
787 /* Wait until target cpu is down */
788 pcpu = pcpu_devices + cpu;
789 while (!pcpu_stopped(pcpu))
790 cpu_relax();
791 pcpu_free_lowcore(pcpu);
792 atomic_dec(&init_mm.context.attach_count);
793}
794
795void __noreturn cpu_die(void)
796{
797 idle_task_exit();
798 pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
799 for (;;) ;
800}
801
802#endif /* CONFIG_HOTPLUG_CPU */
803
804void __init smp_prepare_cpus(unsigned int max_cpus)
805{
806 /* request the 0x1201 emergency signal external interrupt */
807 if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
808 panic("Couldn't request external interrupt 0x1201");
809 /* request the 0x1202 external call external interrupt */
810 if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
811 panic("Couldn't request external interrupt 0x1202");
812 smp_detect_cpus();
813}
814
815void __init smp_prepare_boot_cpu(void)
816{
817 struct pcpu *pcpu = pcpu_devices;
818
819 boot_cpu_address = stap();
820 pcpu->state = CPU_STATE_CONFIGURED;
821 pcpu->address = boot_cpu_address;
822 pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
823 pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
824 pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
825 S390_lowcore.percpu_offset = __per_cpu_offset[0];
826 cpu_set_polarization(0, POLARIZATION_UNKNOWN);
827 set_cpu_present(0, true);
828 set_cpu_online(0, true);
829}
830
831void __init smp_cpus_done(unsigned int max_cpus)
832{
833}
834
835void __init smp_setup_processor_id(void)
836{
837 S390_lowcore.cpu_nr = 0;
838}
839
840/*
841 * the frequency of the profiling timer can be changed
842 * by writing a multiplier value into /proc/profile.
843 *
844 * usually you want to run this on all CPUs ;)
845 */
846int setup_profiling_timer(unsigned int multiplier)
847{
848 return 0;
849}
850
851#ifdef CONFIG_HOTPLUG_CPU
852static ssize_t cpu_configure_show(struct device *dev,
853 struct device_attribute *attr, char *buf)
854{
855 ssize_t count;
856
857 mutex_lock(&smp_cpu_state_mutex);
858 count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
859 mutex_unlock(&smp_cpu_state_mutex);
860 return count;
861}
862
863static ssize_t cpu_configure_store(struct device *dev,
864 struct device_attribute *attr,
865 const char *buf, size_t count)
866{
867 struct pcpu *pcpu;
868 int cpu, val, rc;
869 char delim;
870
871 if (sscanf(buf, "%d %c", &val, &delim) != 1)
872 return -EINVAL;
873 if (val != 0 && val != 1)
874 return -EINVAL;
875 get_online_cpus();
876 mutex_lock(&smp_cpu_state_mutex);
877 rc = -EBUSY;
878 /* disallow configuration changes of online cpus and cpu 0 */
879 cpu = dev->id;
880 if (cpu_online(cpu) || cpu == 0)
881 goto out;
882 pcpu = pcpu_devices + cpu;
883 rc = 0;
884 switch (val) {
885 case 0:
886 if (pcpu->state != CPU_STATE_CONFIGURED)
887 break;
888 rc = sclp_cpu_deconfigure(pcpu->address);
889 if (rc)
890 break;
891 pcpu->state = CPU_STATE_STANDBY;
892 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
893 topology_expect_change();
894 break;
895 case 1:
896 if (pcpu->state != CPU_STATE_STANDBY)
897 break;
898 rc = sclp_cpu_configure(pcpu->address);
899 if (rc)
900 break;
901 pcpu->state = CPU_STATE_CONFIGURED;
902 cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
903 topology_expect_change();
904 break;
905 default:
906 break;
907 }
908out:
909 mutex_unlock(&smp_cpu_state_mutex);
910 put_online_cpus();
911 return rc ? rc : count;
912}
913static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
914#endif /* CONFIG_HOTPLUG_CPU */
915
916static ssize_t show_cpu_address(struct device *dev,
917 struct device_attribute *attr, char *buf)
918{
919 return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
920}
921static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
922
923static struct attribute *cpu_common_attrs[] = {
924#ifdef CONFIG_HOTPLUG_CPU
925 &dev_attr_configure.attr,
926#endif
927 &dev_attr_address.attr,
928 NULL,
929};
930
931static struct attribute_group cpu_common_attr_group = {
932 .attrs = cpu_common_attrs,
933};
934
935static ssize_t show_idle_count(struct device *dev,
936 struct device_attribute *attr, char *buf)
937{
938 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
939 unsigned long long idle_count;
940 unsigned int sequence;
941
942 do {
943 sequence = ACCESS_ONCE(idle->sequence);
944 idle_count = ACCESS_ONCE(idle->idle_count);
945 if (ACCESS_ONCE(idle->idle_enter))
946 idle_count++;
947 } while ((sequence & 1) || (idle->sequence != sequence));
948 return sprintf(buf, "%llu\n", idle_count);
949}
950static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
951
952static ssize_t show_idle_time(struct device *dev,
953 struct device_attribute *attr, char *buf)
954{
955 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
956 unsigned long long now, idle_time, idle_enter, idle_exit;
957 unsigned int sequence;
958
959 do {
960 now = get_clock();
961 sequence = ACCESS_ONCE(idle->sequence);
962 idle_time = ACCESS_ONCE(idle->idle_time);
963 idle_enter = ACCESS_ONCE(idle->idle_enter);
964 idle_exit = ACCESS_ONCE(idle->idle_exit);
965 } while ((sequence & 1) || (idle->sequence != sequence));
966 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
967 return sprintf(buf, "%llu\n", idle_time >> 12);
968}
969static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
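/*
 * Lockless read in the two functions above: an odd sequence value, or
 * one that changed during the read, means an update raced with us, so
 * retry. ACCESS_ONCE() keeps the compiler from caching or re-reading
 * the fields; "idle_exit ? : now" is the GNU ?: shorthand for
 * "idle_exit ? idle_exit : now".
 */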
970
971static struct attribute *cpu_online_attrs[] = {
972 &dev_attr_idle_count.attr,
973 &dev_attr_idle_time_us.attr,
974 NULL,
975};
976
977static struct attribute_group cpu_online_attr_group = {
978 .attrs = cpu_online_attrs,
979};
980
981static int __cpuinit smp_cpu_notify(struct notifier_block *self,
982 unsigned long action, void *hcpu)
983{
984 unsigned int cpu = (unsigned int)(long)hcpu;
985 struct cpu *c = &pcpu_devices[cpu].cpu;
986 struct device *s = &c->dev;
987 int err = 0;
988
989 switch (action) {
990 case CPU_ONLINE:
991 case CPU_ONLINE_FROZEN:
992 err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
993 break;
994 case CPU_DEAD:
995 case CPU_DEAD_FROZEN:
996 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
997 break;
998 }
999 return notifier_from_errno(err);
1000}
1001
1002static struct notifier_block __cpuinitdata smp_cpu_nb = {
1003 .notifier_call = smp_cpu_notify,
1004};
1005
1006static int __devinit smp_add_present_cpu(int cpu)
1007{
1008 struct cpu *c = &pcpu_devices[cpu].cpu;
1009 struct device *s = &c->dev;
1010 int rc;
1011
1012 c->hotpluggable = 1;
1013 rc = register_cpu(c, cpu);
1014 if (rc)
1015 goto out;
1016 rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
1017 if (rc)
1018 goto out_cpu;
1019 if (cpu_online(cpu)) {
1020 rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
1021 if (rc)
1022 goto out_online;
1023 }
1024 rc = topology_cpu_init(c);
1025 if (rc)
1026 goto out_topology;
1027 return 0;
1028
1029out_topology:
1030 if (cpu_online(cpu))
1031 sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
1032out_online:
1033 sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
1034out_cpu:
1035#ifdef CONFIG_HOTPLUG_CPU
1036 unregister_cpu(c);
1037#endif
1038out:
1039 return rc;
1040}
1041
1042#ifdef CONFIG_HOTPLUG_CPU
1043
1044int __ref smp_rescan_cpus(void)
1045{
1046 struct sclp_cpu_info *info;
1047 int nr;
1048
1049 info = smp_get_cpu_info();
1050 if (!info)
1051 return -ENOMEM;
1052 get_online_cpus();
1053 mutex_lock(&smp_cpu_state_mutex);
1054 nr = __smp_rescan_cpus(info, 1);
1055 mutex_unlock(&smp_cpu_state_mutex);
1056 put_online_cpus();
1057 kfree(info);
1058 if (nr)
1059 topology_schedule_update();
1060 return 0;
1061}
1062
1063static ssize_t __ref rescan_store(struct device *dev,
1064 struct device_attribute *attr,
1065 const char *buf,
1066 size_t count)
1067{
1068 int rc;
1069
1070 rc = smp_rescan_cpus();
1071 return rc ? rc : count;
1072}
1073static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
1074#endif /* CONFIG_HOTPLUG_CPU */
1075
1076static int __init s390_smp_init(void)
1077{
1078 int cpu, rc;
1079
1080 register_cpu_notifier(&smp_cpu_nb);
1081#ifdef CONFIG_HOTPLUG_CPU
1082 rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
1083 if (rc)
1084 return rc;
1085#endif
1086 for_each_present_cpu(cpu) {
1087 rc = smp_add_present_cpu(cpu);
1088 if (rc)
1089 return rc;
1090 }
1091 return 0;
1092}
1093subsys_initcall(s390_smp_init);