Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * S390 version
4 * Copyright IBM Corp. 1999, 2012
5 * Author(s): Hartmut Penner (hp@de.ibm.com),
6 * Martin Schwidefsky (schwidefsky@de.ibm.com)
7 *
8 * Derived from "arch/i386/kernel/setup.c"
9 * Copyright (C) 1995, Linus Torvalds
10 */
11
12/*
13 * This file handles the architecture-dependent parts of initialization
14 */
15
16#define KMSG_COMPONENT "setup"
17#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
18
19#include <linux/errno.h>
20#include <linux/export.h>
21#include <linux/sched.h>
22#include <linux/sched/task.h>
23#include <linux/cpu.h>
24#include <linux/kernel.h>
25#include <linux/memblock.h>
26#include <linux/mm.h>
27#include <linux/stddef.h>
28#include <linux/unistd.h>
29#include <linux/ptrace.h>
30#include <linux/random.h>
31#include <linux/user.h>
32#include <linux/tty.h>
33#include <linux/ioport.h>
34#include <linux/delay.h>
35#include <linux/init.h>
36#include <linux/initrd.h>
37#include <linux/root_dev.h>
38#include <linux/console.h>
39#include <linux/kernel_stat.h>
40#include <linux/dma-map-ops.h>
41#include <linux/device.h>
42#include <linux/notifier.h>
43#include <linux/pfn.h>
44#include <linux/ctype.h>
45#include <linux/reboot.h>
46#include <linux/topology.h>
47#include <linux/kexec.h>
48#include <linux/crash_dump.h>
49#include <linux/memory.h>
50#include <linux/compat.h>
51#include <linux/start_kernel.h>
52#include <linux/hugetlb.h>
53#include <linux/kmemleak.h>
54
55#include <asm/archrandom.h>
56#include <asm/boot_data.h>
57#include <asm/ipl.h>
58#include <asm/facility.h>
59#include <asm/smp.h>
60#include <asm/mmu_context.h>
61#include <asm/cpcmd.h>
62#include <asm/abs_lowcore.h>
63#include <asm/nmi.h>
64#include <asm/irq.h>
65#include <asm/page.h>
66#include <asm/ptrace.h>
67#include <asm/sections.h>
68#include <asm/ebcdic.h>
69#include <asm/diag.h>
70#include <asm/os_info.h>
71#include <asm/sclp.h>
72#include <asm/stacktrace.h>
73#include <asm/sysinfo.h>
74#include <asm/numa.h>
75#include <asm/alternative.h>
76#include <asm/nospec-branch.h>
77#include <asm/physmem_info.h>
78#include <asm/maccess.h>
79#include <asm/uv.h>
80#include <asm/asm-offsets.h>
81#include "entry.h"
82
83/*
84 * Machine setup..
85 */
86unsigned int console_mode = 0;
87EXPORT_SYMBOL(console_mode);
88
89unsigned int console_devno = -1;
90EXPORT_SYMBOL(console_devno);
91
92unsigned int console_irq = -1;
93EXPORT_SYMBOL(console_irq);
94
95/*
96 * Some code and data needs to stay below 2 GB, even when the kernel would be
97 * relocated above 2 GB, because it has to use 31 bit addresses.
98 * Such code and data is part of the .amode31 section.
99 */
100char __amode31_ref *__samode31 = _samode31;
101char __amode31_ref *__eamode31 = _eamode31;
102char __amode31_ref *__stext_amode31 = _stext_amode31;
103char __amode31_ref *__etext_amode31 = _etext_amode31;
104struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
105struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
106
107/*
108 * Control registers CR2, CR5 and CR15 are initialized with addresses
109 * of tables that must be placed below 2G which is handled by the AMODE31
110 * sections.
111 * Because the AMODE31 sections are relocated below 2G at startup,
112 * the content of control registers CR2, CR5 and CR15 must be updated
113 * with new addresses after the relocation. The initial initialization of
114 * control registers occurs in head64.S and then gets updated again after AMODE31
115 * relocation. We must access the relevant AMODE31 tables indirectly via
116 * pointers placed in the .amode31.refs linker section. Those pointers get
117 * updated automatically during AMODE31 relocation and always contain a valid
118 * address within AMODE31 sections.
119 */
120
121static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);
122
123static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
124 [1] = 0xffffffffffffffff
125};
126
127static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
128 0x80000000, 0, 0, 0,
129 0x80000000, 0, 0, 0,
130 0x80000000, 0, 0, 0,
131 0x80000000, 0, 0, 0,
132 0x80000000, 0, 0, 0,
133 0x80000000, 0, 0, 0,
134 0x80000000, 0, 0, 0,
135 0x80000000, 0, 0, 0
136};
137
138static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
139 0, 0, 0x89000000, 0,
140 0, 0, 0x8a000000, 0
141};
142
143static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
144static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
145static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
146static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
147
148unsigned long __bootdata_preserved(max_mappable);
149struct physmem_info __bootdata(physmem_info);
150
151struct vm_layout __bootdata_preserved(vm_layout);
152EXPORT_SYMBOL(vm_layout);
153int __bootdata_preserved(__kaslr_enabled);
154unsigned int __bootdata_preserved(zlib_dfltcc_support);
155EXPORT_SYMBOL(zlib_dfltcc_support);
156u64 __bootdata_preserved(stfle_fac_list[16]);
157EXPORT_SYMBOL(stfle_fac_list);
158struct oldmem_data __bootdata_preserved(oldmem_data);
159
160unsigned long VMALLOC_START;
161EXPORT_SYMBOL(VMALLOC_START);
162
163unsigned long VMALLOC_END;
164EXPORT_SYMBOL(VMALLOC_END);
165
166struct page *vmemmap;
167EXPORT_SYMBOL(vmemmap);
168unsigned long vmemmap_size;
169
170unsigned long MODULES_VADDR;
171unsigned long MODULES_END;
172
173/* An array with a pointer to the lowcore of every CPU. */
174struct lowcore *lowcore_ptr[NR_CPUS];
175EXPORT_SYMBOL(lowcore_ptr);
176
177DEFINE_STATIC_KEY_FALSE(cpu_has_bear);
178
179/*
180 * The Write Back bit position in the physaddr is given by the SLPC PCI.
181 * Leaving the mask zero always uses write through which is safe
182 */
183unsigned long mio_wb_bit_mask __ro_after_init;
184
185/*
186 * This is set up by the setup-routine at boot-time
187 * for S390 need to find out, what we have to setup
188 * using address 0x10400 ...
189 */
190
191#include <asm/setup.h>
192
193/*
194 * condev= and conmode= setup parameter.
195 */
196
197static int __init condev_setup(char *str)
198{
199 int vdev;
200
201 vdev = simple_strtoul(str, &str, 0);
202 if (vdev >= 0 && vdev < 65536) {
203 console_devno = vdev;
204 console_irq = -1;
205 }
206 return 1;
207}
208
209__setup("condev=", condev_setup);
210
211static void __init set_preferred_console(void)
212{
213 if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
214 add_preferred_console("ttyS", 0, NULL);
215 else if (CONSOLE_IS_3270)
216 add_preferred_console("tty3270", 0, NULL);
217 else if (CONSOLE_IS_VT220)
218 add_preferred_console("ttysclp", 0, NULL);
219 else if (CONSOLE_IS_HVC)
220 add_preferred_console("hvc", 0, NULL);
221}
222
223static int __init conmode_setup(char *str)
224{
225#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
226 if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
227 SET_CONSOLE_SCLP;
228#endif
229#if defined(CONFIG_TN3215_CONSOLE)
230 if (!strcmp(str, "3215"))
231 SET_CONSOLE_3215;
232#endif
233#if defined(CONFIG_TN3270_CONSOLE)
234 if (!strcmp(str, "3270"))
235 SET_CONSOLE_3270;
236#endif
237 set_preferred_console();
238 return 1;
239}
240
241__setup("conmode=", conmode_setup);
242
243static void __init conmode_default(void)
244{
245 char query_buffer[1024];
246 char *ptr;
247
248 if (MACHINE_IS_VM) {
249 cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
250 console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
251 ptr = strstr(query_buffer, "SUBCHANNEL =");
252 console_irq = simple_strtoul(ptr + 13, NULL, 16);
253 cpcmd("QUERY TERM", query_buffer, 1024, NULL);
254 ptr = strstr(query_buffer, "CONMODE");
255 /*
256 * Set the conmode to 3215 so that the device recognition
257 * will set the cu_type of the console to 3215. If the
258 * conmode is 3270 and we don't set it back then both
259 * 3215 and the 3270 driver will try to access the console
260 * device (3215 as console and 3270 as normal tty).
261 */
262 cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
263 if (ptr == NULL) {
264#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
265 SET_CONSOLE_SCLP;
266#endif
267 return;
268 }
269 if (str_has_prefix(ptr + 8, "3270")) {
270#if defined(CONFIG_TN3270_CONSOLE)
271 SET_CONSOLE_3270;
272#elif defined(CONFIG_TN3215_CONSOLE)
273 SET_CONSOLE_3215;
274#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
275 SET_CONSOLE_SCLP;
276#endif
277 } else if (str_has_prefix(ptr + 8, "3215")) {
278#if defined(CONFIG_TN3215_CONSOLE)
279 SET_CONSOLE_3215;
280#elif defined(CONFIG_TN3270_CONSOLE)
281 SET_CONSOLE_3270;
282#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
283 SET_CONSOLE_SCLP;
284#endif
285 }
286 } else if (MACHINE_IS_KVM) {
287 if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
288 SET_CONSOLE_VT220;
289 else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
290 SET_CONSOLE_SCLP;
291 else
292 SET_CONSOLE_HVC;
293 } else {
294#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
295 SET_CONSOLE_SCLP;
296#endif
297 }
298}
299
300#ifdef CONFIG_CRASH_DUMP
301static void __init setup_zfcpdump(void)
302{
303 if (!is_ipl_type_dump())
304 return;
305 if (oldmem_data.start)
306 return;
307 strlcat(boot_command_line, " cio_ignore=all,!ipldev,!condev", COMMAND_LINE_SIZE);
308 console_loglevel = 2;
309}
310#else
311static inline void setup_zfcpdump(void) {}
312#endif /* CONFIG_CRASH_DUMP */
313
314 /*
315 * Reboot, halt and power_off stubs. They just call _machine_restart,
316 * _machine_halt or _machine_power_off.
317 */
318
319void machine_restart(char *command)
320{
321 if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
322 /*
323 * Only unblank the console if we are called in enabled
324 * context or a bust_spinlocks cleared the way for us.
325 */
326 console_unblank();
327 _machine_restart(command);
328}
329
330void machine_halt(void)
331{
332 if (!in_interrupt() || oops_in_progress)
333 /*
334 * Only unblank the console if we are called in enabled
335 * context or a bust_spinlocks cleared the way for us.
336 */
337 console_unblank();
338 _machine_halt();
339}
340
341void machine_power_off(void)
342{
343 if (!in_interrupt() || oops_in_progress)
344 /*
345 * Only unblank the console if we are called in enabled
346 * context or a bust_spinlocks cleared the way for us.
347 */
348 console_unblank();
349 _machine_power_off();
350}
351
352/*
353 * Dummy power off function.
354 */
355void (*pm_power_off)(void) = machine_power_off;
356EXPORT_SYMBOL_GPL(pm_power_off);
357
358void *restart_stack;
359
360unsigned long stack_alloc(void)
361{
362#ifdef CONFIG_VMAP_STACK
363 void *ret;
364
365 ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
366 NUMA_NO_NODE, __builtin_return_address(0));
367 kmemleak_not_leak(ret);
368 return (unsigned long)ret;
369#else
370 return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
371#endif
372}
373
374void stack_free(unsigned long stack)
375{
376#ifdef CONFIG_VMAP_STACK
377 vfree((void *) stack);
378#else
379 free_pages(stack, THREAD_SIZE_ORDER);
380#endif
381}
382
383static unsigned long __init stack_alloc_early(void)
384{
385 unsigned long stack;
386
387 stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
388 if (!stack) {
389 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
390 __func__, THREAD_SIZE, THREAD_SIZE);
391 }
392 return stack;
393}
394
395static void __init setup_lowcore(void)
396{
397 struct lowcore *lc, *abs_lc;
398
399 /*
400 * Setup lowcore for boot cpu
401 */
402 BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
403 lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
404 if (!lc)
405 panic("%s: Failed to allocate %zu bytes align=%zx\n",
406 __func__, sizeof(*lc), sizeof(*lc));
407
408 lc->pcpu = (unsigned long)per_cpu_ptr(&pcpu_devices, 0);
409 lc->restart_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT;
410 lc->restart_psw.addr = __pa(restart_int_handler);
411 lc->external_new_psw.mask = PSW_KERNEL_BITS;
412 lc->external_new_psw.addr = (unsigned long) ext_int_handler;
413 lc->svc_new_psw.mask = PSW_KERNEL_BITS;
414 lc->svc_new_psw.addr = (unsigned long) system_call;
415 lc->program_new_psw.mask = PSW_KERNEL_BITS;
416 lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
417 lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
418 lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
419 lc->io_new_psw.mask = PSW_KERNEL_BITS;
420 lc->io_new_psw.addr = (unsigned long) io_int_handler;
421 lc->clock_comparator = clock_comparator_max;
422 lc->current_task = (unsigned long)&init_task;
423 lc->lpp = LPP_MAGIC;
424 lc->machine_flags = get_lowcore()->machine_flags;
425 lc->preempt_count = get_lowcore()->preempt_count;
426 nmi_alloc_mcesa_early(&lc->mcesad);
427 lc->sys_enter_timer = get_lowcore()->sys_enter_timer;
428 lc->exit_timer = get_lowcore()->exit_timer;
429 lc->user_timer = get_lowcore()->user_timer;
430 lc->system_timer = get_lowcore()->system_timer;
431 lc->steal_timer = get_lowcore()->steal_timer;
432 lc->last_update_timer = get_lowcore()->last_update_timer;
433 lc->last_update_clock = get_lowcore()->last_update_clock;
434 /*
435 * Allocate the global restart stack which is the same for
436 * all CPUs in case *one* of them does a PSW restart.
437 */
438 restart_stack = (void *)(stack_alloc_early() + STACK_INIT_OFFSET);
439 lc->mcck_stack = stack_alloc_early() + STACK_INIT_OFFSET;
440 lc->async_stack = stack_alloc_early() + STACK_INIT_OFFSET;
441 lc->nodat_stack = stack_alloc_early() + STACK_INIT_OFFSET;
442 lc->kernel_stack = get_lowcore()->kernel_stack;
443 /*
444 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
445 * restart data to the absolute zero lowcore. This is necessary if
446 * PSW restart is done on an offline CPU that has lowcore zero.
447 */
448 lc->restart_stack = (unsigned long) restart_stack;
449 lc->restart_fn = (unsigned long) do_restart;
450 lc->restart_data = 0;
451 lc->restart_source = -1U;
452 lc->spinlock_lockval = arch_spin_lockval(0);
453 lc->spinlock_index = 0;
454 arch_spin_lock_setup(0);
455 lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
456 lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
457 lc->preempt_count = PREEMPT_DISABLED;
458 lc->kernel_asce = get_lowcore()->kernel_asce;
459 lc->user_asce = get_lowcore()->user_asce;
460
461 system_ctlreg_init_save_area(lc);
462 abs_lc = get_abs_lowcore();
463 abs_lc->restart_stack = lc->restart_stack;
464 abs_lc->restart_fn = lc->restart_fn;
465 abs_lc->restart_data = lc->restart_data;
466 abs_lc->restart_source = lc->restart_source;
467 abs_lc->restart_psw = lc->restart_psw;
468 abs_lc->restart_flags = RESTART_FLAG_CTLREGS;
469 abs_lc->program_new_psw = lc->program_new_psw;
470 abs_lc->mcesad = lc->mcesad;
471 put_abs_lowcore(abs_lc);
472
473 set_prefix(__pa(lc));
474 lowcore_ptr[0] = lc;
475 if (abs_lowcore_map(0, lowcore_ptr[0], false))
476 panic("Couldn't setup absolute lowcore");
477}
478
479static struct resource code_resource = {
480 .name = "Kernel code",
481 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
482};
483
484static struct resource data_resource = {
485 .name = "Kernel data",
486 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
487};
488
489static struct resource bss_resource = {
490 .name = "Kernel bss",
491 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
492};
493
494static struct resource __initdata *standard_resources[] = {
495 &code_resource,
496 &data_resource,
497 &bss_resource,
498};
499
500static void __init setup_resources(void)
501{
502 struct resource *res, *std_res, *sub_res;
503 phys_addr_t start, end;
504 int j;
505 u64 i;
506
507 code_resource.start = __pa_symbol(_text);
508 code_resource.end = __pa_symbol(_etext) - 1;
509 data_resource.start = __pa_symbol(_etext);
510 data_resource.end = __pa_symbol(_edata) - 1;
511 bss_resource.start = __pa_symbol(__bss_start);
512 bss_resource.end = __pa_symbol(__bss_stop) - 1;
513
514 for_each_mem_range(i, &start, &end) {
515 res = memblock_alloc(sizeof(*res), 8);
516 if (!res)
517 panic("%s: Failed to allocate %zu bytes align=0x%x\n",
518 __func__, sizeof(*res), 8);
519 res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
520
521 res->name = "System RAM";
522 res->start = start;
523 /*
524 * In memblock, end points to the first byte after the
525 * range while in resources, end points to the last byte in
526 * the range.
527 */
528 res->end = end - 1;
529 request_resource(&iomem_resource, res);
530
531 for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
532 std_res = standard_resources[j];
533 if (std_res->start < res->start ||
534 std_res->start > res->end)
535 continue;
536 if (std_res->end > res->end) {
537 sub_res = memblock_alloc(sizeof(*sub_res), 8);
538 if (!sub_res)
539 panic("%s: Failed to allocate %zu bytes align=0x%x\n",
540 __func__, sizeof(*sub_res), 8);
541 *sub_res = *std_res;
542 sub_res->end = res->end;
543 std_res->start = res->end + 1;
544 request_resource(res, sub_res);
545 } else {
546 request_resource(res, std_res);
547 }
548 }
549 }
550#ifdef CONFIG_CRASH_DUMP
551 /*
552 * Re-add removed crash kernel memory as reserved memory. This makes
553 * sure it will be mapped with the identity mapping and struct pages
554 * will be created, so it can be resized later on.
555 * However add it later since the crash kernel resource should not be
556 * part of the System RAM resource.
557 */
558 if (crashk_res.end) {
559 memblock_add_node(crashk_res.start, resource_size(&crashk_res),
560 0, MEMBLOCK_NONE);
561 memblock_reserve(crashk_res.start, resource_size(&crashk_res));
562 insert_resource(&iomem_resource, &crashk_res);
563 }
564#endif
565}
566
567static void __init setup_memory_end(void)
568{
569 max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
570 pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
571}
572
573#ifdef CONFIG_CRASH_DUMP
574
575/*
576 * When kdump is enabled, we have to ensure that no memory from the area
577 * [0 - crashkernel memory size] is set offline - it will be exchanged with
578 * the crashkernel memory region when kdump is triggered. The crashkernel
579 * memory region can never get offlined (pages are unmovable).
580 */
581static int kdump_mem_notifier(struct notifier_block *nb,
582 unsigned long action, void *data)
583{
584 struct memory_notify *arg = data;
585
586 if (action != MEM_GOING_OFFLINE)
587 return NOTIFY_OK;
588 if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
589 return NOTIFY_BAD;
590 return NOTIFY_OK;
591}
592
593static struct notifier_block kdump_mem_nb = {
594 .notifier_call = kdump_mem_notifier,
595};
596
597#endif
598
599/*
600 * Reserve page tables created by decompressor
601 */
602static void __init reserve_pgtables(void)
603{
604 unsigned long start, end;
605 struct reserved_range *range;
606
607 for_each_physmem_reserved_type_range(RR_VMEM, range, &start, &end)
608 memblock_reserve(start, end - start);
609}
610
611/*
612 * Reserve memory for kdump kernel to be loaded with kexec
613 */
614static void __init reserve_crashkernel(void)
615{
616#ifdef CONFIG_CRASH_DUMP
617 unsigned long long crash_base, crash_size;
618 phys_addr_t low, high;
619 int rc;
620
621 rc = parse_crashkernel(boot_command_line, ident_map_size,
622 &crash_size, &crash_base, NULL, NULL);
623
624 crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
625 crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
626 if (rc || crash_size == 0)
627 return;
628
629 if (memblock.memory.regions[0].size < crash_size) {
630 pr_info("crashkernel reservation failed: %s\n",
631 "first memory chunk must be at least crashkernel size");
632 return;
633 }
634
635 low = crash_base ?: oldmem_data.start;
636 high = low + crash_size;
637 if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
638 /* The crashkernel fits into OLDMEM, reuse OLDMEM */
639 crash_base = low;
640 } else {
641 /* Find suitable area in free memory */
642 low = max_t(unsigned long, crash_size, sclp.hsa_size);
643 high = crash_base ? crash_base + crash_size : ULONG_MAX;
644
645 if (crash_base && crash_base < low) {
646 pr_info("crashkernel reservation failed: %s\n",
647 "crash_base too low");
648 return;
649 }
650 low = crash_base ?: low;
651 crash_base = memblock_phys_alloc_range(crash_size,
652 KEXEC_CRASH_MEM_ALIGN,
653 low, high);
654 }
655
656 if (!crash_base) {
657 pr_info("crashkernel reservation failed: %s\n",
658 "no suitable area found");
659 return;
660 }
661
662 if (register_memory_notifier(&kdump_mem_nb)) {
663 memblock_phys_free(crash_base, crash_size);
664 return;
665 }
666
667 if (!oldmem_data.start && MACHINE_IS_VM)
668 diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
669 crashk_res.start = crash_base;
670 crashk_res.end = crash_base + crash_size - 1;
671 memblock_remove(crash_base, crash_size);
672 pr_info("Reserving %lluMB of memory at %lluMB "
673 "for crashkernel (System RAM: %luMB)\n",
674 crash_size >> 20, crash_base >> 20,
675 (unsigned long)memblock.memory.total_size >> 20);
676 os_info_crashkernel_add(crash_base, crash_size);
677#endif
678}
679
680/*
681 * Reserve the initrd from being used by memblock
682 */
683static void __init reserve_initrd(void)
684{
685 unsigned long addr, size;
686
687 if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD) || !get_physmem_reserved(RR_INITRD, &addr, &size))
688 return;
689 initrd_start = (unsigned long)__va(addr);
690 initrd_end = initrd_start + size;
691 memblock_reserve(addr, size);
692}
693
694/*
695 * Reserve the memory area used to pass the certificate lists
696 */
697static void __init reserve_certificate_list(void)
698{
699 if (ipl_cert_list_addr)
700 memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
701}
702
703static void __init reserve_physmem_info(void)
704{
705 unsigned long addr, size;
706
707 if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
708 memblock_reserve(addr, size);
709}
710
711static void __init free_physmem_info(void)
712{
713 unsigned long addr, size;
714
715 if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
716 memblock_phys_free(addr, size);
717}
718
719static void __init memblock_add_physmem_info(void)
720{
721 unsigned long start, end;
722 int i;
723
724 pr_debug("physmem info source: %s (%hhd)\n",
725 get_physmem_info_source(), physmem_info.info_source);
726 /* keep memblock lists close to the kernel */
727 memblock_set_bottom_up(true);
728 for_each_physmem_usable_range(i, &start, &end)
729 memblock_add(start, end - start);
730 for_each_physmem_online_range(i, &start, &end)
731 memblock_physmem_add(start, end - start);
732 memblock_set_bottom_up(false);
733 memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
734}
735
736/*
737 * Reserve memory used for lowcore.
738 */
739static void __init reserve_lowcore(void)
740{
741 void *lowcore_start = get_lowcore();
742 void *lowcore_end = lowcore_start + sizeof(struct lowcore);
743 void *start, *end;
744
745 if ((void *)__identity_base < lowcore_end) {
746 start = max(lowcore_start, (void *)__identity_base);
747 end = min(lowcore_end, (void *)(__identity_base + ident_map_size));
748 memblock_reserve(__pa(start), __pa(end));
749 }
750}
751
752/*
753 * Reserve memory used for absolute lowcore/command line/kernel image.
754 */
755static void __init reserve_kernel(void)
756{
757 memblock_reserve(0, STARTUP_NORMAL_OFFSET);
758 memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
759 memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
760 memblock_reserve(physmem_info.reserved[RR_AMODE31].start, __eamode31 - __samode31);
761 memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
762 memblock_reserve(__pa(_stext), _end - _stext);
763}
764
765static void __init setup_memory(void)
766{
767 phys_addr_t start, end;
768 u64 i;
769
770 /*
771 * Init storage key for present memory
772 */
773 for_each_mem_range(i, &start, &end)
774 storage_key_init_range(start, end);
775
776 psw_set_key(PAGE_DEFAULT_KEY);
777}
778
779static void __init relocate_amode31_section(void)
780{
781 unsigned long amode31_size = __eamode31 - __samode31;
782 long amode31_offset, *ptr;
783
784 amode31_offset = AMODE31_START - (unsigned long)__samode31;
785 pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
786
787 /* Move original AMODE31 section to the new one */
788 memmove((void *)physmem_info.reserved[RR_AMODE31].start, __samode31, amode31_size);
789 /* Zero out the old AMODE31 section to catch invalid accesses within it */
790 memset(__samode31, 0, amode31_size);
791
792 /* Update all AMODE31 region references */
793 for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
794 *ptr += amode31_offset;
795}
796
797/* This must be called after AMODE31 relocation */
798static void __init setup_cr(void)
799{
800 union ctlreg2 cr2;
801 union ctlreg5 cr5;
802 union ctlreg15 cr15;
803
804 __ctl_duct[1] = (unsigned long)__ctl_aste;
805 __ctl_duct[2] = (unsigned long)__ctl_aste;
806 __ctl_duct[4] = (unsigned long)__ctl_duald;
807
808 /* Update control registers CR2, CR5 and CR15 */
809 local_ctl_store(2, &cr2.reg);
810 local_ctl_store(5, &cr5.reg);
811 local_ctl_store(15, &cr15.reg);
812 cr2.ducto = (unsigned long)__ctl_duct >> 6;
813 cr5.pasteo = (unsigned long)__ctl_duct >> 6;
814 cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
815 system_ctl_load(2, &cr2.reg);
816 system_ctl_load(5, &cr5.reg);
817 system_ctl_load(15, &cr15.reg);
818}
819
820/*
821 * Add system information as device randomness
822 */
823static void __init setup_randomness(void)
824{
825 struct sysinfo_3_2_2 *vmms;
826
827 vmms = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
828 if (!vmms)
829 panic("Failed to allocate memory for sysinfo structure\n");
830 if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
831 add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
832 memblock_free(vmms, PAGE_SIZE);
833
834 if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
835 static_branch_enable(&s390_arch_random_available);
836}
837
838/*
839 * Issue diagnose 318 to set the control program name and
840 * version codes.
841 */
842static void __init setup_control_program_code(void)
843{
844 union diag318_info diag318_info = {
845 .cpnc = CPNC_LINUX,
846 .cpvc = 0,
847 };
848
849 if (!sclp.has_diag318)
850 return;
851
852 diag_stat_inc(DIAG_STAT_X318);
853 asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
854}
855
856/*
857 * Print the component list from the IPL report
858 */
859static void __init log_component_list(void)
860{
861 struct ipl_rb_component_entry *ptr, *end;
862 char *str;
863
864 if (!early_ipl_comp_list_addr)
865 return;
866 if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
867 pr_info("Linux is running with Secure-IPL enabled\n");
868 else
869 pr_info("Linux is running with Secure-IPL disabled\n");
870 ptr = __va(early_ipl_comp_list_addr);
871 end = (void *) ptr + early_ipl_comp_list_size;
872 pr_info("The IPL report contains the following components:\n");
873 while (ptr < end) {
874 if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
875 if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
876 str = "signed, verified";
877 else
878 str = "signed, verification failed";
879 } else {
880 str = "not signed";
881 }
882 pr_info("%016llx - %016llx (%s)\n",
883 ptr->addr, ptr->addr + ptr->len, str);
884 ptr++;
885 }
886}
887
888/*
889 * Setup function called from init/main.c just after the banner
890 * was printed.
891 */
892
893void __init setup_arch(char **cmdline_p)
894{
895 /*
896 * print what head.S has found out about the machine
897 */
898 if (MACHINE_IS_VM)
899 pr_info("Linux is running as a z/VM "
900 "guest operating system in 64-bit mode\n");
901 else if (MACHINE_IS_KVM)
902 pr_info("Linux is running under KVM in 64-bit mode\n");
903 else if (MACHINE_IS_LPAR)
904 pr_info("Linux is running natively in 64-bit mode\n");
905 else
906 pr_info("Linux is running as a guest in 64-bit mode\n");
907
908 if (have_relocated_lowcore())
909 pr_info("Lowcore relocated to 0x%px\n", get_lowcore());
910
911 log_component_list();
912
913 /* Have one command line that is parsed and saved in /proc/cmdline */
914 /* boot_command_line has been already set up in early.c */
915 *cmdline_p = boot_command_line;
916
917 ROOT_DEV = Root_RAM0;
918
919 setup_initial_init_mm(_text, _etext, _edata, _end);
920
921 if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
922 nospec_auto_detect();
923
924 jump_label_init();
925 parse_early_param();
926#ifdef CONFIG_CRASH_DUMP
927 /* Deactivate elfcorehdr= kernel parameter */
928 elfcorehdr_addr = ELFCORE_ADDR_MAX;
929#endif
930
931 os_info_init();
932 setup_ipl();
933 setup_control_program_code();
934
935 /* Do some memory reservations *before* memory is added to memblock */
936 reserve_pgtables();
937 reserve_lowcore();
938 reserve_kernel();
939 reserve_initrd();
940 reserve_certificate_list();
941 reserve_physmem_info();
942 memblock_set_current_limit(ident_map_size);
943 memblock_allow_resize();
944
945 /* Get information about *all* installed memory */
946 memblock_add_physmem_info();
947
948 free_physmem_info();
949 setup_memory_end();
950 memblock_dump_all();
951 setup_memory();
952
953 relocate_amode31_section();
954 setup_cr();
955 setup_uv();
956 dma_contiguous_reserve(ident_map_size);
957 vmcp_cma_reserve();
958 if (MACHINE_HAS_EDAT2)
959 hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
960
961 reserve_crashkernel();
962#ifdef CONFIG_CRASH_DUMP
963 /*
964 * Be aware that smp_save_dump_secondary_cpus() triggers a system reset.
965 * Therefore CPU and device initialization should be done afterwards.
966 */
967 smp_save_dump_secondary_cpus();
968#endif
969
970 setup_resources();
971 setup_lowcore();
972 smp_fill_possible_mask();
973 cpu_detect_mhz_feature();
974 cpu_init();
975 numa_setup();
976 smp_detect_cpus();
977 topology_init_early();
978
979 if (test_facility(193))
980 static_branch_enable(&cpu_has_bear);
981
982 /*
983 * Create kernel page tables.
984 */
985 paging_init();
986
987 /*
988 * After paging_init created the kernel page table, the new PSWs
989 * in lowcore can now run with DAT enabled.
990 */
991#ifdef CONFIG_CRASH_DUMP
992 smp_save_dump_ipl_cpu();
993#endif
994
995 /* Setup default console */
996 conmode_default();
997 set_preferred_console();
998
999 apply_alternative_instructions();
1000 if (IS_ENABLED(CONFIG_EXPOLINE))
1001 nospec_init_branches();
1002
1003 /* Setup zfcp/nvme dump support */
1004 setup_zfcpdump();
1005
1006 /* Add system specific data to the random pool */
1007 setup_randomness();
1008}
1009
1010void __init arch_cpu_finalize_init(void)
1011{
1012 sclp_init();
1013}
1/*
2 * S390 version
3 * Copyright IBM Corp. 1999, 2012
4 * Author(s): Hartmut Penner (hp@de.ibm.com),
5 * Martin Schwidefsky (schwidefsky@de.ibm.com)
6 *
7 * Derived from "arch/i386/kernel/setup.c"
8 * Copyright (C) 1995, Linus Torvalds
9 */
10
11/*
12 * This file handles the architecture-dependent parts of initialization
13 */
14
15#define KMSG_COMPONENT "setup"
16#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
18#include <linux/errno.h>
19#include <linux/export.h>
20#include <linux/sched.h>
21#include <linux/kernel.h>
22#include <linux/memblock.h>
23#include <linux/mm.h>
24#include <linux/stddef.h>
25#include <linux/unistd.h>
26#include <linux/ptrace.h>
27#include <linux/random.h>
28#include <linux/user.h>
29#include <linux/tty.h>
30#include <linux/ioport.h>
31#include <linux/delay.h>
32#include <linux/init.h>
33#include <linux/initrd.h>
34#include <linux/bootmem.h>
35#include <linux/root_dev.h>
36#include <linux/console.h>
37#include <linux/kernel_stat.h>
38#include <linux/device.h>
39#include <linux/notifier.h>
40#include <linux/pfn.h>
41#include <linux/ctype.h>
42#include <linux/reboot.h>
43#include <linux/topology.h>
44#include <linux/kexec.h>
45#include <linux/crash_dump.h>
46#include <linux/memory.h>
47#include <linux/compat.h>
48
49#include <asm/ipl.h>
50#include <asm/facility.h>
51#include <asm/smp.h>
52#include <asm/mmu_context.h>
53#include <asm/cpcmd.h>
54#include <asm/lowcore.h>
55#include <asm/irq.h>
56#include <asm/page.h>
57#include <asm/ptrace.h>
58#include <asm/sections.h>
59#include <asm/ebcdic.h>
60#include <asm/kvm_virtio.h>
61#include <asm/diag.h>
62#include <asm/os_info.h>
63#include <asm/sclp.h>
64#include <asm/sysinfo.h>
65#include <asm/numa.h>
66#include "entry.h"
67
68/*
69 * Machine setup..
70 */
71unsigned int console_mode = 0;
72EXPORT_SYMBOL(console_mode);
73
74unsigned int console_devno = -1;
75EXPORT_SYMBOL(console_devno);
76
77unsigned int console_irq = -1;
78EXPORT_SYMBOL(console_irq);
79
80unsigned long elf_hwcap __read_mostly = 0;
81char elf_platform[ELF_PLATFORM_SIZE];
82
83unsigned long int_hwcap = 0;
84
85int __initdata memory_end_set;
86unsigned long __initdata memory_end;
87unsigned long __initdata max_physmem_end;
88
89unsigned long VMALLOC_START;
90EXPORT_SYMBOL(VMALLOC_START);
91
92unsigned long VMALLOC_END;
93EXPORT_SYMBOL(VMALLOC_END);
94
95struct page *vmemmap;
96EXPORT_SYMBOL(vmemmap);
97
98unsigned long MODULES_VADDR;
99unsigned long MODULES_END;
100
101/* An array with a pointer to the lowcore of every CPU. */
102struct lowcore *lowcore_ptr[NR_CPUS];
103EXPORT_SYMBOL(lowcore_ptr);
104
105/*
106 * This is set up by the setup-routine at boot-time
107 * for S390 need to find out, what we have to setup
108 * using address 0x10400 ...
109 */
110
111#include <asm/setup.h>
112
113/*
114 * condev= and conmode= setup parameter.
115 */
116
117static int __init condev_setup(char *str)
118{
119 int vdev;
120
121 vdev = simple_strtoul(str, &str, 0);
122 if (vdev >= 0 && vdev < 65536) {
123 console_devno = vdev;
124 console_irq = -1;
125 }
126 return 1;
127}
128
129__setup("condev=", condev_setup);
130
131static void __init set_preferred_console(void)
132{
133 if (MACHINE_IS_KVM) {
134 if (sclp.has_vt220)
135 add_preferred_console("ttyS", 1, NULL);
136 else if (sclp.has_linemode)
137 add_preferred_console("ttyS", 0, NULL);
138 else
139 add_preferred_console("hvc", 0, NULL);
140 } else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
141 add_preferred_console("ttyS", 0, NULL);
142 else if (CONSOLE_IS_3270)
143 add_preferred_console("tty3270", 0, NULL);
144}
145
146static int __init conmode_setup(char *str)
147{
148#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
149 if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
150 SET_CONSOLE_SCLP;
151#endif
152#if defined(CONFIG_TN3215_CONSOLE)
153 if (strncmp(str, "3215", 5) == 0)
154 SET_CONSOLE_3215;
155#endif
156#if defined(CONFIG_TN3270_CONSOLE)
157 if (strncmp(str, "3270", 5) == 0)
158 SET_CONSOLE_3270;
159#endif
160 set_preferred_console();
161 return 1;
162}
163
164__setup("conmode=", conmode_setup);
165
166static void __init conmode_default(void)
167{
168 char query_buffer[1024];
169 char *ptr;
170
171 if (MACHINE_IS_VM) {
172 cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
173 console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
174 ptr = strstr(query_buffer, "SUBCHANNEL =");
175 console_irq = simple_strtoul(ptr + 13, NULL, 16);
176 cpcmd("QUERY TERM", query_buffer, 1024, NULL);
177 ptr = strstr(query_buffer, "CONMODE");
178 /*
179 * Set the conmode to 3215 so that the device recognition
180 * will set the cu_type of the console to 3215. If the
181 * conmode is 3270 and we don't set it back then both
182 * 3215 and the 3270 driver will try to access the console
183 * device (3215 as console and 3270 as normal tty).
184 */
185 cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
186 if (ptr == NULL) {
187#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
188 SET_CONSOLE_SCLP;
189#endif
190 return;
191 }
192 if (strncmp(ptr + 8, "3270", 4) == 0) {
193#if defined(CONFIG_TN3270_CONSOLE)
194 SET_CONSOLE_3270;
195#elif defined(CONFIG_TN3215_CONSOLE)
196 SET_CONSOLE_3215;
197#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
198 SET_CONSOLE_SCLP;
199#endif
200 } else if (strncmp(ptr + 8, "3215", 4) == 0) {
201#if defined(CONFIG_TN3215_CONSOLE)
202 SET_CONSOLE_3215;
203#elif defined(CONFIG_TN3270_CONSOLE)
204 SET_CONSOLE_3270;
205#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
206 SET_CONSOLE_SCLP;
207#endif
208 }
209 } else {
210#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
211 SET_CONSOLE_SCLP;
212#endif
213 }
214}
215
216#ifdef CONFIG_CRASH_DUMP
217static void __init setup_zfcpdump(void)
218{
219 if (ipl_info.type != IPL_TYPE_FCP_DUMP)
220 return;
221 if (OLDMEM_BASE)
222 return;
223 strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
224 console_loglevel = 2;
225}
226#else
227static inline void setup_zfcpdump(void) {}
228#endif /* CONFIG_CRASH_DUMP */
229
230 /*
231 * Reboot, halt and power_off stubs. They just call _machine_restart,
232 * _machine_halt or _machine_power_off.
233 */
234
235void machine_restart(char *command)
236{
237 if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
238 /*
239 * Only unblank the console if we are called in enabled
240 * context or a bust_spinlocks cleared the way for us.
241 */
242 console_unblank();
243 _machine_restart(command);
244}
245
246void machine_halt(void)
247{
248 if (!in_interrupt() || oops_in_progress)
249 /*
250 * Only unblank the console if we are called in enabled
251 * context or a bust_spinlocks cleared the way for us.
252 */
253 console_unblank();
254 _machine_halt();
255}
256
257void machine_power_off(void)
258{
259 if (!in_interrupt() || oops_in_progress)
260 /*
261 * Only unblank the console if we are called in enabled
262 * context or a bust_spinlocks cleared the way for us.
263 */
264 console_unblank();
265 _machine_power_off();
266}
267
268/*
269 * Dummy power off function.
270 */
271void (*pm_power_off)(void) = machine_power_off;
272EXPORT_SYMBOL_GPL(pm_power_off);
273
274static int __init early_parse_mem(char *p)
275{
276 memory_end = memparse(p, &p);
277 memory_end &= PAGE_MASK;
278 memory_end_set = 1;
279 return 0;
280}
281early_param("mem", early_parse_mem);
282
283static int __init parse_vmalloc(char *arg)
284{
285 if (!arg)
286 return -EINVAL;
287 VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
288 return 0;
289}
290early_param("vmalloc", parse_vmalloc);
291
292void *restart_stack __attribute__((__section__(".data")));
293
294static void __init setup_lowcore(void)
295{
296 struct lowcore *lc;
297
298 /*
299 * Setup lowcore for boot cpu
300 */
301 BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
302 lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
303 lc->restart_psw.mask = PSW_KERNEL_BITS;
304 lc->restart_psw.addr = (unsigned long) restart_int_handler;
305 lc->external_new_psw.mask = PSW_KERNEL_BITS |
306 PSW_MASK_DAT | PSW_MASK_MCHECK;
307 lc->external_new_psw.addr = (unsigned long) ext_int_handler;
308 lc->svc_new_psw.mask = PSW_KERNEL_BITS |
309 PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
310 lc->svc_new_psw.addr = (unsigned long) system_call;
311 lc->program_new_psw.mask = PSW_KERNEL_BITS |
312 PSW_MASK_DAT | PSW_MASK_MCHECK;
313 lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
314 lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
315 lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
316 lc->io_new_psw.mask = PSW_KERNEL_BITS |
317 PSW_MASK_DAT | PSW_MASK_MCHECK;
318 lc->io_new_psw.addr = (unsigned long) io_int_handler;
319 lc->clock_comparator = -1ULL;
320 lc->kernel_stack = ((unsigned long) &init_thread_union)
321 + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
322 lc->async_stack = (unsigned long)
323 __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
324 + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
325 lc->panic_stack = (unsigned long)
326 __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
327 + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
328 lc->current_task = (unsigned long) init_thread_union.thread_info.task;
329 lc->thread_info = (unsigned long) &init_thread_union;
330 lc->lpp = LPP_MAGIC;
331 lc->machine_flags = S390_lowcore.machine_flags;
332 lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
333 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
334 MAX_FACILITY_BIT/8);
335 if (MACHINE_HAS_VX)
336 lc->vector_save_area_addr =
337 (unsigned long) &lc->vector_save_area;
338 lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
339 lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
340 lc->async_enter_timer = S390_lowcore.async_enter_timer;
341 lc->exit_timer = S390_lowcore.exit_timer;
342 lc->user_timer = S390_lowcore.user_timer;
343 lc->system_timer = S390_lowcore.system_timer;
344 lc->steal_timer = S390_lowcore.steal_timer;
345 lc->last_update_timer = S390_lowcore.last_update_timer;
346 lc->last_update_clock = S390_lowcore.last_update_clock;
347
348 restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
349 restart_stack += ASYNC_SIZE;
350
351 /*
352 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
353 * restart data to the absolute zero lowcore. This is necessary if
354 * PSW restart is done on an offline CPU that has lowcore zero.
355 */
356 lc->restart_stack = (unsigned long) restart_stack;
357 lc->restart_fn = (unsigned long) do_restart;
358 lc->restart_data = 0;
359 lc->restart_source = -1UL;
360
361 /* Setup absolute zero lowcore */
362 mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
363 mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
364 mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
365 mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
366 mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
367
368#ifdef CONFIG_SMP
369 lc->spinlock_lockval = arch_spin_lockval(0);
370#endif
371
372 set_prefix((u32)(unsigned long) lc);
373 lowcore_ptr[0] = lc;
374}
375
376static struct resource code_resource = {
377 .name = "Kernel code",
378 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
379};
380
381static struct resource data_resource = {
382 .name = "Kernel data",
383 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
384};
385
386static struct resource bss_resource = {
387 .name = "Kernel bss",
388 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
389};
390
391static struct resource __initdata *standard_resources[] = {
392 &code_resource,
393 &data_resource,
394 &bss_resource,
395};
396
397static void __init setup_resources(void)
398{
399 struct resource *res, *std_res, *sub_res;
400 struct memblock_region *reg;
401 int j;
402
403 code_resource.start = (unsigned long) &_text;
404 code_resource.end = (unsigned long) &_etext - 1;
405 data_resource.start = (unsigned long) &_etext;
406 data_resource.end = (unsigned long) &_edata - 1;
407 bss_resource.start = (unsigned long) &__bss_start;
408 bss_resource.end = (unsigned long) &__bss_stop - 1;
409
410 for_each_memblock(memory, reg) {
411 res = alloc_bootmem_low(sizeof(*res));
412 res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
413
414 res->name = "System RAM";
415 res->start = reg->base;
416 res->end = reg->base + reg->size - 1;
417 request_resource(&iomem_resource, res);
418
419 for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
420 std_res = standard_resources[j];
421 if (std_res->start < res->start ||
422 std_res->start > res->end)
423 continue;
424 if (std_res->end > res->end) {
425 sub_res = alloc_bootmem_low(sizeof(*sub_res));
426 *sub_res = *std_res;
427 sub_res->end = res->end;
428 std_res->start = res->end + 1;
429 request_resource(res, sub_res);
430 } else {
431 request_resource(res, std_res);
432 }
433 }
434 }
435}
436
437static void __init setup_memory_end(void)
438{
439 unsigned long vmax, vmalloc_size, tmp;
440
441 /* Choose kernel address space layout: 2, 3, or 4 levels. */
442 vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
443 tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
444 tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
445 if (tmp + vmalloc_size + MODULES_LEN <= (1UL << 42))
446 vmax = 1UL << 42; /* 3-level kernel page table */
447 else
448 vmax = 1UL << 53; /* 4-level kernel page table */
449 /* module area is at the end of the kernel address space. */
450 MODULES_END = vmax;
451 MODULES_VADDR = MODULES_END - MODULES_LEN;
452 VMALLOC_END = MODULES_VADDR;
453 VMALLOC_START = vmax - vmalloc_size;
454
455 /* Split remaining virtual space between 1:1 mapping & vmemmap array */
456 tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
457 /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
458 tmp = SECTION_ALIGN_UP(tmp);
459 tmp = VMALLOC_START - tmp * sizeof(struct page);
460 tmp &= ~((vmax >> 11) - 1); /* align to page table level */
461 tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
462 vmemmap = (struct page *) tmp;
463
464 /* Take care that memory_end is set and <= vmemmap */
465 memory_end = min(memory_end ?: max_physmem_end, tmp);
466 max_pfn = max_low_pfn = PFN_DOWN(memory_end);
467 memblock_remove(memory_end, ULONG_MAX);
468
469 pr_notice("Max memory size: %luMB\n", memory_end >> 20);
470}
471
472static void __init setup_vmcoreinfo(void)
473{
474 mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
475}
476
477#ifdef CONFIG_CRASH_DUMP
478
479/*
480 * When kdump is enabled, we have to ensure that no memory from
481 * the area [0 - crashkernel memory size] and
482 * [crashk_res.start - crashk_res.end] is set offline.
483 */
484static int kdump_mem_notifier(struct notifier_block *nb,
485 unsigned long action, void *data)
486{
487 struct memory_notify *arg = data;
488
489 if (action != MEM_GOING_OFFLINE)
490 return NOTIFY_OK;
491 if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
492 return NOTIFY_BAD;
493 if (arg->start_pfn > PFN_DOWN(crashk_res.end))
494 return NOTIFY_OK;
495 if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
496 return NOTIFY_OK;
497 return NOTIFY_BAD;
498}
499
500static struct notifier_block kdump_mem_nb = {
501 .notifier_call = kdump_mem_notifier,
502};
503
504#endif
505
506/*
507 * Make sure that the area behind memory_end is protected
508 */
509static void reserve_memory_end(void)
510{
511#ifdef CONFIG_CRASH_DUMP
512 if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
513 !OLDMEM_BASE && sclp.hsa_size) {
514 memory_end = sclp.hsa_size;
515 memory_end &= PAGE_MASK;
516 memory_end_set = 1;
517 }
518#endif
519 if (!memory_end_set)
520 return;
521 memblock_reserve(memory_end, ULONG_MAX);
522}
523
524/*
525 * Make sure that oldmem, where the dump is stored, is protected
526 */
527static void reserve_oldmem(void)
528{
529#ifdef CONFIG_CRASH_DUMP
530 if (OLDMEM_BASE)
531 /* Forget all memory above the running kdump system */
532 memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
533#endif
534}
535
536/*
537 * Make sure that oldmem, where the dump is stored, is protected
538 */
539static void remove_oldmem(void)
540{
541#ifdef CONFIG_CRASH_DUMP
542 if (OLDMEM_BASE)
543 /* Forget all memory above the running kdump system */
544 memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
545#endif
546}
547
548/*
549 * Reserve memory for kdump kernel to be loaded with kexec
550 */
551static void __init reserve_crashkernel(void)
552{
553#ifdef CONFIG_CRASH_DUMP
554 unsigned long long crash_base, crash_size;
555 phys_addr_t low, high;
556 int rc;
557
558 rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
559 &crash_base);
560
561 crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
562 crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
563 if (rc || crash_size == 0)
564 return;
565
566 if (memblock.memory.regions[0].size < crash_size) {
567 pr_info("crashkernel reservation failed: %s\n",
568 "first memory chunk must be at least crashkernel size");
569 return;
570 }
571
572 low = crash_base ?: OLDMEM_BASE;
573 high = low + crash_size;
574 if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
575 /* The crashkernel fits into OLDMEM, reuse OLDMEM */
576 crash_base = low;
577 } else {
578 /* Find suitable area in free memory */
579 low = max_t(unsigned long, crash_size, sclp.hsa_size);
580 high = crash_base ? crash_base + crash_size : ULONG_MAX;
581
582 if (crash_base && crash_base < low) {
583 pr_info("crashkernel reservation failed: %s\n",
584 "crash_base too low");
585 return;
586 }
587 low = crash_base ?: low;
588 crash_base = memblock_find_in_range(low, high, crash_size,
589 KEXEC_CRASH_MEM_ALIGN);
590 }
591
592 if (!crash_base) {
593 pr_info("crashkernel reservation failed: %s\n",
594 "no suitable area found");
595 return;
596 }
597
598 if (register_memory_notifier(&kdump_mem_nb))
599 return;
600
601 if (!OLDMEM_BASE && MACHINE_IS_VM)
602 diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
603 crashk_res.start = crash_base;
604 crashk_res.end = crash_base + crash_size - 1;
605 insert_resource(&iomem_resource, &crashk_res);
606 memblock_remove(crash_base, crash_size);
607 pr_info("Reserving %lluMB of memory at %lluMB "
608 "for crashkernel (System RAM: %luMB)\n",
609 crash_size >> 20, crash_base >> 20,
610 (unsigned long)memblock.memory.total_size >> 20);
611 os_info_crashkernel_add(crash_base, crash_size);
612#endif
613}
614
615/*
616 * Reserve the initrd from being used by memblock
617 */
618static void __init reserve_initrd(void)
619{
620#ifdef CONFIG_BLK_DEV_INITRD
621 initrd_start = INITRD_START;
622 initrd_end = initrd_start + INITRD_SIZE;
623 memblock_reserve(INITRD_START, INITRD_SIZE);
624#endif
625}
626
627/*
628 * Check for initrd being in usable memory
629 */
630static void __init check_initrd(void)
631{
632#ifdef CONFIG_BLK_DEV_INITRD
633 if (INITRD_START && INITRD_SIZE &&
634 !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
635 pr_err("initrd does not fit memory.\n");
636 memblock_free(INITRD_START, INITRD_SIZE);
637 initrd_start = initrd_end = 0;
638 }
639#endif
640}
641
642/*
643 * Reserve memory used for lowcore/command line/kernel image.
644 */
645static void __init reserve_kernel(void)
646{
647 unsigned long start_pfn = PFN_UP(__pa(&_end));
648
649#ifdef CONFIG_DMA_API_DEBUG
650 /*
651 * DMA_API_DEBUG code stumbles over addresses from the
652 * range [_ehead, _stext]. Mark the memory as reserved
653 * so it is not used for CONFIG_DMA_API_DEBUG=y.
654 */
655 memblock_reserve(0, PFN_PHYS(start_pfn));
656#else
657 memblock_reserve(0, (unsigned long)_ehead);
658 memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
659 - (unsigned long)_stext);
660#endif
661}
662
663static void __init setup_memory(void)
664{
665 struct memblock_region *reg;
666
667 /*
668 * Init storage key for present memory
669 */
670 for_each_memblock(memory, reg) {
671 storage_key_init_range(reg->base, reg->base + reg->size);
672 }
673 psw_set_key(PAGE_DEFAULT_KEY);
674
675 /* Only cosmetics */
676 memblock_enforce_memory_limit(memblock_end_of_DRAM());
677}
678
679/*
680 * Setup hardware capabilities.
681 */
682static int __init setup_hwcaps(void)
683{
684 static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
685 struct cpuid cpu_id;
686 int i;
687
688 /*
689 * The store facility list bits numbers as found in the principles
690 * of operation are numbered with bit 1UL<<31 as number 0 to
691 * bit 1UL<<0 as number 31.
692 * Bit 0: instructions named N3, "backported" to esa-mode
693 * Bit 2: z/Architecture mode is active
694 * Bit 7: the store-facility-list-extended facility is installed
695 * Bit 17: the message-security assist is installed
696 * Bit 19: the long-displacement facility is installed
697 * Bit 21: the extended-immediate facility is installed
698 * Bit 22: extended-translation facility 3 is installed
699 * Bit 30: extended-translation facility 3 enhancement facility
700 * These get translated to:
701 * HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
702 * HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
703 * HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
704 * HWCAP_S390_ETF3EH bit 8 (22 && 30).
705 */
706 for (i = 0; i < 6; i++)
707 if (test_facility(stfl_bits[i]))
708 elf_hwcap |= 1UL << i;
709
710 if (test_facility(22) && test_facility(30))
711 elf_hwcap |= HWCAP_S390_ETF3EH;
712
713 /*
714 * Check for additional facilities with store-facility-list-extended.
715 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
716 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
717 * as stored by stfl, bits 32-xxx contain additional facilities.
718 * How many facility words are stored depends on the number of
719 * doublewords passed to the instruction. The additional facilities
720 * are:
721 * Bit 42: decimal floating point facility is installed
722 * Bit 44: perform floating point operation facility is installed
723 * translated to:
724 * HWCAP_S390_DFP bit 6 (42 && 44).
725 */
726 if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
727 elf_hwcap |= HWCAP_S390_DFP;
728
729 /*
730 * Huge page support HWCAP_S390_HPAGE is bit 7.
731 */
732 if (MACHINE_HAS_HPAGE)
733 elf_hwcap |= HWCAP_S390_HPAGE;
734
735 /*
736 * 64-bit register support for 31-bit processes
737 * HWCAP_S390_HIGH_GPRS is bit 9.
738 */
739 elf_hwcap |= HWCAP_S390_HIGH_GPRS;
740
741 /*
742 * Transactional execution support HWCAP_S390_TE is bit 10.
743 */
744 if (test_facility(50) && test_facility(73))
745 elf_hwcap |= HWCAP_S390_TE;
746
747 /*
748 * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
749 * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
750 * instead of facility bit 129.
751 */
752 if (MACHINE_HAS_VX)
753 elf_hwcap |= HWCAP_S390_VXRS;
754 get_cpu_id(&cpu_id);
755 add_device_randomness(&cpu_id, sizeof(cpu_id));
756 switch (cpu_id.machine) {
757 case 0x2064:
758 case 0x2066:
759 default: /* Use "z900" as default for 64 bit kernels. */
760 strcpy(elf_platform, "z900");
761 break;
762 case 0x2084:
763 case 0x2086:
764 strcpy(elf_platform, "z990");
765 break;
766 case 0x2094:
767 case 0x2096:
768 strcpy(elf_platform, "z9-109");
769 break;
770 case 0x2097:
771 case 0x2098:
772 strcpy(elf_platform, "z10");
773 break;
774 case 0x2817:
775 case 0x2818:
776 strcpy(elf_platform, "z196");
777 break;
778 case 0x2827:
779 case 0x2828:
780 strcpy(elf_platform, "zEC12");
781 break;
782 case 0x2964:
783 case 0x2965:
784 strcpy(elf_platform, "z13");
785 break;
786 }
787
788 /*
789 * Virtualization support HWCAP_INT_SIE is bit 0.
790 */
791 if (sclp.has_sief2)
792 int_hwcap |= HWCAP_INT_SIE;
793
794 return 0;
795}
796arch_initcall(setup_hwcaps);
797
798/*
799 * Add system information as device randomness
800 */
801static void __init setup_randomness(void)
802{
803 struct sysinfo_3_2_2 *vmms;
804
805 vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL);
806 if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count)
807 add_device_randomness(&vmms, vmms->count);
808 free_page((unsigned long) vmms);
809}
810
811/*
812 * Setup function called from init/main.c just after the banner
813 * was printed.
814 */
815
816void __init setup_arch(char **cmdline_p)
817{
818 /*
819 * print what head.S has found out about the machine
820 */
821 if (MACHINE_IS_VM)
822 pr_info("Linux is running as a z/VM "
823 "guest operating system in 64-bit mode\n");
824 else if (MACHINE_IS_KVM)
825 pr_info("Linux is running under KVM in 64-bit mode\n");
826 else if (MACHINE_IS_LPAR)
827 pr_info("Linux is running natively in 64-bit mode\n");
828
829 /* Have one command line that is parsed and saved in /proc/cmdline */
830 /* boot_command_line has been already set up in early.c */
831 *cmdline_p = boot_command_line;
832
833 ROOT_DEV = Root_RAM0;
834
835 /* Is init_mm really needed? */
836 init_mm.start_code = PAGE_OFFSET;
837 init_mm.end_code = (unsigned long) &_etext;
838 init_mm.end_data = (unsigned long) &_edata;
839 init_mm.brk = (unsigned long) &_end;
840
841 parse_early_param();
842#ifdef CONFIG_CRASH_DUMP
843 /* Deactivate elfcorehdr= kernel parameter */
844 elfcorehdr_addr = ELFCORE_ADDR_MAX;
845#endif
846
847 os_info_init();
848 setup_ipl();
849
850 /* Do some memory reservations *before* memory is added to memblock */
851 reserve_memory_end();
852 reserve_oldmem();
853 reserve_kernel();
854 reserve_initrd();
855 memblock_allow_resize();
856
857 /* Get information about *all* installed memory */
858 detect_memory_memblock();
859
860 remove_oldmem();
861
862 /*
863 * Make sure all chunks are MAX_ORDER aligned so we don't need the
864 * extra checks that HOLES_IN_ZONE would require.
865 *
866 * Is this still required?
867 */
868 memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));
869
870 setup_memory_end();
871 setup_memory();
872
873 check_initrd();
874 reserve_crashkernel();
875#ifdef CONFIG_CRASH_DUMP
876 /*
877 * Be aware that smp_save_dump_cpus() triggers a system reset.
878 * Therefore CPU and device initialization should be done afterwards.
879 */
880 smp_save_dump_cpus();
881#endif
882
883 setup_resources();
884 setup_vmcoreinfo();
885 setup_lowcore();
886 smp_fill_possible_mask();
887 cpu_init();
888 numa_setup();
889
890 /*
891 * Create kernel page tables and switch to virtual addressing.
892 */
893 paging_init();
894
895 /* Setup default console */
896 conmode_default();
897 set_preferred_console();
898
899 /* Setup zfcpdump support */
900 setup_zfcpdump();
901
902 /* Add system specific data to the random pool */
903 setup_randomness();
904}