// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>
#include <linux/hugetlb.h>
#include <linux/kmemleak.h>

#include <asm/archrandom.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/abs_lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/maccess.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
#include "entry.h"

/*
 * Machine setup.
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

/*
 * Some code and data needs to stay below 2 GB, even when the kernel would be
 * relocated above 2 GB, because it has to use 31 bit addresses.
 * Such code and data is part of the .amode31 section.
 */
unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31;
unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31;
unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31;
unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31;
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;

/*
 * Control registers CR2, CR5 and CR15 are initialized with addresses
 * of tables that must be placed below 2 GB, which is handled by the
 * AMODE31 sections.
 * Because the AMODE31 sections are relocated below 2 GB at startup,
 * the content of control registers CR2, CR5 and CR15 must be updated
 * with new addresses after the relocation. The control registers are
 * first initialized in head64.S and then updated again after the
 * AMODE31 relocation. We must access the relevant AMODE31 tables
 * indirectly via pointers placed in the .amode31.refs linker section.
 * Those pointers get updated automatically during the AMODE31 relocation
 * and always contain a valid address within the AMODE31 sections.
 */

static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);

static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
	[1] = 0xffffffffffffffff
};

static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0
};

static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
	0, 0, 0x89000000, 0,
	0, 0, 0x8a000000, 0
};

static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;

int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
struct mem_detect_info __bootdata(mem_detect);
struct initrd_data __bootdata(initrd_data);

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata(__amode31_base);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long vmemmap_size;

unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

DEFINE_STATIC_KEY_FALSE(cpu_has_bear);

/*
 * The Write Back bit position in the physaddr is given by the SLPC PCI.
 * Leaving the mask zero always uses write through which is safe
 */
unsigned long mio_wb_bit_mask __ro_after_init;

/*
 * This is set up by the setup routine at boot time;
 * for S390 we need to find out what we have to set up
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameter.
 */

static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);

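/* Register the preferred console driver matching the selected console mode. */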
static void __init set_preferred_console(void)
{
	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
	else if (CONSOLE_IS_VT220)
		add_preferred_console("ttysclp", 0, NULL);
	else if (CONSOLE_IS_HVC)
		add_preferred_console("hvc", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (!strcmp(str, "3215"))
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (!strcmp(str, "3270"))
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);

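/* Choose a sane default console mode for z/VM, KVM and other environments. */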
static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (str_has_prefix(ptr + 8, "3270")) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (str_has_prefix(ptr + 8, "3215")) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_KVM) {
		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
			SET_CONSOLE_VT220;
		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
			SET_CONSOLE_SCLP;
		else
			SET_CONSOLE_HVC;
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

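/*
 * For a dump IPL: ignore all CCW devices except the IPL and console
 * devices and quiesce console output.
 */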
#ifdef CONFIG_CRASH_DUMP
static void __init setup_zfcpdump(void)
{
	if (!is_ipl_type_dump())
		return;
	if (oldmem_data.start)
		return;
	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void *restart_stack;

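/* Allocate a kernel stack: from vmalloc with CONFIG_VMAP_STACK, otherwise from the page allocator. */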
unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
	void *ret;

	ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
			     NUMA_NO_NODE, __builtin_return_address(0));
	kmemleak_not_leak(ret);
	return (unsigned long)ret;
#else
	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}

void stack_free(unsigned long stack)
{
#ifdef CONFIG_VMAP_STACK
	vfree((void *) stack);
#else
	free_pages(stack, THREAD_SIZE_ORDER);
#endif
}

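/* Allocate the async (interrupt) stack for the boot CPU. */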
int __init arch_early_irq_init(void)
{
	unsigned long stack;

	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	if (!stack)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
	return 0;
}

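/*
 * Move the boot CPU off the statically allocated init stack onto a
 * regularly allocated kernel stack before entering rest_init().
 */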
void __init arch_call_rest_init(void)
{
	unsigned long stack;

	smp_reinit_ipl_cpu();
	stack = stack_alloc();
	if (!stack)
		panic("Couldn't allocate kernel stack");
	current->stack = (void *) stack;
#ifdef CONFIG_VMAP_STACK
	current->stack_vm_area = (void *) stack;
#endif
	set_task_stack_end_magic(current);
	stack += STACK_INIT_OFFSET;
	S390_lowcore.kernel_stack = stack;
	call_on_stack_noreturn(rest_init, stack);
}

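/* Set up the boot CPU lowcore while DAT is still off. */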
static void __init setup_lowcore_dat_off(void)
{
	unsigned long int_psw_mask = PSW_KERNEL_BITS;
	struct lowcore *abs_lc, *lc;
	unsigned long mcck_stack;
	unsigned long flags;

	if (IS_ENABLED(CONFIG_KASAN))
		int_psw_mask |= PSW_MASK_DAT;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
	if (!lc)
		panic("%s: Failed to allocate %zu bytes align=%zx\n",
		      __func__, sizeof(*lc), sizeof(*lc));

	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr = (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = (unsigned long) system_call;
	lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = int_psw_mask;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = clock_comparator_max;
	lc->nodat_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long)&init_task;
	lc->lpp = LPP_MAGIC;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->preempt_count = S390_lowcore.preempt_count;
	nmi_alloc_mcesa_early(&lc->mcesad);
	lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	/*
	 * Allocate the global restart stack which is the same for
	 * all CPUs in case *one* of them does a PSW restart.
	 */
	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!restart_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	restart_stack += STACK_INIT_OFFSET;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1U;

	abs_lc = get_abs_lowcore(&flags);
	abs_lc->restart_stack = lc->restart_stack;
	abs_lc->restart_fn = lc->restart_fn;
	abs_lc->restart_data = lc->restart_data;
	abs_lc->restart_source = lc->restart_source;
	abs_lc->restart_psw = lc->restart_psw;
	abs_lc->mcesad = lc->mcesad;
	put_abs_lowcore(abs_lc, flags);

	mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!mcck_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;

	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
	arch_spin_lock_setup(0);
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;

	set_prefix(__pa(lc));
	lowcore_ptr[0] = lc;
}

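/*
 * Once the kernel page tables exist: enable DAT in the interrupt PSWs
 * and publish the boot CPU lowcore via the absolute lowcore mapping.
 */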
static void __init setup_lowcore_dat_on(void)
{
	struct lowcore *abs_lc;
	unsigned long flags;
	int i;

	__ctl_clear_bit(0, 28);
	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.mcck_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
	__ctl_set_bit(0, 28);
	__ctl_store(S390_lowcore.cregs_save_area, 0, 15);
	if (abs_lowcore_map(0, lowcore_ptr[0], true))
		panic("Couldn't setup absolute lowcore");
	abs_lowcore_mapped = true;
	abs_lc = get_abs_lowcore(&flags);
	abs_lc->restart_flags = RESTART_FLAG_CTLREGS;
	abs_lc->program_new_psw = S390_lowcore.program_new_psw;
	for (i = 0; i < 16; i++)
		abs_lc->cregs_save_area[i] = S390_lowcore.cregs_save_area[i];
	put_abs_lowcore(abs_lc, flags);
}

static struct resource code_resource = {
	.name  = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

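/* Register "System RAM" and the kernel text/data/bss regions in the resource tree. */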
static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	phys_addr_t start, end;
	int j;
	u64 i;

	code_resource.start = (unsigned long) _text;
	code_resource.end = (unsigned long) _etext - 1;
	data_resource.start = (unsigned long) _etext;
	data_resource.end = (unsigned long) _edata - 1;
	bss_resource.start = (unsigned long) __bss_start;
	bss_resource.end = (unsigned long) __bss_stop - 1;

	for_each_mem_range(i, &start, &end) {
		res = memblock_alloc(sizeof(*res), 8);
		if (!res)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*res), 8);
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		res->name = "System RAM";
		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res->end = end - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = memblock_alloc(sizeof(*sub_res), 8);
				if (!sub_res)
					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
					      __func__, sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Re-add removed crash kernel memory as reserved memory. This makes
	 * sure it will be mapped with the identity mapping and struct pages
	 * will be created, so it can be resized later on.
	 * However add it later since the crash kernel resource should not be
	 * part of the System RAM resource.
	 */
	if (crashk_res.end) {
		memblock_add_node(crashk_res.start, resource_size(&crashk_res),
				  0, MEMBLOCK_NONE);
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
		insert_resource(&iomem_resource, &crashk_res);
	}
#endif
}

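/* Trim memblock to the identity mapping size determined during early boot. */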
static void __init setup_memory_end(void)
{
	memblock_remove(ident_map_size, PHYS_ADDR_MAX - ident_map_size);
	max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
	pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}

#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from the area
 * [0 - crashkernel memory size] is set offline - it will be exchanged with
 * the crashkernel memory region when kdump is triggered. The crashkernel
 * memory region can never get offlined (pages are unmovable).
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	return NOTIFY_OK;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	rc = parse_crashkernel(boot_command_line, ident_map_size, &crash_size,
			       &crash_base);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	low = crash_base ?: oldmem_data.start;
	high = low + crash_size;
	if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;

		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_phys_alloc_range(crash_size,
						       KEXEC_CRASH_MEM_ALIGN,
						       low, high);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	if (register_memory_notifier(&kdump_mem_nb)) {
		memblock_phys_free(crash_base, crash_size);
		return;
	}

	if (!oldmem_data.start && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}

/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!initrd_data.start || !initrd_data.size)
		return;
	initrd_start = (unsigned long)__va(initrd_data.start);
	initrd_end = initrd_start + initrd_data.size;
	memblock_reserve(initrd_data.start, initrd_data.size);
#endif
}

/*
 * Reserve the memory area used to pass the certificate lists
 */
static void __init reserve_certificate_list(void)
{
	if (ipl_cert_list_addr)
		memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}

static void __init reserve_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_reserve(start, size);
}

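/* Release the memory detection data once it is no longer needed. */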
static void __init free_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_phys_free(start, size);
}

static const char * __init get_mem_info_source(void)
{
	switch (mem_detect.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	return "none";
}

static void __init memblock_add_mem_detect_info(void)
{
	unsigned long start, end;
	int i;

	pr_debug("physmem info source: %s (%hhd)\n",
		 get_mem_info_source(), mem_detect.info_source);
	/* keep memblock lists close to the kernel */
	memblock_set_bottom_up(true);
	for_each_mem_detect_block(i, &start, &end) {
		memblock_add(start, end - start);
		memblock_physmem_add(start, end - start);
	}
	memblock_set_bottom_up(false);
	memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
}

/*
 * Check for initrd being in usable memory
 */
static void __init check_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_data.start && initrd_data.size &&
	    !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
		pr_err("The initial RAM disk does not fit into the memory\n");
		memblock_phys_free(initrd_data.start, initrd_data.size);
		initrd_start = initrd_end = 0;
	}
#endif
}

/*
 * Reserve memory used for lowcore/command line/kernel image.
 */
static void __init reserve_kernel(void)
{
	memblock_reserve(0, STARTUP_NORMAL_OFFSET);
	memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
	memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
	memblock_reserve(__amode31_base, __eamode31 - __samode31);
	memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
	memblock_reserve(__pa(_stext), _end - _stext);
}

static void __init setup_memory(void)
{
	phys_addr_t start, end;
	u64 i;

	/*
	 * Init storage key for present memory
	 */
	for_each_mem_range(i, &start, &end)
		storage_key_init_range(start, end);

	psw_set_key(PAGE_DEFAULT_KEY);
}

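/*
 * Move the .amode31 section to its run-time location below 2 GB and
 * adjust all pointers in the .amode31.refs linker section accordingly.
 */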
static void __init relocate_amode31_section(void)
{
	unsigned long amode31_size = __eamode31 - __samode31;
	long amode31_offset = __amode31_base - __samode31;
	long *ptr;

	pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);

	/* Move original AMODE31 section to the new one */
	memmove((void *)__amode31_base, (void *)__samode31, amode31_size);
	/* Zero out the old AMODE31 section to catch invalid accesses within it */
	memset((void *)__samode31, 0, amode31_size);

	/* Update all AMODE31 region references */
	for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
		*ptr += amode31_offset;
}

/* This must be called after AMODE31 relocation */
static void __init setup_cr(void)
{
	union ctlreg2 cr2;
	union ctlreg5 cr5;
	union ctlreg15 cr15;

	__ctl_duct[1] = (unsigned long)__ctl_aste;
	__ctl_duct[2] = (unsigned long)__ctl_aste;
	__ctl_duct[4] = (unsigned long)__ctl_duald;

	/* Update control registers CR2, CR5 and CR15 */
	__ctl_store(cr2.val, 2, 2);
	__ctl_store(cr5.val, 5, 5);
	__ctl_store(cr15.val, 15, 15);
	cr2.ducto = (unsigned long)__ctl_duct >> 6;
	cr5.pasteo = (unsigned long)__ctl_duct >> 6;
	cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
	__ctl_load(cr2.val, 2, 2);
	__ctl_load(cr5.val, 5, 5);
	__ctl_load(cr15.val, 15, 15);
}

/*
 * Add system information as device randomness
 */
static void __init setup_randomness(void)
{
	struct sysinfo_3_2_2 *vmms;

	vmms = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!vmms)
		panic("Failed to allocate memory for sysinfo structure\n");
	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
	memblock_free(vmms, PAGE_SIZE);

	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
		static_branch_enable(&s390_arch_random_available);
}

/*
 * Find the correct size for the task_struct. This depends on
 * the size of the struct fpu at the end of the thread_struct
 * which is embedded in the task_struct.
 */
static void __init setup_task_size(void)
{
	int task_size = sizeof(struct task_struct);

	if (!MACHINE_HAS_VX) {
		task_size -= sizeof(__vector128) * __NUM_VXRS;
		task_size += sizeof(freg_t) * __NUM_FPRS;
	}
	arch_task_struct_size = task_size;
}

/*
 * Issue diagnose 318 to set the control program name and
 * version codes.
 */
static void __init setup_control_program_code(void)
{
	union diag318_info diag318_info = {
		.cpnc = CPNC_LINUX,
		.cpvc = 0,
	};

	if (!sclp.has_diag318)
		return;

	diag_stat_inc(DIAG_STAT_X318);
	asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
}

/*
 * Print the component list from the IPL report
 */
static void __init log_component_list(void)
{
	struct ipl_rb_component_entry *ptr, *end;
	char *str;

	if (!early_ipl_comp_list_addr)
		return;
	if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
		pr_info("Linux is running with Secure-IPL enabled\n");
	else
		pr_info("Linux is running with Secure-IPL disabled\n");
	ptr = (void *) early_ipl_comp_list_addr;
	end = (void *) ptr + early_ipl_comp_list_size;
	pr_info("The IPL report contains the following components:\n");
	while (ptr < end) {
		if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
			if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
				str = "signed, verified";
			else
				str = "signed, verification failed";
		} else {
			str = "not signed";
		}
		pr_info("%016llx - %016llx (%s)\n",
			ptr->addr, ptr->addr + ptr->len, str);
		ptr++;
	}
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */
void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");
	else
		pr_info("Linux is running as a guest in 64-bit mode\n");

	log_component_list();

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has been already set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
		nospec_auto_detect();

	jump_label_init();
	parse_early_param();
#ifdef CONFIG_CRASH_DUMP
	/* Deactivate elfcorehdr= kernel parameter */
	elfcorehdr_addr = ELFCORE_ADDR_MAX;
#endif

	os_info_init();
	setup_ipl();
	setup_task_size();
	setup_control_program_code();

	/* Do some memory reservations *before* memory is added to memblock */
	reserve_kernel();
	reserve_initrd();
	reserve_certificate_list();
	reserve_mem_detect_info();
	memblock_set_current_limit(ident_map_size);
	memblock_allow_resize();

	/* Get information about *all* installed memory */
	memblock_add_mem_detect_info();

	free_mem_detect_info();
	setup_memory_end();
	memblock_dump_all();
	setup_memory();

	relocate_amode31_section();
	setup_cr();
	setup_uv();
	dma_contiguous_reserve(ident_map_size);
	vmcp_cma_reserve();
	if (MACHINE_HAS_EDAT2)
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);

	check_initrd();
	reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Be aware that smp_save_dump_secondary_cpus() triggers a system reset.
	 * Therefore CPU and device initialization should be done afterwards.
	 */
	smp_save_dump_secondary_cpus();
#endif

	setup_resources();
	setup_lowcore_dat_off();
	smp_fill_possible_mask();
	cpu_detect_mhz_feature();
	cpu_init();
	numa_setup();
	smp_detect_cpus();
	topology_init_early();

	if (test_facility(193))
		static_branch_enable(&cpu_has_bear);

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();
	memcpy_real_init();
	/*
	 * After paging_init created the kernel page table, the new PSWs
	 * in lowcore can now run with DAT enabled.
	 */
	setup_lowcore_dat_on();
#ifdef CONFIG_CRASH_DUMP
	smp_save_dump_ipl_cpu();
#endif

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	apply_alternative_instructions();
	if (IS_ENABLED(CONFIG_EXPOLINE))
		nospec_init_branches();

	/* Setup zfcp/nvme dump support */
	setup_zfcpdump();

	/* Add system specific data to the random pool */
	setup_randomness();
}
v3.5.6
 
   1/*
   2 *  arch/s390/kernel/setup.c
   3 *
   4 *  S390 version
   5 *    Copyright (C) IBM Corp. 1999,2012
   6 *    Author(s): Hartmut Penner (hp@de.ibm.com),
   7 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
   8 *
   9 *  Derived from "arch/i386/kernel/setup.c"
  10 *    Copyright (C) 1995, Linus Torvalds
  11 */
  12
  13/*
  14 * This file handles the architecture-dependent parts of initialization
  15 */
  16
  17#define KMSG_COMPONENT "setup"
  18#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  19
  20#include <linux/errno.h>
  21#include <linux/module.h>
  22#include <linux/sched.h>
 
 
  23#include <linux/kernel.h>
  24#include <linux/memblock.h>
  25#include <linux/mm.h>
  26#include <linux/stddef.h>
  27#include <linux/unistd.h>
  28#include <linux/ptrace.h>
 
  29#include <linux/user.h>
  30#include <linux/tty.h>
  31#include <linux/ioport.h>
  32#include <linux/delay.h>
  33#include <linux/init.h>
  34#include <linux/initrd.h>
  35#include <linux/bootmem.h>
  36#include <linux/root_dev.h>
  37#include <linux/console.h>
  38#include <linux/kernel_stat.h>
 
  39#include <linux/device.h>
  40#include <linux/notifier.h>
  41#include <linux/pfn.h>
  42#include <linux/ctype.h>
  43#include <linux/reboot.h>
  44#include <linux/topology.h>
  45#include <linux/ftrace.h>
  46#include <linux/kexec.h>
  47#include <linux/crash_dump.h>
  48#include <linux/memory.h>
  49#include <linux/compat.h>
 
 
 
  50
 
 
  51#include <asm/ipl.h>
  52#include <asm/uaccess.h>
  53#include <asm/facility.h>
  54#include <asm/smp.h>
  55#include <asm/mmu_context.h>
  56#include <asm/cpcmd.h>
  57#include <asm/lowcore.h>
 
  58#include <asm/irq.h>
  59#include <asm/page.h>
  60#include <asm/ptrace.h>
  61#include <asm/sections.h>
  62#include <asm/ebcdic.h>
  63#include <asm/kvm_virtio.h>
  64#include <asm/diag.h>
  65#include <asm/os_info.h>
 
 
 
 
 
 
 
 
 
 
  66#include "entry.h"
  67
  68long psw_kernel_bits	= PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
  69			  PSW_MASK_EA | PSW_MASK_BA;
  70long psw_user_bits	= PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
  71			  PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
  72			  PSW_MASK_PSTATE | PSW_ASC_HOME;
  73
  74/*
  75 * User copy operations.
  76 */
  77struct uaccess_ops uaccess;
  78EXPORT_SYMBOL(uaccess);
  79
  80/*
  81 * Machine setup..
  82 */
  83unsigned int console_mode = 0;
  84EXPORT_SYMBOL(console_mode);
  85
  86unsigned int console_devno = -1;
  87EXPORT_SYMBOL(console_devno);
  88
  89unsigned int console_irq = -1;
  90EXPORT_SYMBOL(console_irq);
  91
  92unsigned long elf_hwcap = 0;
  93char elf_platform[ELF_PLATFORM_SIZE];
 
 
 
 
 
 
 
 
 
  94
  95struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
 
 
 
 
 
 
 
 
 
 
 
 
  96
  97int __initdata memory_end_set;
  98unsigned long __initdata memory_end;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  99
 100unsigned long VMALLOC_START;
 101EXPORT_SYMBOL(VMALLOC_START);
 102
 103unsigned long VMALLOC_END;
 104EXPORT_SYMBOL(VMALLOC_END);
 105
 106struct page *vmemmap;
 107EXPORT_SYMBOL(vmemmap);
 
 
 
 
 108
 109/* An array with a pointer to the lowcore of every CPU. */
 110struct _lowcore *lowcore_ptr[NR_CPUS];
 111EXPORT_SYMBOL(lowcore_ptr);
 112
 
 
 
 
 
 
 
 
 113/*
 114 * This is set up by the setup-routine at boot-time
 115 * for S390 need to find out, what we have to setup
 116 * using address 0x10400 ...
 117 */
 118
 119#include <asm/setup.h>
 120
 121/*
 122 * condev= and conmode= setup parameter.
 123 */
 124
 125static int __init condev_setup(char *str)
 126{
 127	int vdev;
 128
 129	vdev = simple_strtoul(str, &str, 0);
 130	if (vdev >= 0 && vdev < 65536) {
 131		console_devno = vdev;
 132		console_irq = -1;
 133	}
 134	return 1;
 135}
 136
 137__setup("condev=", condev_setup);
 138
 139static void __init set_preferred_console(void)
 140{
 141	if (MACHINE_IS_KVM)
 142		add_preferred_console("hvc", 0, NULL);
 143	else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
 144		add_preferred_console("ttyS", 0, NULL);
 145	else if (CONSOLE_IS_3270)
 146		add_preferred_console("tty3270", 0, NULL);
 
 
 
 
 147}
 148
 149static int __init conmode_setup(char *str)
 150{
 151#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 152	if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
 153                SET_CONSOLE_SCLP;
 154#endif
 155#if defined(CONFIG_TN3215_CONSOLE)
 156	if (strncmp(str, "3215", 5) == 0)
 157		SET_CONSOLE_3215;
 158#endif
 159#if defined(CONFIG_TN3270_CONSOLE)
 160	if (strncmp(str, "3270", 5) == 0)
 161		SET_CONSOLE_3270;
 162#endif
 163	set_preferred_console();
 164        return 1;
 165}
 166
 167__setup("conmode=", conmode_setup);
 168
 169static void __init conmode_default(void)
 170{
 171	char query_buffer[1024];
 172	char *ptr;
 173
 174        if (MACHINE_IS_VM) {
 175		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
 176		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
 177		ptr = strstr(query_buffer, "SUBCHANNEL =");
 178		console_irq = simple_strtoul(ptr + 13, NULL, 16);
 179		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
 180		ptr = strstr(query_buffer, "CONMODE");
 181		/*
 182		 * Set the conmode to 3215 so that the device recognition 
 183		 * will set the cu_type of the console to 3215. If the
 184		 * conmode is 3270 and we don't set it back then both
 185		 * 3215 and the 3270 driver will try to access the console
 186		 * device (3215 as console and 3270 as normal tty).
 187		 */
 188		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
 189		if (ptr == NULL) {
 190#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 191			SET_CONSOLE_SCLP;
 192#endif
 193			return;
 194		}
 195		if (strncmp(ptr + 8, "3270", 4) == 0) {
 196#if defined(CONFIG_TN3270_CONSOLE)
 197			SET_CONSOLE_3270;
 198#elif defined(CONFIG_TN3215_CONSOLE)
 199			SET_CONSOLE_3215;
 200#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 201			SET_CONSOLE_SCLP;
 202#endif
 203		} else if (strncmp(ptr + 8, "3215", 4) == 0) {
 204#if defined(CONFIG_TN3215_CONSOLE)
 205			SET_CONSOLE_3215;
 206#elif defined(CONFIG_TN3270_CONSOLE)
 207			SET_CONSOLE_3270;
 208#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 209			SET_CONSOLE_SCLP;
 210#endif
 211		}
 
 
 
 
 
 
 
 212	} else {
 213#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 214		SET_CONSOLE_SCLP;
 215#endif
 216	}
 217}
 218
 219#ifdef CONFIG_ZFCPDUMP
 220static void __init setup_zfcpdump(unsigned int console_devno)
 221{
 222	static char str[41];
 223
 224	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 225		return;
 226	if (OLDMEM_BASE)
 227		return;
 228	if (console_devno != -1)
 229		sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
 230			ipl_info.data.fcp.dev_id.devno, console_devno);
 231	else
 232		sprintf(str, " cio_ignore=all,!0.0.%04x",
 233			ipl_info.data.fcp.dev_id.devno);
 234	strcat(boot_command_line, str);
 235	console_loglevel = 2;
 236}
 237#else
 238static inline void setup_zfcpdump(unsigned int console_devno) {}
 239#endif /* CONFIG_ZFCPDUMP */
 240
 241 /*
 242 * Reboot, halt and power_off stubs. They just call _machine_restart,
 243 * _machine_halt or _machine_power_off. 
 244 */
 245
 246void machine_restart(char *command)
 247{
 248	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
 249		/*
 250		 * Only unblank the console if we are called in enabled
 251		 * context or a bust_spinlocks cleared the way for us.
 252		 */
 253		console_unblank();
 254	_machine_restart(command);
 255}
 256
 257void machine_halt(void)
 258{
 259	if (!in_interrupt() || oops_in_progress)
 260		/*
 261		 * Only unblank the console if we are called in enabled
 262		 * context or a bust_spinlocks cleared the way for us.
 263		 */
 264		console_unblank();
 265	_machine_halt();
 266}
 267
 268void machine_power_off(void)
 269{
 270	if (!in_interrupt() || oops_in_progress)
 271		/*
 272		 * Only unblank the console if we are called in enabled
 273		 * context or a bust_spinlocks cleared the way for us.
 274		 */
 275		console_unblank();
 276	_machine_power_off();
 277}
 278
 279/*
 280 * Dummy power off function.
 281 */
 282void (*pm_power_off)(void) = machine_power_off;
 
 283
 284static int __init early_parse_mem(char *p)
 
 
 285{
 286	memory_end = memparse(p, &p);
 287	memory_end_set = 1;
 288	return 0;
 289}
 290early_param("mem", early_parse_mem);
 291
 292static int __init parse_vmalloc(char *arg)
 293{
 294	if (!arg)
 295		return -EINVAL;
 296	VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
 297	return 0;
 
 298}
 299early_param("vmalloc", parse_vmalloc);
 300
 301unsigned int user_mode = HOME_SPACE_MODE;
 302EXPORT_SYMBOL_GPL(user_mode);
 303
 304static int set_amode_primary(void)
 305{
 306	psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
 307	psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
 308#ifdef CONFIG_COMPAT
 309	psw32_user_bits =
 310		(psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
 311#endif
 312
 313	if (MACHINE_HAS_MVCOS) {
 314		memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
 315		return 1;
 316	} else {
 317		memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
 318		return 0;
 319	}
 320}
 321
 322/*
 323 * Switch kernel/user addressing modes?
 324 */
 325static int __init early_parse_switch_amode(char *p)
 326{
 327	user_mode = PRIMARY_SPACE_MODE;
 
 
 
 
 
 328	return 0;
 329}
 330early_param("switch_amode", early_parse_switch_amode);
 331
 332static int __init early_parse_user_mode(char *p)
 333{
 334	if (p && strcmp(p, "primary") == 0)
 335		user_mode = PRIMARY_SPACE_MODE;
 336	else if (!p || strcmp(p, "home") == 0)
 337		user_mode = HOME_SPACE_MODE;
 338	else
 339		return 1;
 340	return 0;
 
 
 
 
 
 
 
 341}
 342early_param("user_mode", early_parse_user_mode);
 343
 344static void setup_addressing_mode(void)
 345{
 346	if (user_mode == PRIMARY_SPACE_MODE) {
 347		if (set_amode_primary())
 348			pr_info("Address spaces switched, "
 349				"mvcos available\n");
 350		else
 351			pr_info("Address spaces switched, "
 352				"mvcos not available\n");
 353	}
 354}
 355
 356void *restart_stack __attribute__((__section__(".data")));
 357
 358static void __init setup_lowcore(void)
 359{
 360	struct _lowcore *lc;
 361
 362	/*
 363	 * Setup lowcore for boot cpu
 364	 */
 365	BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
 366	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
 367	lc->restart_psw.mask = psw_kernel_bits;
 368	lc->restart_psw.addr =
 369		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
 370	lc->external_new_psw.mask = psw_kernel_bits |
 371		PSW_MASK_DAT | PSW_MASK_MCHECK;
 372	lc->external_new_psw.addr =
 373		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
 374	lc->svc_new_psw.mask = psw_kernel_bits |
 375		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 376	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
 377	lc->program_new_psw.mask = psw_kernel_bits |
 378		PSW_MASK_DAT | PSW_MASK_MCHECK;
 379	lc->program_new_psw.addr =
 380		PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
 381	lc->mcck_new_psw.mask = psw_kernel_bits;
 382	lc->mcck_new_psw.addr =
 383		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
 384	lc->io_new_psw.mask = psw_kernel_bits |
 385		PSW_MASK_DAT | PSW_MASK_MCHECK;
 386	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
 387	lc->clock_comparator = -1ULL;
 388	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
 389	lc->async_stack = (unsigned long)
 390		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
 391	lc->panic_stack = (unsigned long)
 392		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
 393	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
 394	lc->thread_info = (unsigned long) &init_thread_union;
 395	lc->machine_flags = S390_lowcore.machine_flags;
 396	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
 397	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
 398	       MAX_FACILITY_BIT/8);
 399#ifndef CONFIG_64BIT
 400	if (MACHINE_HAS_IEEE) {
 401		lc->extended_save_area_addr = (__u32)
 402			__alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
 403		/* enable extended save area */
 404		__ctl_set_bit(14, 29);
 405	}
 406#else
 407	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
 408#endif
 409	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
 410	lc->async_enter_timer = S390_lowcore.async_enter_timer;
 411	lc->exit_timer = S390_lowcore.exit_timer;
 412	lc->user_timer = S390_lowcore.user_timer;
 413	lc->system_timer = S390_lowcore.system_timer;
 414	lc->steal_timer = S390_lowcore.steal_timer;
 415	lc->last_update_timer = S390_lowcore.last_update_timer;
 416	lc->last_update_clock = S390_lowcore.last_update_clock;
 417	lc->ftrace_func = S390_lowcore.ftrace_func;
 418
 419	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
 420	restart_stack += ASYNC_SIZE;
 
 
 
 
 
 
 
 421
 422	/*
 423	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
 424	 * restart data to the absolute zero lowcore. This is necesary if
 425	 * PSW restart is done on an offline CPU that has lowcore zero.
 426	 */
 427	lc->restart_stack = (unsigned long) restart_stack;
 428	lc->restart_fn = (unsigned long) do_restart;
 429	lc->restart_data = 0;
 430	lc->restart_source = -1UL;
 431
 432	/* Setup absolute zero lowcore */
 433	memcpy_absolute(&S390_lowcore.restart_stack, &lc->restart_stack,
 434			4 * sizeof(unsigned long));
 435	memcpy_absolute(&S390_lowcore.restart_psw, &lc->restart_psw,
 436			sizeof(lc->restart_psw));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 437
 438	set_prefix((u32)(unsigned long) lc);
 439	lowcore_ptr[0] = lc;
 440}
 441
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 442static struct resource code_resource = {
 443	.name  = "Kernel code",
 444	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
 445};
 446
 447static struct resource data_resource = {
 448	.name = "Kernel data",
 449	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
 450};
 451
 452static struct resource bss_resource = {
 453	.name = "Kernel bss",
 454	.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
 455};
 456
 457static struct resource __initdata *standard_resources[] = {
 458	&code_resource,
 459	&data_resource,
 460	&bss_resource,
 461};
 462
 463static void __init setup_resources(void)
 464{
 465	struct resource *res, *std_res, *sub_res;
 466	int i, j;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 467
 468	code_resource.start = (unsigned long) &_text;
 469	code_resource.end = (unsigned long) &_etext - 1;
 470	data_resource.start = (unsigned long) &_etext;
 471	data_resource.end = (unsigned long) &_edata - 1;
 472	bss_resource.start = (unsigned long) &__bss_start;
 473	bss_resource.end = (unsigned long) &__bss_stop - 1;
 474
 475	for (i = 0; i < MEMORY_CHUNKS; i++) {
 476		if (!memory_chunk[i].size)
 477			continue;
 478		if (memory_chunk[i].type == CHUNK_OLDMEM ||
 479		    memory_chunk[i].type == CHUNK_CRASHK)
 480			continue;
 481		res = alloc_bootmem_low(sizeof(*res));
 482		res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
 483		switch (memory_chunk[i].type) {
 484		case CHUNK_READ_WRITE:
 485		case CHUNK_CRASHK:
 486			res->name = "System RAM";
 487			break;
 488		case CHUNK_READ_ONLY:
 489			res->name = "System ROM";
 490			res->flags |= IORESOURCE_READONLY;
 491			break;
 492		default:
 493			res->name = "reserved";
 494		}
 495		res->start = memory_chunk[i].addr;
 496		res->end = res->start + memory_chunk[i].size - 1;
 497		request_resource(&iomem_resource, res);
 498
 499		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
 500			std_res = standard_resources[j];
 501			if (std_res->start < res->start ||
 502			    std_res->start > res->end)
 503				continue;
 504			if (std_res->end > res->end) {
 505				sub_res = alloc_bootmem_low(sizeof(*sub_res));
 
 
 
 506				*sub_res = *std_res;
 507				sub_res->end = res->end;
 508				std_res->start = res->end + 1;
 509				request_resource(res, sub_res);
 510			} else {
 511				request_resource(res, std_res);
 512			}
 513		}
 514	}
 515}
 516
 517unsigned long real_memory_size;
 518EXPORT_SYMBOL_GPL(real_memory_size);
 519
 520static void __init setup_memory_end(void)
 521{
 522	unsigned long vmax, vmalloc_size, tmp;
 523	int i;
 524
 525
 526#ifdef CONFIG_ZFCPDUMP
 527	if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) {
 528		memory_end = ZFCPDUMP_HSA_SIZE;
 529		memory_end_set = 1;
 530	}
 531#endif
 532	real_memory_size = 0;
 533	memory_end &= PAGE_MASK;
 534
 535	/*
 536	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
 537	 * extra checks that HOLES_IN_ZONE would require.
 
 
 
 538	 */
 539	for (i = 0; i < MEMORY_CHUNKS; i++) {
 540		unsigned long start, end;
 541		struct mem_chunk *chunk;
 542		unsigned long align;
 543
 544		chunk = &memory_chunk[i];
 545		align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
 546		start = (chunk->addr + align - 1) & ~(align - 1);
 547		end = (chunk->addr + chunk->size) & ~(align - 1);
 548		if (start >= end)
 549			memset(chunk, 0, sizeof(*chunk));
 550		else {
 551			chunk->addr = start;
 552			chunk->size = end - start;
 553		}
 554		real_memory_size = max(real_memory_size,
 555				       chunk->addr + chunk->size);
 556	}
 557
 558	/* Choose kernel address space layout: 2, 3, or 4 levels. */
 559#ifdef CONFIG_64BIT
 560	vmalloc_size = VMALLOC_END ?: 128UL << 30;
 561	tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
 562	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
 563	if (tmp <= (1UL << 42))
 564		vmax = 1UL << 42;	/* 3-level kernel page table */
 565	else
 566		vmax = 1UL << 53;	/* 4-level kernel page table */
 567#else
 568	vmalloc_size = VMALLOC_END ?: 96UL << 20;
 569	vmax = 1UL << 31;		/* 2-level kernel page table */
 570#endif
 571	/* vmalloc area is at the end of the kernel address space. */
 572	VMALLOC_END = vmax;
 573	VMALLOC_START = vmax - vmalloc_size;
 574
 575	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
 576	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
 577	tmp = VMALLOC_START - tmp * sizeof(struct page);
 578	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
 579	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
 580	vmemmap = (struct page *) tmp;
 581
 582	/* Take care that memory_end is set and <= vmemmap */
 583	memory_end = min(memory_end ?: real_memory_size, tmp);
 584
 585	/* Fixup memory chunk array to fit into 0..memory_end */
 586	for (i = 0; i < MEMORY_CHUNKS; i++) {
 587		struct mem_chunk *chunk = &memory_chunk[i];
 588
 589		if (chunk->addr >= memory_end) {
 590			memset(chunk, 0, sizeof(*chunk));
 591			continue;
 592		}
 593		if (chunk->addr + chunk->size > memory_end)
 594			chunk->size = memory_end - chunk->addr;
 595	}
 596}
 597
 598static void __init setup_vmcoreinfo(void)
 599{
 600#ifdef CONFIG_KEXEC
 601	unsigned long ptr = paddr_vmcoreinfo_note();
 602
 603	memcpy_absolute(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
 604#endif
 605}
 606
 607#ifdef CONFIG_CRASH_DUMP
 608
 609/*
 610 * Find suitable location for crashkernel memory
 611 */
 612static unsigned long __init find_crash_base(unsigned long crash_size,
 613					    char **msg)
 614{
 615	unsigned long crash_base;
 616	struct mem_chunk *chunk;
 617	int i;
 618
 619	if (memory_chunk[0].size < crash_size) {
 620		*msg = "first memory chunk must be at least crashkernel size";
 621		return 0;
 622	}
 623	if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
 624		return OLDMEM_BASE;
 625
 626	for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
 627		chunk = &memory_chunk[i];
 628		if (chunk->size == 0)
 629			continue;
 630		if (chunk->type != CHUNK_READ_WRITE)
 631			continue;
 632		if (chunk->size < crash_size)
 633			continue;
 634		crash_base = (chunk->addr + chunk->size) - crash_size;
 635		if (crash_base < crash_size)
 636			continue;
 637		if (crash_base < ZFCPDUMP_HSA_SIZE_MAX)
 638			continue;
 639		if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
 640			continue;
 641		return crash_base;
 642	}
 643	*msg = "no suitable area found";
 644	return 0;
 645}
 646
 647/*
 648 * Check if crash_base and crash_size is valid
 649 */
 650static int __init verify_crash_base(unsigned long crash_base,
 651				    unsigned long crash_size,
 652				    char **msg)
 653{
 654	struct mem_chunk *chunk;
 655	int i;
 656
 657	/*
 658	 * Because we do the swap to zero, we must have at least 'crash_size'
 659	 * bytes free space before crash_base
 660	 */
 661	if (crash_size > crash_base) {
 662		*msg = "crashkernel offset must be greater than size";
 663		return -EINVAL;
 664	}
 665
 666	/* First memory chunk must be at least crash_size */
 667	if (memory_chunk[0].size < crash_size) {
 668		*msg = "first memory chunk must be at least crashkernel size";
 669		return -EINVAL;
 670	}
 671	/* Check if we fit into the respective memory chunk */
 672	for (i = 0; i < MEMORY_CHUNKS; i++) {
 673		chunk = &memory_chunk[i];
 674		if (chunk->size == 0)
 675			continue;
 676		if (crash_base < chunk->addr)
 677			continue;
 678		if (crash_base >= chunk->addr + chunk->size)
 679			continue;
 680		/* we have found the memory chunk */
 681		if (crash_base + crash_size > chunk->addr + chunk->size) {
 682			*msg = "selected memory chunk is too small for "
 683				"crashkernel memory";
 684			return -EINVAL;
 685		}
 686		return 0;
 687	}
 688	*msg = "invalid memory range specified";
 689	return -EINVAL;
 690}
 691
 692/*
 693 * Reserve kdump memory by creating a memory hole in the mem_chunk array
 694 */
 695static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
 696					 int type)
 697{
 698	create_mem_hole(memory_chunk, addr, size, type);
 699}
 700
 701/*
 702 * When kdump is enabled, we have to ensure that no memory from
 703 * the area [0 - crashkernel memory size] and
 704 * [crashk_res.start - crashk_res.end] is set offline.
 705 */
 706static int kdump_mem_notifier(struct notifier_block *nb,
 707			      unsigned long action, void *data)
 708{
 709	struct memory_notify *arg = data;
 710
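
    	/*
    	 * The first test keeps [0, crashkernel size) online, because that
    	 * low range is swapped with the crashkernel region when kdump is
    	 * activated; the remaining tests reject any range that overlaps
    	 * [crashk_res.start, crashk_res.end].
    	 */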
 711	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
 712		return NOTIFY_BAD;
 713	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
 714		return NOTIFY_OK;
 715	if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
 716		return NOTIFY_OK;
 717	return NOTIFY_BAD;
 718}
 719
 720static struct notifier_block kdump_mem_nb = {
 721	.notifier_call = kdump_mem_notifier,
 722};
 723
 724#endif
 725
 726/*
 727 * Make sure that oldmem, where the dump is stored, is protected
 728 */
 729static void reserve_oldmem(void)
 730{
 731#ifdef CONFIG_CRASH_DUMP
 732	if (!OLDMEM_BASE)
 733		return;
 734
 735	reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
 736	reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE,
 737			      CHUNK_OLDMEM);
 738	if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size)
 739		saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
 740	else
 741		saved_max_pfn = PFN_DOWN(real_memory_size) - 1;
 742#endif
 743}
 744
 745/*
 746 * Reserve memory for kdump kernel to be loaded with kexec
 747 */
 748static void __init reserve_crashkernel(void)
 749{
 750#ifdef CONFIG_CRASH_DUMP
 751	unsigned long long crash_base, crash_size;
 752	char *msg = NULL;
 753	int rc;
 754
 755	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
 756			       &crash_base);
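    	/*
    	 * The command line syntax is "crashkernel=size[@offset]", e.g.
    	 * "crashkernel=128M" or "crashkernel=128M@256M"; without an
    	 * offset, crash_base comes back as 0 and a suitable base is
    	 * searched for below.
    	 */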
 757	if (rc || crash_size == 0)
 758		return;
 759	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
 760	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
 761	if (register_memory_notifier(&kdump_mem_nb))
 762		return;
 763	if (!crash_base)
 764		crash_base = find_crash_base(crash_size, &msg);
 765	if (!crash_base) {
 766		pr_info("crashkernel reservation failed: %s\n", msg);
 767		unregister_memory_notifier(&kdump_mem_nb);
 768		return;
 769	}
 770	if (verify_crash_base(crash_base, crash_size, &msg)) {
 771		pr_info("crashkernel reservation failed: %s\n", msg);
 772		unregister_memory_notifier(&kdump_mem_nb);
 773		return;
 774	}
 775	if (!OLDMEM_BASE && MACHINE_IS_VM)
 776		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
 777	crashk_res.start = crash_base;
 778	crashk_res.end = crash_base + crash_size - 1;
 779	insert_resource(&iomem_resource, &crashk_res);
 780	reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK);
 781	pr_info("Reserving %lluMB of memory at %lluMB "
 782		"for crashkernel (System RAM: %luMB)\n",
 783		crash_size >> 20, crash_base >> 20, memory_end >> 20);
 784	os_info_crashkernel_add(crash_base, crash_size);
 785#endif
 786}
 787
 788static void __init setup_memory(void)
 789{
 790	unsigned long bootmap_size;
 791	unsigned long start_pfn, end_pfn;
 792	int i;
 793
 794	/*
 795	 * Partially used pages are not usable, so
 796	 * we round upwards:
 797	 */
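    	/*
    	 * For example, with 4 KB pages PFN_UP(0x1800) = 2 while
    	 * PFN_DOWN(0x1800) = 1, so a page only partially covered at
    	 * either end is left out of the range.
    	 */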
 798	start_pfn = PFN_UP(__pa(&_end));
 799	end_pfn = max_pfn = PFN_DOWN(memory_end);
 800
 801#ifdef CONFIG_BLK_DEV_INITRD
 802	/*
 803	 * Move the initrd in case the bitmap of the bootmem allocator
 804	 * would overwrite it.
 805	 */
 806
 807	if (INITRD_START && INITRD_SIZE) {
 808		unsigned long bmap_size;
 809		unsigned long start;
 810
 811		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
 812		bmap_size = PFN_PHYS(bmap_size);
 813
 814		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
 815			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
 816
 817#ifdef CONFIG_CRASH_DUMP
 818			if (OLDMEM_BASE) {
 819				/* Move initrd behind kdump oldmem */
 820				if (start + INITRD_SIZE > OLDMEM_BASE &&
 821				    start < OLDMEM_BASE + OLDMEM_SIZE)
 822					start = OLDMEM_BASE + OLDMEM_SIZE;
 823			}
 824#endif
 825			if (start + INITRD_SIZE > memory_end) {
 826				pr_err("initrd extends beyond end of "
 827				       "memory (0x%08lx > 0x%08lx) "
 828				       "disabling initrd\n",
 829				       start + INITRD_SIZE, memory_end);
 830				INITRD_START = INITRD_SIZE = 0;
 831			} else {
 832				pr_info("Moving initrd (0x%08lx -> "
 833					"0x%08lx, size: %ld)\n",
 834					INITRD_START, start, INITRD_SIZE);
 835				memmove((void *) start, (void *) INITRD_START,
 836					INITRD_SIZE);
 837				INITRD_START = start;
 838			}
 839		}
 840	}
 841#endif
 842
 843	/*
 844	 * Initialize the boot-time allocator
 845	 */
 846	bootmap_size = init_bootmem(start_pfn, end_pfn);
 847
 848	/*
 849	 * Register RAM areas with the bootmem allocator.
 850	 */
 851
 852	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
 853		unsigned long start_chunk, end_chunk, pfn;
 854
 855		if (memory_chunk[i].type != CHUNK_READ_WRITE &&
 856		    memory_chunk[i].type != CHUNK_CRASHK)
 857			continue;
 858		start_chunk = PFN_DOWN(memory_chunk[i].addr);
 859		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
 860		end_chunk = min(end_chunk, end_pfn);
 861		if (start_chunk >= end_chunk)
 862			continue;
 863		memblock_add_node(PFN_PHYS(start_chunk),
 864				  PFN_PHYS(end_chunk - start_chunk), 0);
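    		/*
    		 * Initialize the hardware storage key of every page the
    		 * allocator may hand out, so protection and the
    		 * reference/change bits start from the default state.
    		 */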
 865		pfn = max(start_chunk, start_pfn);
 866		for (; pfn < end_chunk; pfn++)
 867			page_set_storage_key(PFN_PHYS(pfn),
 868					     PAGE_DEFAULT_KEY, 0);
 869	}
 870
 871	psw_set_key(PAGE_DEFAULT_KEY);
 872
 873	free_bootmem_with_active_regions(0, max_pfn);
 874
 875	/*
 876	 * Reserve memory used for lowcore/command line/kernel image.
 877	 */
 878	reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
 879	reserve_bootmem((unsigned long)_stext,
 880			PFN_PHYS(start_pfn) - (unsigned long)_stext,
 881			BOOTMEM_DEFAULT);
 882	/*
 883	 * Reserve the bootmem bitmap itself as well. We do this in two
 884	 * steps (first step was init_bootmem()) because this catches
 885	 * the (very unlikely) case of us accidentally initializing the
 886	 * bootmem allocator with an invalid RAM area.
 887	 */
 888	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
 889			BOOTMEM_DEFAULT);
 890
 891#ifdef CONFIG_CRASH_DUMP
 892	if (crashk_res.start)
 893		reserve_bootmem(crashk_res.start,
 894				crashk_res.end - crashk_res.start + 1,
 895				BOOTMEM_DEFAULT);
 896	if (is_kdump_kernel())
 897		reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
 898				PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
 899#endif
 900#ifdef CONFIG_BLK_DEV_INITRD
 901	if (INITRD_START && INITRD_SIZE) {
 902		if (INITRD_START + INITRD_SIZE <= memory_end) {
 903			reserve_bootmem(INITRD_START, INITRD_SIZE,
 904					BOOTMEM_DEFAULT);
 905			initrd_start = INITRD_START;
 906			initrd_end = initrd_start + INITRD_SIZE;
 907		} else {
 908			pr_err("initrd extends beyond end of "
 909			       "memory (0x%08lx > 0x%08lx) "
 910			       "disabling initrd\n",
 911			       initrd_start + INITRD_SIZE, memory_end);
 912			initrd_start = initrd_end = 0;
 913		}
 914	}
 915#endif
 916}
 917
 918/*
 919 * Setup hardware capabilities.
 920 */
 921static void __init setup_hwcaps(void)
 922{
 923	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
 924	struct cpuid cpu_id;
 925	int i;
 926
 927	/*
 928	 * The store-facility-list bits, as numbered in the Principles of
 929	 * Operation, count bit 1UL<<31 as facility number 0 down to
 930	 * bit 1UL<<0 as facility number 31.
 931	 *   Bit 0: instructions named N3, "backported" to esa-mode
 932	 *   Bit 2: z/Architecture mode is active
 933	 *   Bit 7: the store-facility-list-extended facility is installed
 934	 *   Bit 17: the message-security assist is installed
 935	 *   Bit 19: the long-displacement facility is installed
 936	 *   Bit 21: the extended-immediate facility is installed
 937	 *   Bit 22: extended-translation facility 3 is installed
 938	 *   Bit 30: extended-translation facility 3 enhancement facility
 939	 * These get translated to:
 940	 *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
 941	 *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
 942	 *   HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
 943	 *   HWCAP_S390_ETF3EH bit 8 (22 && 30).
 944	 */
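    	/*
    	 * Worked example: facility bit 17 (message-security assist) is
    	 * stfl_bits[3], so the loop below sets bit 3 of elf_hwcap, i.e.
    	 * HWCAP_S390_MSA.
    	 */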
 945	for (i = 0; i < 6; i++)
 946		if (test_facility(stfl_bits[i]))
 947			elf_hwcap |= 1UL << i;
 948
 949	if (test_facility(22) && test_facility(30))
 950		elf_hwcap |= HWCAP_S390_ETF3EH;
 951
 952	/*
 953	 * Check for additional facilities with store-facility-list-extended.
 954	 * stfle stores doublewords (8 bytes each) with bit 1ULL<<63 as bit 0
 955	 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
 956	 * as stored by stfl, bits 32-xxx contain additional facilities.
 957	 * How many facility words are stored depends on the number of
 958	 * doublewords passed to the instruction. The additional facilities
 959	 * are:
 960	 *   Bit 42: decimal floating point facility is installed
 961	 *   Bit 44: perform floating point operation facility is installed
 962	 * translated to:
 963	 *   HWCAP_S390_DFP bit 6 (42 && 44).
 964	 */
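    	/*
    	 * Illustration: facility bit 42 sits in the first doubleword
    	 * stored by stfle, under mask 1ULL << (63 - 42) = 1ULL << 21;
    	 * test_facility() hides exactly this bit arithmetic.
    	 */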
 965	if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
 966		elf_hwcap |= HWCAP_S390_DFP;
 967
 968	/*
 969	 * Huge page support HWCAP_S390_HPAGE is bit 7.
 970	 */
 971	if (MACHINE_HAS_HPAGE)
 972		elf_hwcap |= HWCAP_S390_HPAGE;
 973
 974	/*
 975	 * 64-bit register support for 31-bit processes
 976	 * HWCAP_S390_HIGH_GPRS is bit 9.
 977	 */
 978	elf_hwcap |= HWCAP_S390_HIGH_GPRS;
 979
 980	get_cpu_id(&cpu_id);
 981	switch (cpu_id.machine) {
 982	case 0x9672:
 983#if !defined(CONFIG_64BIT)
 984	default:	/* Use "g5" as default for 31 bit kernels. */
 985#endif
 986		strcpy(elf_platform, "g5");
 987		break;
 988	case 0x2064:
 989	case 0x2066:
 990#if defined(CONFIG_64BIT)
 991	default:	/* Use "z900" as default for 64 bit kernels. */
 992#endif
 993		strcpy(elf_platform, "z900");
 994		break;
 995	case 0x2084:
 996	case 0x2086:
 997		strcpy(elf_platform, "z990");
 998		break;
 999	case 0x2094:
1000	case 0x2096:
1001		strcpy(elf_platform, "z9-109");
1002		break;
1003	case 0x2097:
1004	case 0x2098:
1005		strcpy(elf_platform, "z10");
1006		break;
1007	case 0x2817:
1008	case 0x2818:
1009		strcpy(elf_platform, "z196");
1010		break;
1011	}
1012}
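
    	/*
    	 * For reference, a minimal and purely illustrative userspace check
    	 * of these bits via the auxiliary vector could look like this
    	 * (assuming the userspace headers export the same HWCAP_S390_*
    	 * definitions):
    	 *
    	 *	#include <stdio.h>
    	 *	#include <sys/auxv.h>
    	 *
    	 *	int main(void)
    	 *	{
    	 *		unsigned long hwcap = getauxval(AT_HWCAP);
    	 *
    	 *		if (hwcap & HWCAP_S390_HIGH_GPRS)
    	 *			printf("high gprs available\n");
    	 *		return 0;
    	 *	}
    	 */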
1013
1014/*
1015 * Setup function called from init/main.c just after the banner
1016 * was printed.
1017 */
1018
1019void __init setup_arch(char **cmdline_p)
1020{
1021	/*
1022	 * Print what head.S has found out about the machine.
1023	 */
1024#ifndef CONFIG_64BIT
1025	if (MACHINE_IS_VM)
1026		pr_info("Linux is running as a z/VM "
1027			"guest operating system in 31-bit mode\n");
1028	else if (MACHINE_IS_LPAR)
1029		pr_info("Linux is running natively in 31-bit mode\n");
1030	if (MACHINE_HAS_IEEE)
1031		pr_info("The hardware system has IEEE compatible "
1032			"floating point units\n");
1033	else
1034		pr_info("The hardware system has no IEEE compatible "
1035			"floating point units\n");
1036#else /* CONFIG_64BIT */
1037	if (MACHINE_IS_VM)
1038		pr_info("Linux is running as a z/VM "
1039			"guest operating system in 64-bit mode\n");
1040	else if (MACHINE_IS_KVM)
1041		pr_info("Linux is running under KVM in 64-bit mode\n");
1042	else if (MACHINE_IS_LPAR)
1043		pr_info("Linux is running natively in 64-bit mode\n");
1044#endif /* CONFIG_64BIT */
1045
1046	/* Have one command line that is parsed and saved in /proc/cmdline */
1047	/* boot_command_line has been already set up in early.c */
1048	*cmdline_p = boot_command_line;
1049
1050	ROOT_DEV = Root_RAM0;
1051
1052	init_mm.start_code = PAGE_OFFSET;
1053	init_mm.end_code = (unsigned long) &_etext;
1054	init_mm.end_data = (unsigned long) &_edata;
1055	init_mm.brk = (unsigned long) &_end;
1056
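    	/*
    	 * Select the user-copy backend: MVCOS ("move with optional
    	 * specifications") lets a single instruction copy between the
    	 * kernel and user address spaces; machines without it use the
    	 * standard routines.
    	 */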
1057	if (MACHINE_HAS_MVCOS)
1058		memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
1059	else
1060		memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
1061
1062	parse_early_param();
1063
1064	os_info_init();
1065	setup_ipl();
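    	/*
    	 * Note the ordering: reserve_oldmem() and reserve_crashkernel()
    	 * punch their holes into the memory chunk array before
    	 * setup_memory() hands the remaining chunks to the bootmem
    	 * allocator.
    	 */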
1066	setup_memory_end();
1067	setup_addressing_mode();
1068	reserve_oldmem();
1069	reserve_crashkernel();
1070	setup_memory();
1071	setup_resources();
1072	setup_vmcoreinfo();
1073	setup_lowcore();
1074
1075	cpu_init();
1076	s390_init_cpu_topology();
1077
1078	/*
1079	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
1080	 */
1081	setup_hwcaps();
1082
1083	/*
1084	 * Create kernel page tables and switch to virtual addressing.
1085	 */
1086	paging_init();
1087
1088	/* Setup default console */
1089	conmode_default();
1090	set_preferred_console();
1091
1092	/* Setup zfcpdump support */
1093	setup_zfcpdump(console_devno);
1094}