v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  S390 version
   4 *    Copyright IBM Corp. 1999, 2012
   5 *    Author(s): Hartmut Penner (hp@de.ibm.com),
   6 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
   7 *
   8 *  Derived from "arch/i386/kernel/setup.c"
   9 *    Copyright (C) 1995, Linus Torvalds
  10 */
  11
  12/*
  13 * This file handles the architecture-dependent parts of initialization
  14 */
  15
  16#define KMSG_COMPONENT "setup"
  17#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  18
  19#include <linux/errno.h>
  20#include <linux/export.h>
  21#include <linux/sched.h>
  22#include <linux/sched/task.h>
  23#include <linux/cpu.h>
  24#include <linux/kernel.h>
  25#include <linux/memblock.h>
  26#include <linux/mm.h>
  27#include <linux/stddef.h>
  28#include <linux/unistd.h>
  29#include <linux/ptrace.h>
  30#include <linux/random.h>
  31#include <linux/user.h>
  32#include <linux/tty.h>
  33#include <linux/ioport.h>
  34#include <linux/delay.h>
  35#include <linux/init.h>
  36#include <linux/initrd.h>
  37#include <linux/root_dev.h>
  38#include <linux/console.h>
  39#include <linux/kernel_stat.h>
  40#include <linux/dma-contiguous.h>
  41#include <linux/device.h>
  42#include <linux/notifier.h>
  43#include <linux/pfn.h>
  44#include <linux/ctype.h>
  45#include <linux/reboot.h>
  46#include <linux/topology.h>
  47#include <linux/kexec.h>
  48#include <linux/crash_dump.h>
  49#include <linux/memory.h>
  50#include <linux/compat.h>
  51#include <linux/start_kernel.h>
  52
  53#include <asm/boot_data.h>
  54#include <asm/ipl.h>
  55#include <asm/facility.h>
  56#include <asm/smp.h>
  57#include <asm/mmu_context.h>
  58#include <asm/cpcmd.h>
  59#include <asm/lowcore.h>
  60#include <asm/nmi.h>
  61#include <asm/irq.h>
  62#include <asm/page.h>
  63#include <asm/ptrace.h>
  64#include <asm/sections.h>
  65#include <asm/ebcdic.h>
  66#include <asm/diag.h>
  67#include <asm/os_info.h>
  68#include <asm/sclp.h>
  69#include <asm/stacktrace.h>
  70#include <asm/sysinfo.h>
  71#include <asm/numa.h>
  72#include <asm/alternative.h>
  73#include <asm/nospec-branch.h>
  74#include <asm/mem_detect.h>
  75#include <asm/uv.h>
  76#include "entry.h"
  77
  78/*
  79 * Machine setup..
  80 */
  81unsigned int console_mode = 0;
  82EXPORT_SYMBOL(console_mode);
  83
  84unsigned int console_devno = -1;
  85EXPORT_SYMBOL(console_devno);
  86
  87unsigned int console_irq = -1;
  88EXPORT_SYMBOL(console_irq);
  89
  90unsigned long elf_hwcap __read_mostly = 0;
  91char elf_platform[ELF_PLATFORM_SIZE];
  92
  93unsigned long int_hwcap = 0;
  94
  95#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
  96int __bootdata_preserved(prot_virt_guest);
  97#endif
  98
  99int __bootdata(noexec_disabled);
 100int __bootdata(memory_end_set);
 101unsigned long __bootdata(memory_end);
 102unsigned long __bootdata(vmalloc_size);
 103unsigned long __bootdata(max_physmem_end);
 104struct mem_detect_info __bootdata(mem_detect);
 105
 106struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
 107struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
 108unsigned long __bootdata_preserved(__swsusp_reset_dma);
 109unsigned long __bootdata_preserved(__stext_dma);
 110unsigned long __bootdata_preserved(__etext_dma);
 111unsigned long __bootdata_preserved(__sdma);
 112unsigned long __bootdata_preserved(__edma);
 113unsigned long __bootdata_preserved(__kaslr_offset);
 114
 115unsigned long VMALLOC_START;
 116EXPORT_SYMBOL(VMALLOC_START);
 117
 118unsigned long VMALLOC_END;
 119EXPORT_SYMBOL(VMALLOC_END);
 120
 121struct page *vmemmap;
 122EXPORT_SYMBOL(vmemmap);
 123
 124unsigned long MODULES_VADDR;
 125unsigned long MODULES_END;
 126
 127/* An array with a pointer to the lowcore of every CPU. */
 128struct lowcore *lowcore_ptr[NR_CPUS];
 129EXPORT_SYMBOL(lowcore_ptr);
 130
 131/*
 132 * This is set up by the setup-routine at boot-time
  133 * for S390 we need to find out what we have to set up
 134 * using address 0x10400 ...
 135 */
 136
 137#include <asm/setup.h>
 138
 139/*
 140 * condev= and conmode= setup parameter.
 141 */
 142
 143static int __init condev_setup(char *str)
 144{
 145	int vdev;
 146
 147	vdev = simple_strtoul(str, &str, 0);
 148	if (vdev >= 0 && vdev < 65536) {
 149		console_devno = vdev;
 150		console_irq = -1;
 151	}
 152	return 1;
 153}
 154
 155__setup("condev=", condev_setup);
 156
 157static void __init set_preferred_console(void)
 158{
 159	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
 160		add_preferred_console("ttyS", 0, NULL);
 161	else if (CONSOLE_IS_3270)
 162		add_preferred_console("tty3270", 0, NULL);
 163	else if (CONSOLE_IS_VT220)
 164		add_preferred_console("ttyS", 1, NULL);
 165	else if (CONSOLE_IS_HVC)
 166		add_preferred_console("hvc", 0, NULL);
 167}
 168
 169static int __init conmode_setup(char *str)
 170{
 171#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 172	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
 173                SET_CONSOLE_SCLP;
 174#endif
 175#if defined(CONFIG_TN3215_CONSOLE)
 176	if (!strcmp(str, "3215"))
 177		SET_CONSOLE_3215;
 178#endif
 179#if defined(CONFIG_TN3270_CONSOLE)
 180	if (!strcmp(str, "3270"))
 181		SET_CONSOLE_3270;
 182#endif
 183	set_preferred_console();
 184        return 1;
 185}
 186
 187__setup("conmode=", conmode_setup);
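/*
 * A hedged usage sketch (device numbers are hypothetical, not from this
 * file): both handlers above consume early kernel parameters appended to
 * the boot command line, e.g.
 *
 *   condev=0x001f conmode=3270
 *
 * condev_setup() accepts any device number below 65536; conmode_setup()
 * accepts "hwc"/"sclp", "3215" or "3270", each subject to the console
 * drivers compiled into the kernel.
 */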
 188
 189static void __init conmode_default(void)
 190{
 191	char query_buffer[1024];
 192	char *ptr;
 193
 194        if (MACHINE_IS_VM) {
 195		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
 196		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
 197		ptr = strstr(query_buffer, "SUBCHANNEL =");
 198		console_irq = simple_strtoul(ptr + 13, NULL, 16);
 199		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
 200		ptr = strstr(query_buffer, "CONMODE");
 201		/*
 202		 * Set the conmode to 3215 so that the device recognition 
 203		 * will set the cu_type of the console to 3215. If the
 204		 * conmode is 3270 and we don't set it back then both
 205		 * 3215 and the 3270 driver will try to access the console
 206		 * device (3215 as console and 3270 as normal tty).
 207		 */
 208		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
 209		if (ptr == NULL) {
 210#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 211			SET_CONSOLE_SCLP;
 212#endif
 213			return;
 214		}
 215		if (str_has_prefix(ptr + 8, "3270")) {
 216#if defined(CONFIG_TN3270_CONSOLE)
 217			SET_CONSOLE_3270;
 218#elif defined(CONFIG_TN3215_CONSOLE)
 219			SET_CONSOLE_3215;
 220#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 221			SET_CONSOLE_SCLP;
 222#endif
 223		} else if (str_has_prefix(ptr + 8, "3215")) {
 224#if defined(CONFIG_TN3215_CONSOLE)
 225			SET_CONSOLE_3215;
 226#elif defined(CONFIG_TN3270_CONSOLE)
 227			SET_CONSOLE_3270;
 228#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 229			SET_CONSOLE_SCLP;
 230#endif
 231		}
 232	} else if (MACHINE_IS_KVM) {
 233		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
 234			SET_CONSOLE_VT220;
 235		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
 236			SET_CONSOLE_SCLP;
 237		else
 238			SET_CONSOLE_HVC;
 239	} else {
 240#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 241		SET_CONSOLE_SCLP;
 242#endif
 243	}
 244	if (IS_ENABLED(CONFIG_VT) && IS_ENABLED(CONFIG_DUMMY_CONSOLE))
 245		conswitchp = &dummy_con;
 246}
 247
 248#ifdef CONFIG_CRASH_DUMP
 249static void __init setup_zfcpdump(void)
 250{
 251	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
 252		return;
 253	if (OLDMEM_BASE)
 254		return;
 255	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
 256	console_loglevel = 2;
 257}
 258#else
 259static inline void setup_zfcpdump(void) {}
 260#endif /* CONFIG_CRASH_DUMP */
 261
  262/*
  263 * Reboot, halt and power_off stubs. They just call _machine_restart,
  264 * _machine_halt or _machine_power_off.
  265 */
 266
 267void machine_restart(char *command)
 268{
 269	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
 270		/*
 271		 * Only unblank the console if we are called in enabled
 272		 * context or a bust_spinlocks cleared the way for us.
 273		 */
 274		console_unblank();
 275	_machine_restart(command);
 276}
 277
 278void machine_halt(void)
 279{
 280	if (!in_interrupt() || oops_in_progress)
 281		/*
 282		 * Only unblank the console if we are called in enabled
 283		 * context or a bust_spinlocks cleared the way for us.
 284		 */
 285		console_unblank();
 286	_machine_halt();
 287}
 288
 289void machine_power_off(void)
 290{
 291	if (!in_interrupt() || oops_in_progress)
 292		/*
 293		 * Only unblank the console if we are called in enabled
 294		 * context or a bust_spinlocks cleared the way for us.
 295		 */
 296		console_unblank();
 297	_machine_power_off();
 298}
 299
 300/*
 301 * Dummy power off function.
 302 */
 303void (*pm_power_off)(void) = machine_power_off;
 304EXPORT_SYMBOL_GPL(pm_power_off);
 305
 306void *restart_stack __section(.data);
 307
 308unsigned long stack_alloc(void)
 309{
 310#ifdef CONFIG_VMAP_STACK
 311	return (unsigned long)
 312		__vmalloc_node_range(THREAD_SIZE, THREAD_SIZE,
 313				     VMALLOC_START, VMALLOC_END,
 314				     THREADINFO_GFP,
 315				     PAGE_KERNEL, 0, NUMA_NO_NODE,
 316				     __builtin_return_address(0));
 317#else
 318	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
 319#endif
 320}
 321
 322void stack_free(unsigned long stack)
 323{
 324#ifdef CONFIG_VMAP_STACK
 325	vfree((void *) stack);
 326#else
 327	free_pages(stack, THREAD_SIZE_ORDER);
 328#endif
 329}
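/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * stacks returned by stack_alloc() must be released with stack_free(),
 * so the CONFIG_VMAP_STACK and page-allocator variants stay paired.
 */
static int example_switch_stack(void)
{
	unsigned long stack = stack_alloc();

	if (!stack)
		return -ENOMEM;
	/* ... use stack + STACK_INIT_OFFSET as the initial stack pointer ... */
	stack_free(stack);
	return 0;
}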
 330
 331int __init arch_early_irq_init(void)
 332{
 333	unsigned long stack;
 334
 335	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
 336	if (!stack)
 337		panic("Couldn't allocate async stack");
 338	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
 339	return 0;
 340}
 341
 342static int __init async_stack_realloc(void)
 343{
 344	unsigned long old, new;
 345
 346	old = S390_lowcore.async_stack - STACK_INIT_OFFSET;
 347	new = stack_alloc();
 348	if (!new)
 349		panic("Couldn't allocate async stack");
 350	S390_lowcore.async_stack = new + STACK_INIT_OFFSET;
 351	free_pages(old, THREAD_SIZE_ORDER);
 352	return 0;
 353}
 354early_initcall(async_stack_realloc);
 355
 356void __init arch_call_rest_init(void)
 357{
 358	struct stack_frame *frame;
 359	unsigned long stack;
 360
 361	stack = stack_alloc();
 362	if (!stack)
 363		panic("Couldn't allocate kernel stack");
 364	current->stack = (void *) stack;
 365#ifdef CONFIG_VMAP_STACK
 366	current->stack_vm_area = (void *) stack;
 367#endif
 368	set_task_stack_end_magic(current);
 369	stack += STACK_INIT_OFFSET;
 370	S390_lowcore.kernel_stack = stack;
 371	frame = (struct stack_frame *) stack;
 372	memset(frame, 0, sizeof(*frame));
 373	/* Branch to rest_init on the new stack, never returns */
 374	asm volatile(
 375		"	la	15,0(%[_frame])\n"
 376		"	jg	rest_init\n"
 377		: : [_frame] "a" (frame));
 378}
 379
 380static void __init setup_lowcore_dat_off(void)
 381{
 382	struct lowcore *lc;
 383
 384	/*
 385	 * Setup lowcore for boot cpu
 386	 */
 387	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
 388	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
 389	if (!lc)
 390		panic("%s: Failed to allocate %zu bytes align=%zx\n",
 391		      __func__, sizeof(*lc), sizeof(*lc));
 392
 393	lc->restart_psw.mask = PSW_KERNEL_BITS;
 394	lc->restart_psw.addr = (unsigned long) restart_int_handler;
 395	lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
 396	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
 397	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
 398		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
 399	lc->svc_new_psw.addr = (unsigned long) system_call;
 400	lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
 401	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
 402	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
 403	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
 404	lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
 405	lc->io_new_psw.addr = (unsigned long) io_int_handler;
 406	lc->clock_comparator = clock_comparator_max;
 407	lc->nodat_stack = ((unsigned long) &init_thread_union)
 408		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 409	lc->current_task = (unsigned long)&init_task;
 410	lc->lpp = LPP_MAGIC;
 411	lc->machine_flags = S390_lowcore.machine_flags;
 412	lc->preempt_count = S390_lowcore.preempt_count;
 413	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
 414	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
 415	       sizeof(lc->stfle_fac_list));
 416	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
 417	       sizeof(lc->alt_stfle_fac_list));
 418	nmi_alloc_boot_cpu(lc);
 419	vdso_alloc_boot_cpu(lc);
 420	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
 421	lc->async_enter_timer = S390_lowcore.async_enter_timer;
 422	lc->exit_timer = S390_lowcore.exit_timer;
 423	lc->user_timer = S390_lowcore.user_timer;
 424	lc->system_timer = S390_lowcore.system_timer;
 425	lc->steal_timer = S390_lowcore.steal_timer;
 426	lc->last_update_timer = S390_lowcore.last_update_timer;
 427	lc->last_update_clock = S390_lowcore.last_update_clock;
 428
 429	/*
 430	 * Allocate the global restart stack which is the same for
 431	 * all CPUs in cast *one* of them does a PSW restart.
 432	 */
 433	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
 434	if (!restart_stack)
 435		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
 436		      __func__, THREAD_SIZE, THREAD_SIZE);
 437	restart_stack += STACK_INIT_OFFSET;
 438
 439	/*
 440	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
 441	 * restart data to the absolute zero lowcore. This is necessary if
 442	 * PSW restart is done on an offline CPU that has lowcore zero.
 443	 */
 444	lc->restart_stack = (unsigned long) restart_stack;
 445	lc->restart_fn = (unsigned long) do_restart;
 446	lc->restart_data = 0;
 447	lc->restart_source = -1UL;
 448
 449	/* Setup absolute zero lowcore */
 450	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
 451	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
 452	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
 453	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
 454	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
 455
 456	lc->spinlock_lockval = arch_spin_lockval(0);
 457	lc->spinlock_index = 0;
 458	arch_spin_lock_setup(0);
 459	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
 460
 461	set_prefix((u32)(unsigned long) lc);
 462	lowcore_ptr[0] = lc;
 463}
 464
 465static void __init setup_lowcore_dat_on(void)
 466{
 467	__ctl_clear_bit(0, 28);
 468	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
 469	S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
 470	S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
 471	S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
 472	__ctl_set_bit(0, 28);
 473}
 474
 475static struct resource code_resource = {
 476	.name  = "Kernel code",
 477	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 478};
 479
 480static struct resource data_resource = {
 481	.name = "Kernel data",
 482	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 483};
 484
 485static struct resource bss_resource = {
 486	.name = "Kernel bss",
 487	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 488};
 489
 490static struct resource __initdata *standard_resources[] = {
 491	&code_resource,
 492	&data_resource,
 493	&bss_resource,
 494};
 495
 496static void __init setup_resources(void)
 497{
 498	struct resource *res, *std_res, *sub_res;
 499	struct memblock_region *reg;
 500	int j;
 501
 502	code_resource.start = (unsigned long) _text;
 503	code_resource.end = (unsigned long) _etext - 1;
 504	data_resource.start = (unsigned long) _etext;
 505	data_resource.end = (unsigned long) _edata - 1;
 506	bss_resource.start = (unsigned long) __bss_start;
 507	bss_resource.end = (unsigned long) __bss_stop - 1;
 508
 509	for_each_memblock(memory, reg) {
 510		res = memblock_alloc(sizeof(*res), 8);
 511		if (!res)
 512			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
 513			      __func__, sizeof(*res), 8);
 514		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
 515
 516		res->name = "System RAM";
 517		res->start = reg->base;
 518		res->end = reg->base + reg->size - 1;
 519		request_resource(&iomem_resource, res);
 520
 521		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
 522			std_res = standard_resources[j];
 523			if (std_res->start < res->start ||
 524			    std_res->start > res->end)
 525				continue;
 526			if (std_res->end > res->end) {
 527				sub_res = memblock_alloc(sizeof(*sub_res), 8);
 528				if (!sub_res)
 529					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
 530					      __func__, sizeof(*sub_res), 8);
 531				*sub_res = *std_res;
 532				sub_res->end = res->end;
 533				std_res->start = res->end + 1;
 534				request_resource(res, sub_res);
 535			} else {
 536				request_resource(res, std_res);
 537			}
 538		}
 539	}
 540#ifdef CONFIG_CRASH_DUMP
 541	/*
 542	 * Re-add removed crash kernel memory as reserved memory. This makes
 543	 * sure it will be mapped with the identity mapping and struct pages
 544	 * will be created, so it can be resized later on.
 545	 * However add it later since the crash kernel resource should not be
 546	 * part of the System RAM resource.
 547	 */
 548	if (crashk_res.end) {
 549		memblock_add_node(crashk_res.start, resource_size(&crashk_res), 0);
 550		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
 551		insert_resource(&iomem_resource, &crashk_res);
 552	}
 553#endif
 554}
 555
 556static void __init setup_memory_end(void)
 557{
 558	unsigned long vmax, tmp;
 559
 560	/* Choose kernel address space layout: 3 or 4 levels. */
 561	if (IS_ENABLED(CONFIG_KASAN)) {
 562		vmax = IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)
 563			   ? _REGION1_SIZE
 564			   : _REGION2_SIZE;
 565	} else {
 566		tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
 567		tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
 568		if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
 569			vmax = _REGION2_SIZE; /* 3-level kernel page table */
 570		else
 571			vmax = _REGION1_SIZE; /* 4-level kernel page table */
 572	}
 573
 574	/* module area is at the end of the kernel address space. */
 575	MODULES_END = vmax;
 576	MODULES_VADDR = MODULES_END - MODULES_LEN;
 577	VMALLOC_END = MODULES_VADDR;
 578	VMALLOC_START = VMALLOC_END - vmalloc_size;
 579
 580	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
 581	tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
 582	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
 583	tmp = SECTION_ALIGN_UP(tmp);
 584	tmp = VMALLOC_START - tmp * sizeof(struct page);
 585	tmp &= ~((vmax >> 11) - 1);	/* align to page table level */
 586	tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
 587	vmemmap = (struct page *) tmp;
 588
 589	/* Take care that memory_end is set and <= vmemmap */
 590	memory_end = min(memory_end ?: max_physmem_end, (unsigned long)vmemmap);
 591#ifdef CONFIG_KASAN
 592	/* fit in kasan shadow memory region between 1:1 and vmemmap */
 593	memory_end = min(memory_end, KASAN_SHADOW_START);
 594	vmemmap = max(vmemmap, (struct page *)KASAN_SHADOW_END);
 595#endif
 596	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
 597	memblock_remove(memory_end, ULONG_MAX);
 598
 599	pr_notice("The maximum memory size is %luMB\n", memory_end >> 20);
 600}
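/*
 * A worked example of the split above, assuming 4 KB pages and a 64-byte
 * struct page (typical values, not guaranteed): every page of 1:1-mapped
 * memory costs PAGE_SIZE + sizeof(struct page) = 4096 + 64 = 4160 bytes
 * of virtual address space, so roughly 4096/4160 of the space below
 * VMALLOC_START goes to the 1:1 mapping and the rest to the vmemmap
 * array.
 */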
 601
 602#ifdef CONFIG_CRASH_DUMP
 603
 604/*
 605 * When kdump is enabled, we have to ensure that no memory from
 606 * the area [0 - crashkernel memory size] and
 607 * [crashk_res.start - crashk_res.end] is set offline.
 608 */
 609static int kdump_mem_notifier(struct notifier_block *nb,
 610			      unsigned long action, void *data)
 611{
 612	struct memory_notify *arg = data;
 613
 614	if (action != MEM_GOING_OFFLINE)
 615		return NOTIFY_OK;
 616	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
 617		return NOTIFY_BAD;
 618	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
 619		return NOTIFY_OK;
 620	if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
 621		return NOTIFY_OK;
 622	return NOTIFY_BAD;
 623}
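/*
 * A worked example with hypothetical addresses: assume a 256 MB crash
 * region at [768 MB, 1024 MB). Offlining a block that starts below
 * 256 MB is refused, because [0, crash size] is exchanged with the crash
 * region when kdump triggers. A block lying entirely in [256 MB, 768 MB)
 * or entirely above 1024 MB is allowed, and anything overlapping the
 * crash region itself is refused.
 */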
 624
 625static struct notifier_block kdump_mem_nb = {
 626	.notifier_call = kdump_mem_notifier,
 627};
 628
 629#endif
 630
 631/*
 632 * Make sure that the area behind memory_end is protected
 633 */
 634static void reserve_memory_end(void)
 635{
 636	if (memory_end_set)
 637		memblock_reserve(memory_end, ULONG_MAX);
 638}
 639
 640/*
 641 * Make sure that oldmem, where the dump is stored, is protected
 642 */
 643static void reserve_oldmem(void)
 644{
 645#ifdef CONFIG_CRASH_DUMP
 646	if (OLDMEM_BASE)
 647		/* Forget all memory above the running kdump system */
 648		memblock_reserve(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
 649#endif
 650}
 651
 652/*
 653 * Make sure that oldmem, where the dump is stored, is protected
 654 */
 655static void remove_oldmem(void)
 656{
 657#ifdef CONFIG_CRASH_DUMP
 658	if (OLDMEM_BASE)
 659		/* Forget all memory above the running kdump system */
 660		memblock_remove(OLDMEM_SIZE, (phys_addr_t)ULONG_MAX);
 661#endif
 662}
 663
 664/*
 665 * Reserve memory for kdump kernel to be loaded with kexec
 666 */
 667static void __init reserve_crashkernel(void)
 668{
 669#ifdef CONFIG_CRASH_DUMP
 670	unsigned long long crash_base, crash_size;
 671	phys_addr_t low, high;
 672	int rc;
 673
 674	rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
 675			       &crash_base);
 676
 677	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
 678	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
 679	if (rc || crash_size == 0)
 680		return;
 681
 682	if (memblock.memory.regions[0].size < crash_size) {
 683		pr_info("crashkernel reservation failed: %s\n",
 684			"first memory chunk must be at least crashkernel size");
 685		return;
 686	}
 687
 688	low = crash_base ?: OLDMEM_BASE;
 689	high = low + crash_size;
 690	if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
 691		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
 692		crash_base = low;
 693	} else {
 694		/* Find suitable area in free memory */
 695		low = max_t(unsigned long, crash_size, sclp.hsa_size);
 696		high = crash_base ? crash_base + crash_size : ULONG_MAX;
 697
 698		if (crash_base && crash_base < low) {
 699			pr_info("crashkernel reservation failed: %s\n",
 700				"crash_base too low");
 701			return;
 702		}
 703		low = crash_base ?: low;
 704		crash_base = memblock_find_in_range(low, high, crash_size,
 705						    KEXEC_CRASH_MEM_ALIGN);
 706	}
 707
 708	if (!crash_base) {
 709		pr_info("crashkernel reservation failed: %s\n",
 710			"no suitable area found");
 711		return;
 712	}
 713
 714	if (register_memory_notifier(&kdump_mem_nb))
 715		return;
 716
 717	if (!OLDMEM_BASE && MACHINE_IS_VM)
 718		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
 719	crashk_res.start = crash_base;
 720	crashk_res.end = crash_base + crash_size - 1;
 721	memblock_remove(crash_base, crash_size);
 722	pr_info("Reserving %lluMB of memory at %lluMB "
 723		"for crashkernel (System RAM: %luMB)\n",
 724		crash_size >> 20, crash_base >> 20,
 725		(unsigned long)memblock.memory.total_size >> 20);
 726	os_info_crashkernel_add(crash_base, crash_size);
 727#endif
 728}
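/*
 * A hedged example (sizes and addresses hypothetical): the region parsed
 * above comes from the documented crashkernel= parameter, e.g.
 *
 *   crashkernel=256M        reserve 256 MB at a suitable base
 *   crashkernel=256M@1G     reserve 256 MB at a fixed base address
 *
 * If no usable area exists, the pr_info() messages above report why the
 * reservation failed.
 */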
 729
 730/*
 731 * Reserve the initrd from being used by memblock
 732 */
 733static void __init reserve_initrd(void)
 734{
 735#ifdef CONFIG_BLK_DEV_INITRD
 736	if (!INITRD_START || !INITRD_SIZE)
 737		return;
 738	initrd_start = INITRD_START;
 739	initrd_end = initrd_start + INITRD_SIZE;
 740	memblock_reserve(INITRD_START, INITRD_SIZE);
 741#endif
 742}
 743
 744/*
 745 * Reserve the memory area used to pass the certificate lists
 746 */
 747static void __init reserve_certificate_list(void)
 748{
 749	if (ipl_cert_list_addr)
 750		memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
 751}
 752
 753static void __init reserve_mem_detect_info(void)
 754{
 755	unsigned long start, size;
 756
 757	get_mem_detect_reserved(&start, &size);
 758	if (size)
 759		memblock_reserve(start, size);
 760}
 761
 762static void __init free_mem_detect_info(void)
 763{
 764	unsigned long start, size;
 765
 766	get_mem_detect_reserved(&start, &size);
 767	if (size)
 768		memblock_free(start, size);
 769}
 770
 771static void __init memblock_physmem_add(phys_addr_t start, phys_addr_t size)
 772{
 773	memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
 774		     start, start + size - 1);
 775	memblock_add_range(&memblock.memory, start, size, 0, 0);
 776	memblock_add_range(&memblock.physmem, start, size, 0, 0);
 777}
 778
 779static const char * __init get_mem_info_source(void)
 780{
 781	switch (mem_detect.info_source) {
 782	case MEM_DETECT_SCLP_STOR_INFO:
 783		return "sclp storage info";
 784	case MEM_DETECT_DIAG260:
 785		return "diag260";
 786	case MEM_DETECT_SCLP_READ_INFO:
 787		return "sclp read info";
 788	case MEM_DETECT_BIN_SEARCH:
 789		return "binary search";
 790	}
 791	return "none";
 792}
 793
 794static void __init memblock_add_mem_detect_info(void)
 795{
 796	unsigned long start, end;
 797	int i;
 798
 799	memblock_dbg("physmem info source: %s (%hhd)\n",
 800		     get_mem_info_source(), mem_detect.info_source);
 801	/* keep memblock lists close to the kernel */
 802	memblock_set_bottom_up(true);
 803	for_each_mem_detect_block(i, &start, &end)
 804		memblock_physmem_add(start, end - start);
 805	memblock_set_bottom_up(false);
 806	memblock_dump_all();
 807}
 808
 809/*
 810 * Check for initrd being in usable memory
 811 */
 812static void __init check_initrd(void)
 813{
 814#ifdef CONFIG_BLK_DEV_INITRD
 815	if (INITRD_START && INITRD_SIZE &&
 816	    !memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
 817		pr_err("The initial RAM disk does not fit into the memory\n");
 818		memblock_free(INITRD_START, INITRD_SIZE);
 819		initrd_start = initrd_end = 0;
 820	}
 821#endif
 822}
 823
 824/*
 825 * Reserve memory used for lowcore/command line/kernel image.
 826 */
 827static void __init reserve_kernel(void)
 828{
 829	unsigned long start_pfn = PFN_UP(__pa(_end));
 830
 831	memblock_reserve(0, HEAD_END);
 832	memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
 833			 - (unsigned long)_stext);
 834	memblock_reserve(__sdma, __edma - __sdma);
 835}
 836
 837static void __init setup_memory(void)
 838{
 839	struct memblock_region *reg;
 840
 841	/*
 842	 * Init storage key for present memory
 843	 */
 844	for_each_memblock(memory, reg) {
 845		storage_key_init_range(reg->base, reg->base + reg->size);
 846	}
 847	psw_set_key(PAGE_DEFAULT_KEY);
 848
 849	/* Only cosmetics */
 850	memblock_enforce_memory_limit(memblock_end_of_DRAM());
 851}
 852
 853/*
 854 * Setup hardware capabilities.
 855 */
 856static int __init setup_hwcaps(void)
 857{
 858	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
 859	struct cpuid cpu_id;
 860	int i;
 861
 862	/*
 863	 * The store facility list bits numbers as found in the principles
 864	 * of operation are numbered with bit 1UL<<31 as number 0 to
 865	 * bit 1UL<<0 as number 31.
 866	 *   Bit 0: instructions named N3, "backported" to esa-mode
 867	 *   Bit 2: z/Architecture mode is active
 868	 *   Bit 7: the store-facility-list-extended facility is installed
 869	 *   Bit 17: the message-security assist is installed
 870	 *   Bit 19: the long-displacement facility is installed
 871	 *   Bit 21: the extended-immediate facility is installed
 872	 *   Bit 22: extended-translation facility 3 is installed
 873	 *   Bit 30: extended-translation facility 3 enhancement facility
 874	 * These get translated to:
 875	 *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
 876	 *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
 877	 *   HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
 878	 *   HWCAP_S390_ETF3EH bit 8 (22 && 30).
 879	 */
 880	for (i = 0; i < 6; i++)
 881		if (test_facility(stfl_bits[i]))
 882			elf_hwcap |= 1UL << i;
 883
 884	if (test_facility(22) && test_facility(30))
 885		elf_hwcap |= HWCAP_S390_ETF3EH;
 886
 887	/*
 888	 * Check for additional facilities with store-facility-list-extended.
 889	 * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
 890	 * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
 891	 * as stored by stfl, bits 32-xxx contain additional facilities.
 892	 * How many facility words are stored depends on the number of
 893	 * doublewords passed to the instruction. The additional facilities
 894	 * are:
 895	 *   Bit 42: decimal floating point facility is installed
 896	 *   Bit 44: perform floating point operation facility is installed
 897	 * translated to:
 898	 *   HWCAP_S390_DFP bit 6 (42 && 44).
 899	 */
 900	if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
 901		elf_hwcap |= HWCAP_S390_DFP;
 902
 903	/*
 904	 * Huge page support HWCAP_S390_HPAGE is bit 7.
 905	 */
 906	if (MACHINE_HAS_EDAT1)
 907		elf_hwcap |= HWCAP_S390_HPAGE;
 908
 909	/*
 910	 * 64-bit register support for 31-bit processes
 911	 * HWCAP_S390_HIGH_GPRS is bit 9.
 912	 */
 913	elf_hwcap |= HWCAP_S390_HIGH_GPRS;
 914
 915	/*
 916	 * Transactional execution support HWCAP_S390_TE is bit 10.
 917	 */
 918	if (MACHINE_HAS_TE)
 919		elf_hwcap |= HWCAP_S390_TE;
 920
 921	/*
 922	 * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
 923	 * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
 924	 * instead of facility bit 129.
 925	 */
 926	if (MACHINE_HAS_VX) {
 927		elf_hwcap |= HWCAP_S390_VXRS;
 928		if (test_facility(134))
 929			elf_hwcap |= HWCAP_S390_VXRS_EXT;
 930		if (test_facility(135))
 931			elf_hwcap |= HWCAP_S390_VXRS_BCD;
 932		if (test_facility(148))
 933			elf_hwcap |= HWCAP_S390_VXRS_EXT2;
 934		if (test_facility(152))
 935			elf_hwcap |= HWCAP_S390_VXRS_PDE;
 936	}
 937	if (test_facility(150))
 938		elf_hwcap |= HWCAP_S390_SORT;
 939	if (test_facility(151))
 940		elf_hwcap |= HWCAP_S390_DFLT;
 941
 942	/*
 943	 * Guarded storage support HWCAP_S390_GS is bit 12.
 944	 */
 945	if (MACHINE_HAS_GS)
 946		elf_hwcap |= HWCAP_S390_GS;
 947
 948	get_cpu_id(&cpu_id);
 949	add_device_randomness(&cpu_id, sizeof(cpu_id));
 950	switch (cpu_id.machine) {
 951	case 0x2064:
 952	case 0x2066:
 953	default:	/* Use "z900" as default for 64 bit kernels. */
 954		strcpy(elf_platform, "z900");
 955		break;
 956	case 0x2084:
 957	case 0x2086:
 958		strcpy(elf_platform, "z990");
 959		break;
 960	case 0x2094:
 961	case 0x2096:
 962		strcpy(elf_platform, "z9-109");
 963		break;
 964	case 0x2097:
 965	case 0x2098:
 966		strcpy(elf_platform, "z10");
 967		break;
 968	case 0x2817:
 969	case 0x2818:
 970		strcpy(elf_platform, "z196");
 971		break;
 972	case 0x2827:
 973	case 0x2828:
 974		strcpy(elf_platform, "zEC12");
 975		break;
 976	case 0x2964:
 977	case 0x2965:
 978		strcpy(elf_platform, "z13");
 979		break;
 980	case 0x3906:
 981	case 0x3907:
 982		strcpy(elf_platform, "z14");
 983		break;
 984	case 0x8561:
 985	case 0x8562:
 986		strcpy(elf_platform, "z15");
 987		break;
 988	}
 989
 990	/*
 991	 * Virtualization support HWCAP_INT_SIE is bit 0.
 992	 */
 993	if (sclp.has_sief2)
 994		int_hwcap |= HWCAP_INT_SIE;
 995
 996	return 0;
 997}
 998arch_initcall(setup_hwcaps);
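/*
 * A minimal userspace sketch (not part of the kernel) showing how the
 * bits published by setup_hwcaps() are consumed; per the comments above,
 * the vector extension HWCAP_S390_VXRS is bit 11:
 */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("vector facility: %s\n", (hwcap & (1UL << 11)) ? "yes" : "no");
	return 0;
}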
 999
1000/*
1001 * Add system information as device randomness
1002 */
1003static void __init setup_randomness(void)
1004{
1005	struct sysinfo_3_2_2 *vmms;
1006
1007	vmms = (struct sysinfo_3_2_2 *) memblock_phys_alloc(PAGE_SIZE,
1008							    PAGE_SIZE);
1009	if (!vmms)
1010		panic("Failed to allocate memory for sysinfo structure\n");
1011
1012	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
1013		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
1014	memblock_free((unsigned long) vmms, PAGE_SIZE);
1015}
1016
1017/*
1018 * Find the correct size for the task_struct. This depends on
1019 * the size of the struct fpu at the end of the thread_struct
1020 * which is embedded in the task_struct.
1021 */
1022static void __init setup_task_size(void)
1023{
1024	int task_size = sizeof(struct task_struct);
1025
1026	if (!MACHINE_HAS_VX) {
1027		task_size -= sizeof(__vector128) * __NUM_VXRS;
1028		task_size += sizeof(freg_t) * __NUM_FPRS;
1029	}
1030	arch_task_struct_size = task_size;
1031}
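/*
 * A worked example of the adjustment above, assuming the usual s390
 * register file sizes (32 vector registers of 16 bytes, 16 floating
 * point registers of 8 bytes): without the vector facility, task_struct
 * loses 32 * 16 = 512 bytes and regains 16 * 8 = 128 bytes, a net
 * saving of 384 bytes per task.
 */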
1032
1033/*
1034 * Issue diagnose 318 to set the control program name and
1035 * version codes.
1036 */
1037static void __init setup_control_program_code(void)
1038{
1039	union diag318_info diag318_info = {
1040		.cpnc = CPNC_LINUX,
1041		.cpvc_linux = 0,
1042		.cpvc_distro = {0},
1043	};
1044
1045	if (!sclp.has_diag318)
1046		return;
1047
1048	diag_stat_inc(DIAG_STAT_X318);
1049	asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
1050}
1051
1052/*
1053 * Print the component list from the IPL report
1054 */
1055static void __init log_component_list(void)
1056{
1057	struct ipl_rb_component_entry *ptr, *end;
1058	char *str;
1059
1060	if (!early_ipl_comp_list_addr)
1061		return;
1062	if (ipl_block.hdr.flags & IPL_PL_FLAG_IPLSR)
1063		pr_info("Linux is running with Secure-IPL enabled\n");
1064	else
1065		pr_info("Linux is running with Secure-IPL disabled\n");
1066	ptr = (void *) early_ipl_comp_list_addr;
1067	end = (void *) ptr + early_ipl_comp_list_size;
1068	pr_info("The IPL report contains the following components:\n");
1069	while (ptr < end) {
1070		if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
1071			if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
1072				str = "signed, verified";
1073			else
1074				str = "signed, verification failed";
1075		} else {
1076			str = "not signed";
1077		}
1078		pr_info("%016llx - %016llx (%s)\n",
1079			ptr->addr, ptr->addr + ptr->len, str);
1080		ptr++;
1081	}
1082}
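/*
 * A hypothetical example of the output produced above, given the
 * pr_fmt() prefix defined at the top of this file:
 *
 *   setup: Linux is running with Secure-IPL enabled
 *   setup: The IPL report contains the following components:
 *   setup: 0000000000020000 - 0000000000a00000 (signed, verified)
 */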
1083
1084/*
1085 * Setup function called from init/main.c just after the banner
1086 * was printed.
1087 */
1088
1089void __init setup_arch(char **cmdline_p)
1090{
1091        /*
1092         * print what head.S has found out about the machine
1093         */
1094	if (MACHINE_IS_VM)
1095		pr_info("Linux is running as a z/VM "
1096			"guest operating system in 64-bit mode\n");
1097	else if (MACHINE_IS_KVM)
1098		pr_info("Linux is running under KVM in 64-bit mode\n");
1099	else if (MACHINE_IS_LPAR)
1100		pr_info("Linux is running natively in 64-bit mode\n");
1101	else
1102		pr_info("Linux is running as a guest in 64-bit mode\n");
1103
1104	log_component_list();
1105
1106	/* Have one command line that is parsed and saved in /proc/cmdline */
1107	/* boot_command_line has been already set up in early.c */
1108	*cmdline_p = boot_command_line;
1109
1110        ROOT_DEV = Root_RAM0;
1111
1112	init_mm.start_code = (unsigned long) _text;
1113	init_mm.end_code = (unsigned long) _etext;
1114	init_mm.end_data = (unsigned long) _edata;
1115	init_mm.brk = (unsigned long) _end;
1116
1117	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
1118		nospec_auto_detect();
1119
1120	parse_early_param();
1121#ifdef CONFIG_CRASH_DUMP
1122	/* Deactivate elfcorehdr= kernel parameter */
1123	elfcorehdr_addr = ELFCORE_ADDR_MAX;
1124#endif
1125
1126	os_info_init();
1127	setup_ipl();
1128	setup_task_size();
1129	setup_control_program_code();
1130
1131	/* Do some memory reservations *before* memory is added to memblock */
1132	reserve_memory_end();
1133	reserve_oldmem();
1134	reserve_kernel();
1135	reserve_initrd();
1136	reserve_certificate_list();
1137	reserve_mem_detect_info();
1138	memblock_allow_resize();
1139
1140	/* Get information about *all* installed memory */
1141	memblock_add_mem_detect_info();
1142
1143	free_mem_detect_info();
1144	remove_oldmem();
1145
1146	/*
1147	 * Make sure all chunks are MAX_ORDER aligned so we don't need the
1148	 * extra checks that HOLES_IN_ZONE would require.
1149	 *
1150	 * Is this still required?
1151	 */
1152	memblock_trim_memory(1UL << (MAX_ORDER - 1 + PAGE_SHIFT));
1153
1154	setup_memory_end();
1155	setup_memory();
1156	dma_contiguous_reserve(memory_end);
1157	vmcp_cma_reserve();
1158
1159	check_initrd();
1160	reserve_crashkernel();
1161#ifdef CONFIG_CRASH_DUMP
1162	/*
1163	 * Be aware that smp_save_dump_cpus() triggers a system reset.
1164	 * Therefore CPU and device initialization should be done afterwards.
1165	 */
1166	smp_save_dump_cpus();
1167#endif
1168
1169	setup_resources();
1170	setup_lowcore_dat_off();
1171	smp_fill_possible_mask();
1172	cpu_detect_mhz_feature();
1173        cpu_init();
1174	numa_setup();
1175	smp_detect_cpus();
1176	topology_init_early();
1177
1178	/*
1179	 * Create kernel page tables and switch to virtual addressing.
1180	 */
1181        paging_init();
1182
1183	/*
1184	 * After paging_init created the kernel page table, the new PSWs
1185	 * in lowcore can now run with DAT enabled.
1186	 */
1187	setup_lowcore_dat_on();
1188
1189        /* Setup default console */
1190	conmode_default();
1191	set_preferred_console();
1192
1193	apply_alternative_instructions();
1194	if (IS_ENABLED(CONFIG_EXPOLINE))
1195		nospec_init_branches();
1196
1197	/* Setup zfcpdump support */
1198	setup_zfcpdump();
1199
1200	/* Add system specific data to the random pool */
1201	setup_randomness();
1202}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  S390 version
   4 *    Copyright IBM Corp. 1999, 2012
   5 *    Author(s): Hartmut Penner (hp@de.ibm.com),
   6 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
   7 *
   8 *  Derived from "arch/i386/kernel/setup.c"
   9 *    Copyright (C) 1995, Linus Torvalds
  10 */
  11
  12/*
  13 * This file handles the architecture-dependent parts of initialization
  14 */
  15
  16#define KMSG_COMPONENT "setup"
  17#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  18
  19#include <linux/errno.h>
  20#include <linux/export.h>
  21#include <linux/sched.h>
  22#include <linux/sched/task.h>
  23#include <linux/cpu.h>
  24#include <linux/kernel.h>
  25#include <linux/memblock.h>
  26#include <linux/mm.h>
  27#include <linux/stddef.h>
  28#include <linux/unistd.h>
  29#include <linux/ptrace.h>
  30#include <linux/random.h>
  31#include <linux/user.h>
  32#include <linux/tty.h>
  33#include <linux/ioport.h>
  34#include <linux/delay.h>
  35#include <linux/init.h>
  36#include <linux/initrd.h>
  37#include <linux/root_dev.h>
  38#include <linux/console.h>
  39#include <linux/kernel_stat.h>
  40#include <linux/dma-map-ops.h>
  41#include <linux/device.h>
  42#include <linux/notifier.h>
  43#include <linux/pfn.h>
  44#include <linux/ctype.h>
  45#include <linux/reboot.h>
  46#include <linux/topology.h>
  47#include <linux/kexec.h>
  48#include <linux/crash_dump.h>
  49#include <linux/memory.h>
  50#include <linux/compat.h>
  51#include <linux/start_kernel.h>
  52#include <linux/hugetlb.h>
  53#include <linux/kmemleak.h>
  54
  55#include <asm/archrandom.h>
  56#include <asm/boot_data.h>
  57#include <asm/ipl.h>
  58#include <asm/facility.h>
  59#include <asm/smp.h>
  60#include <asm/mmu_context.h>
  61#include <asm/cpcmd.h>
  62#include <asm/abs_lowcore.h>
  63#include <asm/nmi.h>
  64#include <asm/irq.h>
  65#include <asm/page.h>
  66#include <asm/ptrace.h>
  67#include <asm/sections.h>
  68#include <asm/ebcdic.h>
  69#include <asm/diag.h>
  70#include <asm/os_info.h>
  71#include <asm/sclp.h>
  72#include <asm/stacktrace.h>
  73#include <asm/sysinfo.h>
  74#include <asm/numa.h>
  75#include <asm/alternative.h>
  76#include <asm/nospec-branch.h>
  77#include <asm/mem_detect.h>
  78#include <asm/maccess.h>
  79#include <asm/uv.h>
  80#include <asm/asm-offsets.h>
  81#include "entry.h"
  82
  83/*
  84 * Machine setup..
  85 */
  86unsigned int console_mode = 0;
  87EXPORT_SYMBOL(console_mode);
  88
  89unsigned int console_devno = -1;
  90EXPORT_SYMBOL(console_devno);
  91
  92unsigned int console_irq = -1;
  93EXPORT_SYMBOL(console_irq);
  94
  95/*
  96 * Some code and data needs to stay below 2 GB, even when the kernel would be
  97 * relocated above 2 GB, because it has to use 31 bit addresses.
  98 * Such code and data is part of the .amode31 section.
  99 */
 100unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31;
 101unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31;
 102unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31;
 103unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31;
 104struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
 105struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
 106
 107/*
 108 * Control registers CR2, CR5 and CR15 are initialized with addresses
 109 * of tables that must be placed below 2G which is handled by the AMODE31
 110 * sections.
 111 * Because the AMODE31 sections are relocated below 2G at startup,
 112 * the content of control registers CR2, CR5 and CR15 must be updated
  113 * with new addresses after the relocation. The control registers are first
  114 * initialized in head64.S and then updated again after the AMODE31
 115 * relocation. We must access the relevant AMODE31 tables indirectly via
 116 * pointers placed in the .amode31.refs linker section. Those pointers get
 117 * updated automatically during AMODE31 relocation and always contain a valid
 118 * address within AMODE31 sections.
 119 */
 120
 121static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);
 122
 123static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
 124	[1] = 0xffffffffffffffff
 125};
 126
 127static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
 128	0x80000000, 0, 0, 0,
 129	0x80000000, 0, 0, 0,
 130	0x80000000, 0, 0, 0,
 131	0x80000000, 0, 0, 0,
 132	0x80000000, 0, 0, 0,
 133	0x80000000, 0, 0, 0,
 134	0x80000000, 0, 0, 0,
 135	0x80000000, 0, 0, 0
 136};
 137
 138static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
 139	0, 0, 0x89000000, 0,
 140	0, 0, 0x8a000000, 0
 141};
 142
 143static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
 144static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
 145static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
 146static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
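/*
 * The four pointers above are the indirection described in the comment
 * block: code dereferences e.g. __ctl_duct instead of taking the address
 * of __ctl_duct_amode31 directly, so the fixup of the .amode31.refs
 * section during relocation keeps every user pointing at the relocated
 * tables.
 */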
 147
 148int __bootdata(noexec_disabled);
 149unsigned long __bootdata(ident_map_size);
 150struct mem_detect_info __bootdata(mem_detect);
 151struct initrd_data __bootdata(initrd_data);
 152
 153unsigned long __bootdata_preserved(__kaslr_offset);
 154unsigned long __bootdata(__amode31_base);
 155unsigned int __bootdata_preserved(zlib_dfltcc_support);
 156EXPORT_SYMBOL(zlib_dfltcc_support);
 157u64 __bootdata_preserved(stfle_fac_list[16]);
 158EXPORT_SYMBOL(stfle_fac_list);
 159u64 __bootdata_preserved(alt_stfle_fac_list[16]);
 160struct oldmem_data __bootdata_preserved(oldmem_data);
 161
 162unsigned long VMALLOC_START;
 163EXPORT_SYMBOL(VMALLOC_START);
 164
 165unsigned long VMALLOC_END;
 166EXPORT_SYMBOL(VMALLOC_END);
 167
 168struct page *vmemmap;
 169EXPORT_SYMBOL(vmemmap);
 170unsigned long vmemmap_size;
 171
 172unsigned long MODULES_VADDR;
 173unsigned long MODULES_END;
 174
 175/* An array with a pointer to the lowcore of every CPU. */
 176struct lowcore *lowcore_ptr[NR_CPUS];
 177EXPORT_SYMBOL(lowcore_ptr);
 178
 179DEFINE_STATIC_KEY_FALSE(cpu_has_bear);
 180
 181/*
 182 * The Write Back bit position in the physaddr is given by the SLPC PCI.
 183 * Leaving the mask zero always uses write through which is safe
 184 */
 185unsigned long mio_wb_bit_mask __ro_after_init;
 186
 187/*
 188 * This is set up by the setup-routine at boot-time
  189 * for S390 we need to find out what we have to set up
 190 * using address 0x10400 ...
 191 */
 192
 193#include <asm/setup.h>
 194
 195/*
 196 * condev= and conmode= setup parameter.
 197 */
 198
 199static int __init condev_setup(char *str)
 200{
 201	int vdev;
 202
 203	vdev = simple_strtoul(str, &str, 0);
 204	if (vdev >= 0 && vdev < 65536) {
 205		console_devno = vdev;
 206		console_irq = -1;
 207	}
 208	return 1;
 209}
 210
 211__setup("condev=", condev_setup);
 212
 213static void __init set_preferred_console(void)
 214{
 215	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
 216		add_preferred_console("ttyS", 0, NULL);
 217	else if (CONSOLE_IS_3270)
 218		add_preferred_console("tty3270", 0, NULL);
 219	else if (CONSOLE_IS_VT220)
 220		add_preferred_console("ttysclp", 0, NULL);
 221	else if (CONSOLE_IS_HVC)
 222		add_preferred_console("hvc", 0, NULL);
 223}
 224
 225static int __init conmode_setup(char *str)
 226{
 227#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 228	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
 229                SET_CONSOLE_SCLP;
 230#endif
 231#if defined(CONFIG_TN3215_CONSOLE)
 232	if (!strcmp(str, "3215"))
 233		SET_CONSOLE_3215;
 234#endif
 235#if defined(CONFIG_TN3270_CONSOLE)
 236	if (!strcmp(str, "3270"))
 237		SET_CONSOLE_3270;
 238#endif
 239	set_preferred_console();
 240        return 1;
 241}
 242
 243__setup("conmode=", conmode_setup);
 244
 245static void __init conmode_default(void)
 246{
 247	char query_buffer[1024];
 248	char *ptr;
 249
 250        if (MACHINE_IS_VM) {
 251		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
 252		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
 253		ptr = strstr(query_buffer, "SUBCHANNEL =");
 254		console_irq = simple_strtoul(ptr + 13, NULL, 16);
 255		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
 256		ptr = strstr(query_buffer, "CONMODE");
 257		/*
 258		 * Set the conmode to 3215 so that the device recognition 
 259		 * will set the cu_type of the console to 3215. If the
 260		 * conmode is 3270 and we don't set it back then both
 261		 * 3215 and the 3270 driver will try to access the console
 262		 * device (3215 as console and 3270 as normal tty).
 263		 */
 264		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
 265		if (ptr == NULL) {
 266#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 267			SET_CONSOLE_SCLP;
 268#endif
 269			return;
 270		}
 271		if (str_has_prefix(ptr + 8, "3270")) {
 272#if defined(CONFIG_TN3270_CONSOLE)
 273			SET_CONSOLE_3270;
 274#elif defined(CONFIG_TN3215_CONSOLE)
 275			SET_CONSOLE_3215;
 276#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 277			SET_CONSOLE_SCLP;
 278#endif
 279		} else if (str_has_prefix(ptr + 8, "3215")) {
 280#if defined(CONFIG_TN3215_CONSOLE)
 281			SET_CONSOLE_3215;
 282#elif defined(CONFIG_TN3270_CONSOLE)
 283			SET_CONSOLE_3270;
 284#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 285			SET_CONSOLE_SCLP;
 286#endif
 287		}
 288	} else if (MACHINE_IS_KVM) {
 289		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
 290			SET_CONSOLE_VT220;
 291		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
 292			SET_CONSOLE_SCLP;
 293		else
 294			SET_CONSOLE_HVC;
 295	} else {
 296#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
 297		SET_CONSOLE_SCLP;
 298#endif
 299	}
 300}
 301
 302#ifdef CONFIG_CRASH_DUMP
 303static void __init setup_zfcpdump(void)
 304{
 305	if (!is_ipl_type_dump())
 306		return;
 307	if (oldmem_data.start)
 308		return;
 309	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
 310	console_loglevel = 2;
 311}
 312#else
 313static inline void setup_zfcpdump(void) {}
 314#endif /* CONFIG_CRASH_DUMP */
 315
  316/*
  317 * Reboot, halt and power_off stubs. They just call _machine_restart,
  318 * _machine_halt or _machine_power_off.
  319 */
 320
 321void machine_restart(char *command)
 322{
 323	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
 324		/*
 325		 * Only unblank the console if we are called in enabled
 326		 * context or a bust_spinlocks cleared the way for us.
 327		 */
 328		console_unblank();
 329	_machine_restart(command);
 330}
 331
 332void machine_halt(void)
 333{
 334	if (!in_interrupt() || oops_in_progress)
 335		/*
 336		 * Only unblank the console if we are called in enabled
 337		 * context or a bust_spinlocks cleared the way for us.
 338		 */
 339		console_unblank();
 340	_machine_halt();
 341}
 342
 343void machine_power_off(void)
 344{
 345	if (!in_interrupt() || oops_in_progress)
 346		/*
 347		 * Only unblank the console if we are called in enabled
 348		 * context or a bust_spinlocks cleared the way for us.
 349		 */
 350		console_unblank();
 351	_machine_power_off();
 352}
 353
 354/*
 355 * Dummy power off function.
 356 */
 357void (*pm_power_off)(void) = machine_power_off;
 358EXPORT_SYMBOL_GPL(pm_power_off);
 359
 360void *restart_stack;
 361
 362unsigned long stack_alloc(void)
 363{
 364#ifdef CONFIG_VMAP_STACK
 365	void *ret;
 366
 367	ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
 368			     NUMA_NO_NODE, __builtin_return_address(0));
 369	kmemleak_not_leak(ret);
 370	return (unsigned long)ret;
 371#else
 372	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
 373#endif
 374}
 375
 376void stack_free(unsigned long stack)
 377{
 378#ifdef CONFIG_VMAP_STACK
 379	vfree((void *) stack);
 380#else
 381	free_pages(stack, THREAD_SIZE_ORDER);
 382#endif
 383}
 384
 385int __init arch_early_irq_init(void)
 386{
 387	unsigned long stack;
 388
 389	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
 390	if (!stack)
 391		panic("Couldn't allocate async stack");
 392	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
 393	return 0;
 394}
 395
 396void __init arch_call_rest_init(void)
 397{
 398	unsigned long stack;
 399
 400	smp_reinit_ipl_cpu();
 401	stack = stack_alloc();
 402	if (!stack)
 403		panic("Couldn't allocate kernel stack");
 404	current->stack = (void *) stack;
 405#ifdef CONFIG_VMAP_STACK
 406	current->stack_vm_area = (void *) stack;
 407#endif
 408	set_task_stack_end_magic(current);
 409	stack += STACK_INIT_OFFSET;
 410	S390_lowcore.kernel_stack = stack;
 411	call_on_stack_noreturn(rest_init, stack);
 412}
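/*
 * Note the difference from the v5.4 variant above: the open-coded inline
 * assembly branch to rest_init is replaced by the call_on_stack_noreturn()
 * helper, which performs the same switch to the freshly allocated stack.
 */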
 413
 414static void __init setup_lowcore_dat_off(void)
 415{
 416	unsigned long int_psw_mask = PSW_KERNEL_BITS;
 417	struct lowcore *abs_lc, *lc;
 418	unsigned long mcck_stack;
 419	unsigned long flags;
 420
 421	if (IS_ENABLED(CONFIG_KASAN))
 422		int_psw_mask |= PSW_MASK_DAT;
 423
 424	/*
 425	 * Setup lowcore for boot cpu
 426	 */
 427	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
 428	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
 429	if (!lc)
 430		panic("%s: Failed to allocate %zu bytes align=%zx\n",
 431		      __func__, sizeof(*lc), sizeof(*lc));
 432
 433	lc->restart_psw.mask = PSW_KERNEL_BITS;
 434	lc->restart_psw.addr = (unsigned long) restart_int_handler;
 435	lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
 436	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
 437	lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
 438	lc->svc_new_psw.addr = (unsigned long) system_call;
 439	lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
 440	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
 441	lc->mcck_new_psw.mask = int_psw_mask;
 442	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
 443	lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
 444	lc->io_new_psw.addr = (unsigned long) io_int_handler;
 445	lc->clock_comparator = clock_comparator_max;
 446	lc->nodat_stack = ((unsigned long) &init_thread_union)
 447		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
 448	lc->current_task = (unsigned long)&init_task;
 449	lc->lpp = LPP_MAGIC;
 450	lc->machine_flags = S390_lowcore.machine_flags;
 451	lc->preempt_count = S390_lowcore.preempt_count;
 452	nmi_alloc_mcesa_early(&lc->mcesad);
 453	lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
 454	lc->exit_timer = S390_lowcore.exit_timer;
 455	lc->user_timer = S390_lowcore.user_timer;
 456	lc->system_timer = S390_lowcore.system_timer;
 457	lc->steal_timer = S390_lowcore.steal_timer;
 458	lc->last_update_timer = S390_lowcore.last_update_timer;
 459	lc->last_update_clock = S390_lowcore.last_update_clock;
 460
 461	/*
 462	 * Allocate the global restart stack which is the same for
  463	 * all CPUs in case *one* of them does a PSW restart.
 464	 */
 465	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
 466	if (!restart_stack)
 467		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
 468		      __func__, THREAD_SIZE, THREAD_SIZE);
 469	restart_stack += STACK_INIT_OFFSET;
 470
 471	/*
 472	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
 473	 * restart data to the absolute zero lowcore. This is necessary if
 474	 * PSW restart is done on an offline CPU that has lowcore zero.
 475	 */
 476	lc->restart_stack = (unsigned long) restart_stack;
 477	lc->restart_fn = (unsigned long) do_restart;
 478	lc->restart_data = 0;
 479	lc->restart_source = -1U;
 480
 481	abs_lc = get_abs_lowcore(&flags);
 482	abs_lc->restart_stack = lc->restart_stack;
 483	abs_lc->restart_fn = lc->restart_fn;
 484	abs_lc->restart_data = lc->restart_data;
 485	abs_lc->restart_source = lc->restart_source;
 486	abs_lc->restart_psw = lc->restart_psw;
 487	abs_lc->mcesad = lc->mcesad;
 488	put_abs_lowcore(abs_lc, flags);
 489
 490	mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
 491	if (!mcck_stack)
 492		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
 493		      __func__, THREAD_SIZE, THREAD_SIZE);
 494	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
 495
 496	lc->spinlock_lockval = arch_spin_lockval(0);
 497	lc->spinlock_index = 0;
 498	arch_spin_lock_setup(0);
 499	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
 500	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
 501	lc->preempt_count = PREEMPT_DISABLED;
 502
 503	set_prefix(__pa(lc));
 504	lowcore_ptr[0] = lc;
 505}
 506
 507static void __init setup_lowcore_dat_on(void)
 508{
 509	struct lowcore *abs_lc;
 510	unsigned long flags;
 511	int i;
 512
 513	__ctl_clear_bit(0, 28);
 514	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
 515	S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
 516	S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
 517	S390_lowcore.mcck_new_psw.mask |= PSW_MASK_DAT;
 518	S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
 519	__ctl_set_bit(0, 28);
 520	__ctl_store(S390_lowcore.cregs_save_area, 0, 15);
 521	if (abs_lowcore_map(0, lowcore_ptr[0], true))
 522		panic("Couldn't setup absolute lowcore");
 523	abs_lowcore_mapped = true;
 524	abs_lc = get_abs_lowcore(&flags);
 525	abs_lc->restart_flags = RESTART_FLAG_CTLREGS;
 526	abs_lc->program_new_psw = S390_lowcore.program_new_psw;
 527	for (i = 0; i < 16; i++)
 528		abs_lc->cregs_save_area[i] = S390_lowcore.cregs_save_area[i];
 529	put_abs_lowcore(abs_lc, flags);
 530}
 531
 532static struct resource code_resource = {
 533	.name  = "Kernel code",
 534	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 535};
 536
 537static struct resource data_resource = {
 538	.name = "Kernel data",
 539	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 540};
 541
 542static struct resource bss_resource = {
 543	.name = "Kernel bss",
 544	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 545};
 546
 547static struct resource __initdata *standard_resources[] = {
 548	&code_resource,
 549	&data_resource,
 550	&bss_resource,
 551};
 552
 553static void __init setup_resources(void)
 554{
 555	struct resource *res, *std_res, *sub_res;
 556	phys_addr_t start, end;
 557	int j;
 558	u64 i;
 559
 560	code_resource.start = (unsigned long) _text;
 561	code_resource.end = (unsigned long) _etext - 1;
 562	data_resource.start = (unsigned long) _etext;
 563	data_resource.end = (unsigned long) _edata - 1;
 564	bss_resource.start = (unsigned long) __bss_start;
 565	bss_resource.end = (unsigned long) __bss_stop - 1;
 566
 567	for_each_mem_range(i, &start, &end) {
 568		res = memblock_alloc(sizeof(*res), 8);
 569		if (!res)
 570			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
 571			      __func__, sizeof(*res), 8);
 572		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
 573
 574		res->name = "System RAM";
 575		res->start = start;
 576		/*
 577		 * In memblock, end points to the first byte after the
  578		 * range while in resources, end points to the last byte in
 579		 * the range.
 580		 */
 581		res->end = end - 1;
 582		request_resource(&iomem_resource, res);
 583
 584		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
 585			std_res = standard_resources[j];
 586			if (std_res->start < res->start ||
 587			    std_res->start > res->end)
 588				continue;
 589			if (std_res->end > res->end) {
 590				sub_res = memblock_alloc(sizeof(*sub_res), 8);
 591				if (!sub_res)
 592					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
 593					      __func__, sizeof(*sub_res), 8);
 594				*sub_res = *std_res;
 595				sub_res->end = res->end;
 596				std_res->start = res->end + 1;
 597				request_resource(res, sub_res);
 598			} else {
 599				request_resource(res, std_res);
 600			}
 601		}
 602	}
 603#ifdef CONFIG_CRASH_DUMP
 604	/*
 605	 * Re-add removed crash kernel memory as reserved memory. This makes
 606	 * sure it will be mapped with the identity mapping and struct pages
 607	 * will be created, so it can be resized later on.
 608	 * However, add it later, since the crash kernel resource should not be
 609	 * part of the System RAM resource.
 610	 */
 611	if (crashk_res.end) {
 612		memblock_add_node(crashk_res.start, resource_size(&crashk_res),
 613				  0, MEMBLOCK_NONE);
 614		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
 615		insert_resource(&iomem_resource, &crashk_res);
 616	}
 617#endif
 618}
 619
 620static void __init setup_memory_end(void)
 621{
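	/*
	 * Illustrative numbers: with ident_map_size = 8 GiB, everything
	 * from 0x200000000 upwards is dropped from memblock and max_pfn
	 * becomes 0x200000 (8 GiB >> PAGE_SHIFT with 4 KiB pages).
	 */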
 622	memblock_remove(ident_map_size, PHYS_ADDR_MAX - ident_map_size);
 623	max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
 624	pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
 625}
 626
 627#ifdef CONFIG_CRASH_DUMP
 628
 629/*
 630 * When kdump is enabled, we have to ensure that no memory from the area
 631 * [0 - crashkernel memory size] is set offline - it will be exchanged with
 632 * the crashkernel memory region when kdump is triggered. The crashkernel
 633 * memory region can never get offlined (pages are unmovable).
 634 */
 635static int kdump_mem_notifier(struct notifier_block *nb,
 636			      unsigned long action, void *data)
 637{
 638	struct memory_notify *arg = data;
 639
 640	if (action != MEM_GOING_OFFLINE)
 641		return NOTIFY_OK;
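	/*
	 * Illustrative numbers: a 256 MiB crashkernel reservation gives
	 * PFN_DOWN(resource_size()) == 65536, so any memory block that
	 * starts below PFN 65536 is refused here.
	 */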
 642	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
 643		return NOTIFY_BAD;
 644	return NOTIFY_OK;
 645}
 646
 647static struct notifier_block kdump_mem_nb = {
 648	.notifier_call = kdump_mem_notifier,
 649};
 650
 651#endif
 652
 653/*
 654 * Reserve memory for kdump kernel to be loaded with kexec
 655 */
 656static void __init reserve_crashkernel(void)
 657{
 658#ifdef CONFIG_CRASH_DUMP
 659	unsigned long long crash_base, crash_size;
 660	phys_addr_t low, high;
 661	int rc;
 662
 663	rc = parse_crashkernel(boot_command_line, ident_map_size, &crash_size,
 664			       &crash_base);
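	/*
	 * parse_crashkernel() accepts the usual command-line forms such
	 * as "crashkernel=512M" or "crashkernel=512M@1G" (illustrative
	 * values); crash_base stays 0 when no base address was given.
	 */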
 665
 666	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
 667	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
 668	if (rc || crash_size == 0)
 669		return;
 670
 671	if (memblock.memory.regions[0].size < crash_size) {
 672		pr_info("crashkernel reservation failed: %s\n",
 673			"first memory chunk must be at least crashkernel size");
 674		return;
 675	}
 676
 677	low = crash_base ?: oldmem_data.start;
 678	high = low + crash_size;
 679	if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
 680		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
 681		crash_base = low;
 682	} else {
 683		/* Find suitable area in free memory */
 684		low = max_t(unsigned long, crash_size, sclp.hsa_size);
 685		high = crash_base ? crash_base + crash_size : ULONG_MAX;
 686
 687		if (crash_base && crash_base < low) {
 688			pr_info("crashkernel reservation failed: %s\n",
 689				"crash_base too low");
 690			return;
 691		}
 692		low = crash_base ?: low;
 693		crash_base = memblock_phys_alloc_range(crash_size,
 694						       KEXEC_CRASH_MEM_ALIGN,
 695						       low, high);
 696	}
 697
 698	if (!crash_base) {
 699		pr_info("crashkernel reservation failed: %s\n",
 700			"no suitable area found");
 701		return;
 702	}
 703
 704	if (register_memory_notifier(&kdump_mem_nb)) {
 705		memblock_phys_free(crash_base, crash_size);
 706		return;
 707	}
 708
 709	if (!oldmem_data.start && MACHINE_IS_VM)
 710		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
 711	crashk_res.start = crash_base;
 712	crashk_res.end = crash_base + crash_size - 1;
 713	memblock_remove(crash_base, crash_size);
 714	pr_info("Reserving %lluMB of memory at %lluMB "
 715		"for crashkernel (System RAM: %luMB)\n",
 716		crash_size >> 20, crash_base >> 20,
 717		(unsigned long)memblock.memory.total_size >> 20);
 718	os_info_crashkernel_add(crash_base, crash_size);
 719#endif
 720}
 721
 722/*
 723 * Reserve the initrd from being used by memblock
 724 */
 725static void __init reserve_initrd(void)
 726{
 727#ifdef CONFIG_BLK_DEV_INITRD
 728	if (!initrd_data.start || !initrd_data.size)
 729		return;
 730	initrd_start = (unsigned long)__va(initrd_data.start);
 731	initrd_end = initrd_start + initrd_data.size;
 732	memblock_reserve(initrd_data.start, initrd_data.size);
 733#endif
 734}
 735
 736/*
 737 * Reserve the memory area used to pass the certificate lists
 738 */
 739static void __init reserve_certificate_list(void)
 740{
 741	if (ipl_cert_list_addr)
 742		memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
 743}
 744
 745static void __init reserve_mem_detect_info(void)
 746{
 747	unsigned long start, size;
 748
 749	get_mem_detect_reserved(&start, &size);
 750	if (size)
 751		memblock_reserve(start, size);
 752}
 753
 754static void __init free_mem_detect_info(void)
 755{
 756	unsigned long start, size;
 757
 758	get_mem_detect_reserved(&start, &size);
 759	if (size)
 760		memblock_phys_free(start, size);
 761}
 762
 763static const char * __init get_mem_info_source(void)
 764{
 765	switch (mem_detect.info_source) {
 766	case MEM_DETECT_SCLP_STOR_INFO:
 767		return "sclp storage info";
 768	case MEM_DETECT_DIAG260:
 769		return "diag260";
 770	case MEM_DETECT_SCLP_READ_INFO:
 771		return "sclp read info";
 772	case MEM_DETECT_BIN_SEARCH:
 773		return "binary search";
 774	}
 775	return "none";
 776}
 777
 778static void __init memblock_add_mem_detect_info(void)
 779{
 780	unsigned long start, end;
 781	int i;
 782
 783	pr_debug("physmem info source: %s (%hhd)\n",
 784		 get_mem_info_source(), mem_detect.info_source);
 785	/* keep memblock lists close to the kernel */
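	/*
	 * Bottom-up mode makes memblock prefer the lowest suitable
	 * addresses for its own metadata allocations, keeping them near
	 * the (low) kernel image instead of at the top of memory.
	 */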
 786	memblock_set_bottom_up(true);
 787	for_each_mem_detect_block(i, &start, &end) {
 788		memblock_add(start, end - start);
 789		memblock_physmem_add(start, end - start);
 790	}
 791	memblock_set_bottom_up(false);
 792	memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
 793}
 794
 795/*
 796 * Check for initrd being in usable memory
 797 */
 798static void __init check_initrd(void)
 799{
 800#ifdef CONFIG_BLK_DEV_INITRD
 801	if (initrd_data.start && initrd_data.size &&
 802	    !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
 803		pr_err("The initial RAM disk does not fit into the memory\n");
 804		memblock_phys_free(initrd_data.start, initrd_data.size);
 805		initrd_start = initrd_end = 0;
 806	}
 807#endif
 808}
 809
 810/*
 811 * Reserve memory used for lowcore/command line/kernel image.
 812 */
 813static void __init reserve_kernel(void)
 814{
 815	memblock_reserve(0, STARTUP_NORMAL_OFFSET);
 816	memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
 817	memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
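	/*
	 * OLDMEM_BASE and OLDMEM_SIZE are fixed low-memory words used to
	 * hand the old-memory region over to a kdump kernel (illustrative
	 * reading); keep both words away from the allocator.
	 */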
 818	memblock_reserve(__amode31_base, __eamode31 - __samode31);
 819	memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
 820	memblock_reserve(__pa(_stext), _end - _stext);
 821}
 822
 823static void __init setup_memory(void)
 824{
 825	phys_addr_t start, end;
 826	u64 i;
 827
 828	/*
 829	 * Init storage key for present memory
 830	 */
 831	for_each_mem_range(i, &start, &end)
 832		storage_key_init_range(start, end);
 833
 834	psw_set_key(PAGE_DEFAULT_KEY);
 835}
 836
 837static void __init relocate_amode31_section(void)
 838{
 839	unsigned long amode31_size = __eamode31 - __samode31;
 840	long amode31_offset = __amode31_base - __samode31;
 841	long *ptr;
 842
 843	pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
 844
 845	/* Move original AMODE31 section to the new one */
 846	memmove((void *)__amode31_base, (void *)__samode31, amode31_size);
 847	/* Zero out the old AMODE31 section to catch invalid accesses within it */
 848	memset((void *)__samode31, 0, amode31_size);
 849
 850	/* Update all AMODE31 region references */
 851	for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
 852		*ptr += amode31_offset;
 853}
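/*
 * Illustrative sketch of the fix-up loop above: a reference that held
 * __samode31 + 0x400 before the memmove() now gets amode31_offset added
 * and thus points at __amode31_base + 0x400 inside the relocated copy.
 * (The 0x400 displacement is made up for illustration.)
 */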
 854
 855/* This must be called after AMODE31 relocation */
 856static void __init setup_cr(void)
 857{
 858	union ctlreg2 cr2;
 859	union ctlreg5 cr5;
 860	union ctlreg15 cr15;
 861
 862	__ctl_duct[1] = (unsigned long)__ctl_aste;
 863	__ctl_duct[2] = (unsigned long)__ctl_aste;
 864	__ctl_duct[4] = (unsigned long)__ctl_duald;
 865
 866	/* Update control registers CR2, CR5 and CR15 */
 867	__ctl_store(cr2.val, 2, 2);
 868	__ctl_store(cr5.val, 5, 5);
 869	__ctl_store(cr15.val, 15, 15);
 870	cr2.ducto = (unsigned long)__ctl_duct >> 6;
 871	cr5.pasteo = (unsigned long)__ctl_duct >> 6;
 872	cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
 873	__ctl_load(cr2.val, 2, 2);
 874	__ctl_load(cr5.val, 5, 5);
 875	__ctl_load(cr15.val, 15, 15);
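	/*
	 * The shifts encode table origins in their register fields: the
	 * DUCT and primary-ASTE origins are 64-byte aligned (hence >> 6),
	 * the linkage-stack entry address is 8-byte aligned (hence >> 3).
	 */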
 876}
 877
 878/*
 879 * Add system information as device randomness
 880 */
 881static void __init setup_randomness(void)
 882{
 883	struct sysinfo_3_2_2 *vmms;
 884
 885	vmms = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 886	if (!vmms)
 887		panic("Failed to allocate memory for sysinfo structure\n");
 888	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
 889		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
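	/* mixed into the random pool without crediting entropy */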
 890	memblock_free(vmms, PAGE_SIZE);
 891
 892	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
 893		static_branch_enable(&s390_arch_random_available);
 894}
 895
 896/*
 897 * Find the correct size for the task_struct. This depends on
 898 * the size of the struct fpu at the end of the thread_struct
 899 * which is embedded in the task_struct.
 900 */
 901static void __init setup_task_size(void)
 902{
 903	int task_size = sizeof(struct task_struct);
 904
 905	if (!MACHINE_HAS_VX) {
 906		task_size -= sizeof(__vector128) * __NUM_VXRS;
 907		task_size += sizeof(freg_t) * __NUM_FPRS;
 908	}
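	/*
	 * With the usual register-file sizes (illustrative arithmetic)
	 * this swaps 32 vector registers of 16 bytes for 16 FP registers
	 * of 8 bytes, shrinking the task_struct by 384 bytes.
	 */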
 909	arch_task_struct_size = task_size;
 910}
 911
 912/*
 913 * Issue diagnose 318 to set the control program name and
 914 * version codes.
 915 */
 916static void __init setup_control_program_code(void)
 917{
 918	union diag318_info diag318_info = {
 919		.cpnc = CPNC_LINUX,
 920		.cpvc = 0,
 921	};
 922
 923	if (!sclp.has_diag318)
 924		return;
 925
 926	diag_stat_inc(DIAG_STAT_X318);
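	/* the packed name/version codes are passed in a general register */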
 927	asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
 928}
 929
 930/*
 931 * Print the component list from the IPL report
 932 */
 933static void __init log_component_list(void)
 934{
 935	struct ipl_rb_component_entry *ptr, *end;
 936	char *str;
 937
 938	if (!early_ipl_comp_list_addr)
 939		return;
 940	if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
 941		pr_info("Linux is running with Secure-IPL enabled\n");
 942	else
 943		pr_info("Linux is running with Secure-IPL disabled\n");
 944	ptr = (void *) early_ipl_comp_list_addr;
 945	end = (void *) ptr + early_ipl_comp_list_size;
 946	pr_info("The IPL report contains the following components:\n");
 947	while (ptr < end) {
 948		if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
 949			if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
 950				str = "signed, verified";
 951			else
 952				str = "signed, verification failed";
 953		} else {
 954			str = "not signed";
 955		}
 956		pr_info("%016llx - %016llx (%s)\n",
 957			ptr->addr, ptr->addr + ptr->len, str);
 958		ptr++;
 959	}
 960}
 961
 962/*
 963 * Setup function called from init/main.c just after the banner
 964 * was printed.
 965 */
 966
 967void __init setup_arch(char **cmdline_p)
 968{
 969	/*
 970	 * print what head.S has found out about the machine
 971	 */
 972	if (MACHINE_IS_VM)
 973		pr_info("Linux is running as a z/VM "
 974			"guest operating system in 64-bit mode\n");
 975	else if (MACHINE_IS_KVM)
 976		pr_info("Linux is running under KVM in 64-bit mode\n");
 977	else if (MACHINE_IS_LPAR)
 978		pr_info("Linux is running natively in 64-bit mode\n");
 979	else
 980		pr_info("Linux is running as a guest in 64-bit mode\n");
 981
 982	log_component_list();
 983
 984	/* Have one command line that is parsed and saved in /proc/cmdline */
 985	/* boot_command_line has been already set up in early.c */
 986	*cmdline_p = boot_command_line;
 987
 988	ROOT_DEV = Root_RAM0;
 989
 990	setup_initial_init_mm(_text, _etext, _edata, _end);
 991
 992	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
 993		nospec_auto_detect();
 994
 995	jump_label_init();
 996	parse_early_param();
 997#ifdef CONFIG_CRASH_DUMP
 998	/* Deactivate elfcorehdr= kernel parameter */
 999	elfcorehdr_addr = ELFCORE_ADDR_MAX;
1000#endif
1001
1002	os_info_init();
1003	setup_ipl();
1004	setup_task_size();
1005	setup_control_program_code();
1006
1007	/* Do some memory reservations *before* memory is added to memblock */
1008	reserve_kernel();
1009	reserve_initrd();
1010	reserve_certificate_list();
1011	reserve_mem_detect_info();
1012	memblock_set_current_limit(ident_map_size);
1013	memblock_allow_resize();
1014
1015	/* Get information about *all* installed memory */
1016	memblock_add_mem_detect_info();
1017
1018	free_mem_detect_info();
1019	setup_memory_end();
1020	memblock_dump_all();
1021	setup_memory();
1022
1023	relocate_amode31_section();
1024	setup_cr();
1025	setup_uv();
1026	dma_contiguous_reserve(ident_map_size);
1027	vmcp_cma_reserve();
1028	if (MACHINE_HAS_EDAT2)
1029		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
1030
1031	check_initrd();
1032	reserve_crashkernel();
1033#ifdef CONFIG_CRASH_DUMP
1034	/*
1035	 * Be aware that smp_save_dump_secondary_cpus() triggers a system reset.
1036	 * Therefore CPU and device initialization should be done afterwards.
1037	 */
1038	smp_save_dump_secondary_cpus();
1039#endif
1040
1041	setup_resources();
1042	setup_lowcore_dat_off();
1043	smp_fill_possible_mask();
1044	cpu_detect_mhz_feature();
1045	cpu_init();
1046	numa_setup();
1047	smp_detect_cpus();
1048	topology_init_early();
1049
1050	if (test_facility(193))
1051		static_branch_enable(&cpu_has_bear);
1052
1053	/*
1054	 * Create kernel page tables and switch to virtual addressing.
1055	 */
1056	paging_init();
1057	memcpy_real_init();
1058	/*
1059	 * After paging_init created the kernel page table, the new PSWs
1060	 * in lowcore can now run with DAT enabled.
1061	 */
1062	setup_lowcore_dat_on();
1063#ifdef CONFIG_CRASH_DUMP
1064	smp_save_dump_ipl_cpu();
1065#endif
1066
1067	/* Setup default console */
1068	conmode_default();
1069	set_preferred_console();
1070
1071	apply_alternative_instructions();
1072	if (IS_ENABLED(CONFIG_EXPOLINE))
1073		nospec_init_branches();
1074
1075	/* Setup zfcp/nvme dump support */
1076	setup_zfcpdump();
1077
1078	/* Add system specific data to the random pool */
1079	setup_randomness();
1080}