// SPDX-License-Identifier: GPL-2.0
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/page-states.h>
#include <asm/boot_data.h>
#include <asm/sections.h>
#include <asm/maccess.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(__abs_lowcore);
unsigned long __bootdata_preserved(__memcpy_real_area);
pte_t *__bootdata_preserved(memcpy_real_ptep);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata(ident_map_size);

u64 __bootdata_preserved(stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

struct machine_info machine;

void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}
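
/*
 * Facility bits probed here: 8 (EDAT-1), 78 (EDAT-2) and 130
 * (instruction-execution protection, reported as "nx"). EDAT-1
 * additionally requires the enablement bit in control register 0
 * to be set before enhanced DAT can be used.
 */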
static void detect_facilities(void)
{
	if (test_facility(8)) {
		machine.has_edat1 = 1;
		local_ctl_set_bit(0, CR0_EDAT_BIT);
	}
	if (test_facility(78))
		machine.has_edat2 = 1;
	if (test_facility(130))
		machine.has_nx = 1;
}
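
/*
 * Probe for the ESSA instruction (extract and set storage attributes)
 * by executing it with the ESSA_GET_STATE command. A temporary program
 * check PSW pointing at label 1 is installed first: on machines without
 * ESSA, the resulting operation exception branches there and rc keeps
 * its initial value of 1; otherwise rc is set to 0. The old program
 * check PSW is restored in both cases.
 */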
static int cmma_test_essa(void)
{
	unsigned long reg1, reg2, tmp = 0;
	int rc = 1;
	psw_t old;

	/* Test ESSA_GET_STATE */
	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
		"	la	%[rc],0\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [tmp] "=&d" (tmp),
		  "+Q" (S390_lowcore.program_new_psw),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
		  [cmd] "i" (ESSA_GET_STATE)
		: "cc", "memory");
	return rc;
}
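
/*
 * cmma_flag == 0 disables CMMA, == 1 means basic ESSA support is
 * available, == 2 means facility 147 is installed as well (presumably
 * enabling the no-DAT page states used by later page state handling -
 * an assumption based on the flag's consumers, not on documentation).
 */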
static void cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}
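
/*
 * Set the program parameter register via LPP (load program parameter,
 * facility 40) to LPP_MAGIC, presumably so that CPU measurement
 * sampling data gathered from early boot on can be attributed to the
 * kernel.
 */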
static void setup_lpp(void)
{
	S390_lowcore.current_pid = 0;
	S390_lowcore.lpp = LPP_MAGIC;
	if (test_facility(40))
		lpp(&S390_lowcore.lpp);
}

#ifdef CONFIG_KERNEL_UNCOMPRESSED
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif
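
/*
 * Relocate the initrd if it does not fit completely into the
 * [min, max) range, e.g. because it would otherwise be overwritten
 * by the decompressed kernel or lie above the usable memory limit.
 */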
static void rescue_initrd(unsigned long min, unsigned long max)
{
	unsigned long old_addr, addr, size;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
		return;
	if (!get_physmem_reserved(RR_INITRD, &addr, &size))
		return;
	if (addr >= min && addr + size <= max)
		return;
	old_addr = addr;
	physmem_free(RR_INITRD);
	addr = physmem_alloc_top_down(RR_INITRD, size, 0);
	memmove((void *)addr, (void *)old_addr, size);
}
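
/*
 * Copy the .boot.data and .boot.preserved.data sections from the
 * decompressor to their counterparts in the decompressed kernel image.
 * Variables declared with __bootdata/__bootdata_preserved (as at the
 * top of this file) live in these sections; the size checks guard the
 * layout contract between the two images.
 */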
static void copy_bootdata(void)
{
	if (__boot_data_end - __boot_data_start != vmlinux.bootdata_size)
		error(".boot.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_off, __boot_data_start, vmlinux.bootdata_size);
	if (__boot_data_preserved_end - __boot_data_preserved_start != vmlinux.bootdata_preserved_size)
		error(".boot.preserved.data section size mismatch");
	memcpy((void *)vmlinux.bootdata_preserved_off, __boot_data_preserved_start, vmlinux.bootdata_preserved_size);
}

#ifdef CONFIG_PIE_BUILD
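/*
 * With CONFIG_PIE_BUILD the kernel carries regular ELF .rela.dyn
 * relocations. Each entry is applied at its KASLR-shifted location:
 * entries referencing a defined dynamic symbol add the (shifted)
 * symbol value, while R_390_RELATIVE entries (symbol index STN_UNDEF)
 * only add the KASLR offset itself.
 */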
static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr, unsigned long offset)
{
	Elf64_Rela *rela_start, *rela_end, *rela;
	int r_type, r_sym, rc;
	Elf64_Addr loc, val;
	Elf64_Sym *dynsym;

	rela_start = (Elf64_Rela *) vmlinux.rela_dyn_start;
	rela_end = (Elf64_Rela *) vmlinux.rela_dyn_end;
	dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
	for (rela = rela_start; rela < rela_end; rela++) {
		loc = rela->r_offset + offset;
		val = rela->r_addend;
		r_sym = ELF64_R_SYM(rela->r_info);
		if (r_sym) {
			if (dynsym[r_sym].st_shndx != SHN_UNDEF)
				val += dynsym[r_sym].st_value + offset;
		} else {
			/*
			 * 0 == undefined symbol table index (STN_UNDEF),
			 * used for R_390_RELATIVE, only add KASLR offset
			 */
			val += offset;
		}
		r_type = ELF64_R_TYPE(rela->r_info);
		rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
		if (rc)
			error("Unknown relocation type");
	}
}

static void kaslr_adjust_got(unsigned long offset) {}
static void rescue_relocs(void) {}
static void free_relocs(void) {}
#else
static int *vmlinux_relocs_64_start;
static int *vmlinux_relocs_64_end;
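
/*
 * The relocation table is part of the decompressor's image and might
 * be overwritten once the decompressed kernel is moved to its final
 * position, so copy it to freshly allocated memory first; it is freed
 * again with free_relocs() after the relocations have been applied.
 */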
static void rescue_relocs(void)
{
	unsigned long size = __vmlinux_relocs_64_end - __vmlinux_relocs_64_start;

	vmlinux_relocs_64_start = (void *)physmem_alloc_top_down(RR_RELOC, size, 0);
	vmlinux_relocs_64_end = (void *)vmlinux_relocs_64_start + size;
	memmove(vmlinux_relocs_64_start, __vmlinux_relocs_64_start, size);
}

static void free_relocs(void)
{
	physmem_free(RR_RELOC);
}

static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr, unsigned long offset)
{
	int *reloc;
	long loc;

	/* Adjust R_390_64 relocations */
	for (reloc = vmlinux_relocs_64_start; reloc < vmlinux_relocs_64_end; reloc++) {
		loc = (long)*reloc + offset;
		if (loc < min_addr || loc > max_addr)
			error("64-bit relocation outside of kernel!\n");
		*(u64 *)loc += offset;
	}
}

static void kaslr_adjust_got(unsigned long offset)
{
	u64 *entry;

	/*
	 * Even without -fPIE, Clang still uses a global offset table for some
	 * reason. Adjust the GOT entries.
	 */
	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++)
		*entry += offset;
}
#endif

/*
 * Merge information from several sources into a single ident_map_size value.
 * "ident_map_size" represents the upper limit of physical memory we may ever
 * reach. It might not cover all online memory, but may also include standby
 * (offline) memory. "ident_map_size" could be lower than the actual standby
 * or even online memory present, due to limiting factors. We should never
 * go above this limit. It is the size of our identity mapping.
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
 * 4. OLDMEM_BASE which is a kdump memory limit when the kernel is executed as
 *    a crash kernel.
 * 5. "hsa" size which is a memory limit when the kernel is executed during
 *    zfcp/nvme dump.
 */
static void setup_ident_map_size(unsigned long max_physmem_end)
{
	unsigned long hsa_size;

	ident_map_size = max_physmem_end;
	if (memory_limit)
		ident_map_size = min(ident_map_size, memory_limit);
	ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);

#ifdef CONFIG_CRASH_DUMP
	if (oldmem_data.start) {
		__kaslr_enabled = 0;
		ident_map_size = min(ident_map_size, oldmem_data.size);
	} else if (ipl_block_valid && is_ipl_block_dump()) {
		__kaslr_enabled = 0;
		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
			ident_map_size = min(ident_map_size, hsa_size);
	}
#endif
}
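
/*
 * Rough sketch of the virtual address space layout established below,
 * carved out top down from vmax (derived from the code in this
 * function, not from authoritative documentation):
 *
 *   vmax
 *     memcpy real area (MEMCPY_REAL_SIZE)
 *     absolute lowcore mapping (ABS_LOWCORE_MAP_SIZE)
 *     modules area (MODULES_VADDR..MODULES_END)
 *     vmalloc area (VMALLOC_START..VMALLOC_END)
 *     vmemmap array
 *     identity mapping of physical memory (0..ident_map_size)
 *   0
 */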
static unsigned long setup_kernel_memory_layout(void)
{
	unsigned long vmemmap_start;
	unsigned long asce_limit;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vsize;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	vsize = round_up(ident_map_size, _REGION3_SIZE) + vmemmap_size +
		MODULES_LEN + MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE;
	vsize = size_add(vsize, vmalloc_size);
	if (IS_ENABLED(CONFIG_KASAN) || (vsize > _REGION2_SIZE)) {
		asce_limit = _REGION1_SIZE;
		rte_size = _REGION2_SIZE;
	} else {
		asce_limit = _REGION2_SIZE;
		rte_size = _REGION3_SIZE;
	}

	/*
	 * Force modules and the vmalloc area below the ultravisor secure
	 * storage limit, so that any vmalloc allocation we do can also be
	 * used to back secure guest storage.
	 */
	vmax = adjust_to_uv_max(asce_limit);
#ifdef CONFIG_KASAN
	/* force vmalloc and modules below kasan shadow */
	vmax = min(vmax, KASAN_SHADOW_START);
#endif
	__memcpy_real_area = round_down(vmax - MEMCPY_REAL_SIZE, PAGE_SIZE);
	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
				   sizeof(struct lowcore));
	MODULES_END = round_down(__abs_lowcore, _SEGMENT_SIZE);
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;

	/* allow the vmalloc area to occupy up to about 1/2 of the remaining virtual space */
	vsize = round_down(VMALLOC_END / 2, _SEGMENT_SIZE);
	vmalloc_size = min(vmalloc_size, vsize);
	VMALLOC_START = VMALLOC_END - vmalloc_size;

	/* split remaining virtual space between 1:1 mapping & vmemmap array */
	pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	/* keep vmemmap_start aligned to a top level region table entry */
	vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
	vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
	/* maximum mappable address as seen by arch_get_mappable_range() */
	max_mappable = vmemmap_start;
	/* make sure the identity map doesn't overlap with vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_start);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	/* make sure vmemmap doesn't overlap with the vmalloc area */
	VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
	vmemmap = (struct page *)vmemmap_start;

	return asce_limit;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel
 * and NOT the decompressor's.
 */
static void clear_bss_section(unsigned long vmlinux_lma)
{
	memset((void *)vmlinux_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}

/*
 * Set vmalloc area size to an 8th of (potential) physical memory
 * size, unless size has been set by kernel command line parameter.
 */
static void setup_vmalloc_size(void)
{
	unsigned long size;

	if (vmalloc_size_set)
		return;
	size = round_up(ident_map_size / 8, _SEGMENT_SIZE);
	vmalloc_size = max(size, vmalloc_size);
}
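
/*
 * The "vmlinux" struct describes addresses inside the decompressed
 * kernel image as recorded at build time; once a KASLR offset has
 * been chosen, every recorded address must be shifted by that offset
 * before it is used.
 */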
static void kaslr_adjust_vmlinux_info(unsigned long offset)
{
	*(unsigned long *)(&vmlinux.entry) += offset;
	vmlinux.bootdata_off += offset;
	vmlinux.bootdata_preserved_off += offset;
#ifdef CONFIG_PIE_BUILD
	vmlinux.rela_dyn_start += offset;
	vmlinux.rela_dyn_end += offset;
	vmlinux.dynsym_start += offset;
#else
	vmlinux.got_start += offset;
	vmlinux.got_end += offset;
#endif
	vmlinux.init_mm_off += offset;
	vmlinux.swapper_pg_dir_off += offset;
	vmlinux.invalid_pg_dir_off += offset;
#ifdef CONFIG_KASAN
	vmlinux.kasan_early_shadow_page_off += offset;
	vmlinux.kasan_early_shadow_pte_off += offset;
	vmlinux.kasan_early_shadow_pmd_off += offset;
	vmlinux.kasan_early_shadow_pud_off += offset;
	vmlinux.kasan_early_shadow_p4d_off += offset;
#endif
}
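
/*
 * Entry point of the C part of the decompressor, called from the early
 * assembly startup code. It establishes the physical and virtual
 * memory layout, optionally randomizes the kernel position,
 * decompresses and relocates the kernel image, and finally jumps to
 * the decompressed kernel's entry point by loading a new PSW.
 */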
void startup_kernel(void)
{
	unsigned long max_physmem_end;
	unsigned long vmlinux_lma = 0;
	unsigned long amode31_lma = 0;
	unsigned long asce_limit;
	unsigned long safe_addr;
	void *img;
	psw_t psw;

	setup_lpp();
	safe_addr = mem_safe_offset();

	/*
	 * Reserve the decompressor memory together with the decompression
	 * heap, buffer and memory which might be occupied by the uncompressed
	 * kernel at the default 1MB position (if KASLR is off or has failed).
	 */
	physmem_reserve(RR_DECOMPRESSOR, 0, safe_addr);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && parmarea.initrd_size)
		physmem_reserve(RR_INITRD, parmarea.initrd_start, parmarea.initrd_size);
	oldmem_data.start = parmarea.oldmem_base;
	oldmem_data.size = parmarea.oldmem_size;

	store_ipl_parmblock();
	read_ipl_report();
	uv_query_info();
	sclp_early_read_info();
	setup_boot_command_line();
	parse_boot_command_line();
	detect_facilities();
	cmma_init();
	sanitize_prot_virt_host();
	max_physmem_end = detect_max_physmem_end();
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout();
	/* got final ident_map_size, physmem allocations can be performed now */
	physmem_set_usable_limit(ident_map_size);
	detect_physmem_online_ranges(max_physmem_end);
	save_ipl_cert_comp_list();
	rescue_initrd(safe_addr, ident_map_size);
	rescue_relocs();

	if (kaslr_enabled()) {
		vmlinux_lma = randomize_within_range(vmlinux.image_size + vmlinux.bss_size,
						     THREAD_SIZE, vmlinux.default_lma,
						     ident_map_size);
		if (vmlinux_lma) {
			__kaslr_offset = vmlinux_lma - vmlinux.default_lma;
			kaslr_adjust_vmlinux_info(__kaslr_offset);
		}
	}
	vmlinux_lma = vmlinux_lma ?: vmlinux.default_lma;
	physmem_reserve(RR_VMLINUX, vmlinux_lma, vmlinux.image_size + vmlinux.bss_size);

	if (!IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) {
		img = decompress_kernel();
		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
	} else if (__kaslr_offset) {
		img = (void *)vmlinux.default_lma;
		memmove((void *)vmlinux_lma, img, vmlinux.image_size);
		memset(img, 0, vmlinux.image_size);
	}

	/* vmlinux decompression is done, shrink reserved low memory */
	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
	if (kaslr_enabled())
		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, 0, SZ_2G);
	amode31_lma = amode31_lma ?: vmlinux.default_lma - vmlinux.amode31_size;
	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);

	/*
	 * The order of the following operations is important:
	 *
	 * - kaslr_adjust_relocs() must follow clear_bss_section() to establish
	 *   the static memory references to data in .bss that are used by
	 *   setup_vmem() (i.e. init_mm.pgd)
	 *
	 * - setup_vmem() must follow kaslr_adjust_relocs() to be able to use
	 *   static memory references to data in .bss (i.e. init_mm.pgd)
	 *
	 * - copy_bootdata() must follow setup_vmem() to propagate changes
	 *   to bootdata made by setup_vmem()
	 */
	clear_bss_section(vmlinux_lma);
	kaslr_adjust_relocs(vmlinux_lma, vmlinux_lma + vmlinux.image_size, __kaslr_offset);
	kaslr_adjust_got(__kaslr_offset);
	free_relocs();
	setup_vmem(asce_limit);
	copy_bootdata();

	/*
	 * Save the KASLR offset for early dumps, before vmcore_info is set.
	 * Mark it as odd to distinguish it from a real vmcore_info pointer.
	 */
	S390_lowcore.vmcore_info = __kaslr_offset ? __kaslr_offset | 0x1UL : 0;

	/*
	 * Jump to the decompressed kernel entry point and switch DAT mode on.
	 */
	psw.addr = vmlinux.entry;
	psw.mask = PSW_KERNEL_BITS;
	__load_psw(psw);
}