Loading...
1/*
2 * Copyright IBM Corp. 2007, 2009
3 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
4 * Heiko Carstens <heiko.carstens@de.ibm.com>
5 */
6
7#define KMSG_COMPONENT "setup"
8#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9
10#include <linux/compiler.h>
11#include <linux/init.h>
12#include <linux/errno.h>
13#include <linux/string.h>
14#include <linux/ctype.h>
15#include <linux/lockdep.h>
16#include <linux/extable.h>
17#include <linux/pfn.h>
18#include <linux/uaccess.h>
19#include <linux/kernel.h>
20#include <asm/diag.h>
21#include <asm/ebcdic.h>
22#include <asm/ipl.h>
23#include <asm/lowcore.h>
24#include <asm/processor.h>
25#include <asm/sections.h>
26#include <asm/setup.h>
27#include <asm/sysinfo.h>
28#include <asm/cpcmd.h>
29#include <asm/sclp.h>
30#include <asm/facility.h>
31#include "entry.h"
32
33/*
34 * Create a Kernel NSS if the SAVESYS= parameter is defined
35 */
36#define DEFSYS_CMD_SIZE 128
37#define SAVESYS_CMD_SIZE 32
38
39char kernel_nss_name[NSS_NAME_SIZE + 1];
40
41static void __init setup_boot_command_line(void);
42
/*
 * Get the TOD clock running.
 */
static void __init reset_tod_clock(void)
{
	u64 time;

	/* Nothing to do if the TOD clock is already set and running. */
	if (store_tod_clock(&time) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
		disabled_wait(0); /* no usable clock - stop in disabled wait */

	/* Make scheduler clock accounting consistent with the new epoch. */
	sched_clock_base_cc = TOD_UNIX_EPOCH;
	S390_lowcore.last_update_clock = sched_clock_base_cc;
}
59
#ifdef CONFIG_SHARED_KERNEL
int __init savesys_ipl_nss(char *cmd, const int cmdlen);

/*
 * Issue the (EBCDIC) SAVESYS command string via diagnose 8.
 * The diagnose must be executed in 31-bit addressing mode, hence the
 * sam31/sam64 pair around it.  The CP return value is passed back in
 * the function's return register.
 */
asm(
	" .section .init.text,\"ax\",@progbits\n"
	" .align 4\n"
	" .type savesys_ipl_nss, @function\n"
	"savesys_ipl_nss:\n"
	" stmg 6,15,48(15)\n"
	" lgr 14,3\n"
	" sam31\n"
	" diag 2,14,0x8\n"
	" sam64\n"
	" lgr 2,14\n"
	" lmg 6,15,48(15)\n"
	" br 14\n"
	" .size savesys_ipl_nss, .-savesys_ipl_nss\n"
	" .previous\n");

/* Upper-cased copy of the boot command line, used to find "SAVESYS=". */
static __initdata char upper_command_line[COMMAND_LINE_SIZE];

/*
 * Create a kernel NSS (named saved system) under z/VM when the command
 * line contains a SAVESYS=<name> parameter: build and issue a DEFSYS
 * command describing the kernel (and optionally initrd) segments, then
 * SAVESYS + re-IPL the NSS via diagnose 8.  On any failure
 * kernel_nss_name is cleared and the function returns quietly.
 */
static noinline __init void create_kernel_nss(void)
{
	unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned int sinitrd_pfn, einitrd_pfn;
#endif
	int response;
	int hlen;
	size_t len;
	char *savesys_ptr;
	char defsys_cmd[DEFSYS_CMD_SIZE];
	char savesys_cmd[SAVESYS_CMD_SIZE];

	/* Do nothing if we are not running under VM */
	if (!MACHINE_IS_VM)
		return;

	/* Convert COMMAND_LINE to upper case */
	for (i = 0; i < strlen(boot_command_line); i++)
		upper_command_line[i] = toupper(boot_command_line[i]);

	savesys_ptr = strstr(upper_command_line, "SAVESYS=");

	if (!savesys_ptr)
		return;

	savesys_ptr += 8; /* Point to the beginning of the NSS name */
	/* NSS name ends at the first blank, NUL, or NSS_NAME_SIZE chars. */
	for (i = 0; i < NSS_NAME_SIZE; i++) {
		if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
			break;
		kernel_nss_name[i] = savesys_ptr[i];
	}

	/* Segment boundaries of the running kernel, in page frames. */
	stext_pfn = PFN_DOWN(__pa(&_stext));
	eshared_pfn = PFN_DOWN(__pa(&_eshared));
	end_pfn = PFN_UP(__pa(&_end));
	min_size = end_pfn << 2; /* pages -> KiB for MINSIZE= */

	/*
	 * NOTE(review): hlen is used unchecked as an offset below; the
	 * NSS name is at most NSS_NAME_SIZE chars so the string should
	 * always fit in DEFSYS_CMD_SIZE - verify if sizes ever change.
	 */
	hlen = snprintf(defsys_cmd, DEFSYS_CMD_SIZE,
			"DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
			kernel_nss_name, stext_pfn - 1, stext_pfn,
			eshared_pfn - 1, eshared_pfn, end_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
		einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
		min_size = einitrd_pfn << 2;
		/* Add an exclusive-write range covering the initrd. */
		hlen += snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
				 " EW %.5X-%.5X", sinitrd_pfn, einitrd_pfn);
	}
#endif

	snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
		 " EW MINSIZE=%.7iK PARMREGS=0-13", min_size);
	defsys_cmd[DEFSYS_CMD_SIZE - 1] = '\0';
	snprintf(savesys_cmd, SAVESYS_CMD_SIZE, "SAVESYS %s \n IPL %s",
		 kernel_nss_name, kernel_nss_name);
	savesys_cmd[SAVESYS_CMD_SIZE - 1] = '\0';

	__cpcmd(defsys_cmd, NULL, 0, &response);

	if (response != 0) {
		pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
			response);
		kernel_nss_name[0] = '\0';
		return;
	}

	/* Diagnose 8 expects the command in EBCDIC. */
	len = strlen(savesys_cmd);
	ASCEBC(savesys_cmd, len);
	response = savesys_ipl_nss(savesys_cmd, len);

	/* On success: response is equal to the command size,
	 *	       max SAVESYS_CMD_SIZE
	 * On error: response contains the numeric portion of cp error message.
	 *	     for SAVESYS it will be >= 263
	 *	     for missing privilege class, it will be 1
	 */
	if (response > SAVESYS_CMD_SIZE || response == 1) {
		pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
			response);
		kernel_nss_name[0] = '\0';
		return;
	}

	/* re-initialize cputime accounting. */
	sched_clock_base_cc = get_tod_clock();
	S390_lowcore.last_update_clock = sched_clock_base_cc;
	S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
	S390_lowcore.user_timer = 0;
	S390_lowcore.system_timer = 0;
	asm volatile("SPT 0(%0)" : : "a" (&S390_lowcore.last_update_timer));

	/* re-setup boot command line with new ipl vm parms */
	ipl_update_parameters();
	setup_boot_command_line();

	ipl_flags = IPL_NSS_VALID;
}

#else /* CONFIG_SHARED_KERNEL */

static inline void create_kernel_nss(void) { }

#endif /* CONFIG_SHARED_KERNEL */
187
/*
 * Clear bss memory
 */
static noinline __init void clear_bss_section(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}

/*
 * Initialize storage key for kernel pages
 */
static noinline __init void init_kernel_storage_key(void)
{
#if PAGE_DEFAULT_KEY
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(&_end));

	/* Apply the default storage key to every page up to _end. */
	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT,
				     PAGE_DEFAULT_KEY, 0);
#endif
}

/* Scratch page shared by the store-system-information (stsi) users below. */
static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * Determine whether we run on LPAR, z/VM or KVM and set the
 * corresponding machine flag in the lowcore.
 */
static noinline __init void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;

	/* Running under KVM? If not we assume z/VM */
	/* "\xd2\xe5\xd4" is "KVM" in EBCDIC. */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
	else
		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
233
/*
 * Build the architecture description string (shown in stack dumps)
 * from the basic-machine stsi 1.1.1 data.
 */
static noinline __init void setup_arch_string(void)
{
	struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;

	if (stsi(mach, 1, 1, 1))
		return;
	/* stsi data is EBCDIC encoded - convert in place for printing. */
	EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
	EBCASC(mach->type, sizeof(mach->type));
	EBCASC(mach->model, sizeof(mach->model));
	EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
	dump_stack_set_arch_desc("%-16.16s %-4.4s %-16.16s %-16.16s (%s)",
				 mach->manufacturer,
				 mach->type,
				 mach->model,
				 mach->model_capacity,
				 MACHINE_IS_LPAR ? "LPAR" :
				 MACHINE_IS_VM ? "z/VM" :
				 MACHINE_IS_KVM ? "KVM" : "unknown");
}

/*
 * Detect the configuration-topology facility and probe the highest
 * stsi 15.1.x nesting level the machine supports.
 */
static __init void setup_topology(void)
{
	int max_mnest;

	if (!test_facility(11))
		return;
	S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
	/* Try selector-2 values from 6 downwards until one succeeds. */
	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
			break;
	}
	topology_max_mnest = max_mnest;
}

/*
 * Early program-check handler: look up the faulting address in the
 * exception tables and rewrite the old PSW so execution resumes at the
 * fixup; enter a disabled wait if no fixup exists.
 */
static void early_pgm_check_handler(void)
{
	const struct exception_table_entry *fixup;
	unsigned long cr0, cr0_new;
	unsigned long addr;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr);
	if (!fixup)
		disabled_wait(0);
	/* Disable low address protection before storing into lowcore. */
	__ctl_store(cr0, 0, 0);
	cr0_new = cr0 & ~(1UL << 28);
	__ctl_load(cr0_new, 0, 0);
	S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
	__ctl_load(cr0, 0, 0);
}

/*
 * Install minimal new PSWs for external interrupts and program checks
 * so that early boot code can take and survive both.
 */
static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
	psw.addr = (unsigned long) s390_base_ext_handler;
	S390_lowcore.external_new_psw = psw;
	psw.addr = (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = early_pgm_check_handler;
	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}
298
/* Cache the installed facility bits in the lowcore. */
static noinline __init void setup_facility_list(void)
{
	stfle(S390_lowcore.stfle_fac_list,
	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
}

/* Probe whether diagnose 0x9c is available on this machine. */
static __init void detect_diag9c(void)
{
	unsigned int cpu_address;
	int rc;

	cpu_address = stap();
	diag_stat_inc(DIAG_STAT_X09C);
	/*
	 * A program check during the diagnose branches to label 1 via the
	 * exception table, leaving rc at -EOPNOTSUPP; otherwise rc = 0.
	 */
	asm volatile(
		" diag %2,0,0x9c\n"
		"0: la %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}

/* Probe whether diagnose 0x44 is available on this machine. */
static __init void detect_diag44(void)
{
	int rc;

	diag_stat_inc(DIAG_STAT_X044);
	/* Same exception-table trick as in detect_diag9c() above. */
	asm volatile(
		" diag 0,0,0x44\n"
		"0: la %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
}

/*
 * Translate installed facility bits into machine flags and enable the
 * corresponding control-register bits where required.
 */
static __init void detect_machine_facilities(void)
{
	if (test_facility(8)) {	/* EDAT-1 */
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
		__ctl_set_bit(0, 23);
	}
	if (test_facility(78))	/* EDAT-2 */
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
	if (test_facility(3))	/* IDTE */
		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
	if (test_facility(40))	/* load-program-parameter */
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
	if (test_facility(50) && test_facility(73))	/* transactional execution */
		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
	if (test_facility(51))
		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
	if (test_facility(129)) {	/* vector facility */
		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
		__ctl_set_bit(0, 17);
	}
}

/*
 * Preserve the boot cpu's vector registers for crash dumps, if the
 * vector facility (129) is installed.
 */
static inline void save_vector_registers(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (test_facility(129))
		save_vx_regs(boot_cpu_vector_save_area);
#endif
}
366
367static int __init disable_vector_extension(char *str)
368{
369 S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
370 __ctl_clear_bit(0, 17);
371 return 1;
372}
373early_param("novx", disable_vector_extension);
374
/*
 * "cad=<n>" early parameter: any non-zero value requests the CAD
 * machine flag, provided facility 128 is installed.
 */
static int __init cad_setup(char *str)
{
	int val;

	get_option(&str, &val);
	if (val && test_facility(128))
		S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
	return 0;
}
early_param("cad", cad_setup);

/* Turn on CAD for problem state if it was requested via "cad=". */
static int __init cad_init(void)
{
	if (MACHINE_HAS_CAD)
		/* Enable problem state CAD. */
		__ctl_set_bit(2, 3);
	return 0;
}
early_initcall(cad_init);
394
/*
 * memmove() variant that tolerates program checks: a temporary
 * program-check PSW pointing behind the copy loop is installed, so a
 * fault inside the byte-wise mvc loop terminates the copy instead of
 * taking the regular program-check path.  Overlapping areas are
 * handled by copying backwards when dst > src.
 */
static __init void memmove_early(void *dst, const void *src, size_t n)
{
	unsigned long addr;
	long incr;
	psw_t old;

	if (!n)
		return;
	incr = 1;
	if (dst > src) {
		/* Overlap possible - copy from the last byte downwards. */
		incr = -incr;
		dst += n - 1;
		src += n - 1;
	}
	/* Redirect the program-check PSW to label 1 for the duration. */
	old = S390_lowcore.program_new_psw;
	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		" larl %[addr],1f\n"
		" stg %[addr],%[psw_pgm_addr]\n"
		"0: mvc 0(1,%[dst]),0(%[src])\n"
		" agr %[dst],%[incr]\n"
		" agr %[src],%[incr]\n"
		" brctg %[n],0b\n"
		"1:\n"
		: [addr] "=&d" (addr),
		  [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr),
		  [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
		: [incr] "d" (incr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
}

/*
 * Copy the IPL parameter block to IPL_PARMBLOCK_ORIGIN and point the
 * lowcore at the new location.
 */
static __init noinline void ipl_save_parameters(void)
{
	void *src, *dst;

	src = (void *)(unsigned long) S390_lowcore.ipl_parmblock_ptr;
	dst = (void *) IPL_PARMBLOCK_ORIGIN;
	memmove_early(dst, src, PAGE_SIZE);
	S390_lowcore.ipl_parmblock_ptr = IPL_PARMBLOCK_ORIGIN;
}

/* Move the initrd out of the way if it sits too close to the kernel. */
static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
	/*
	 * Just like in case of IPL from VM reader we make sure there is a
	 * gap of 4MB between end of kernel and start of initrd.
	 * That way we can also be sure that saving an NSS will succeed,
	 * which however only requires different segments.
	 */
	if (!INITRD_START || !INITRD_SIZE)
		return;
	if (INITRD_START >= min_initrd_addr)
		return;
	/* memmove_early() copes with the potentially overlapping ranges. */
	memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
	INITRD_START = min_initrd_addr;
#endif
}
455
/* Set up boot command line */
/*
 * Append data produced by @ipl_data to boot_command_line.  If the
 * produced text starts with '=' it replaces the whole command line
 * instead of being appended.
 */
static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
	char *parm, *delim;
	size_t rc, len;

	len = strlen(boot_command_line);

	delim = boot_command_line + len; /* '\0' character position */
	parm = boot_command_line + len + 1; /* append right after '\0' */

	/* rc is the number of characters @ipl_data wrote (0 = nothing). */
	rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
	if (rc) {
		if (*parm == '=')
			memmove(boot_command_line, parm + 1, rc);
		else
			*delim = ' '; /* replace '\0' with space */
	}
}
475
/*
 * Return 1 if the string contains a byte with the top bit set (such a
 * byte cannot be 7-bit ASCII and is taken as EBCDIC), otherwise 0.
 */
static inline int has_ebcdic_char(const char *str)
{
	const char *p;

	for (p = str; *p; p++)
		if (*p & 0x80)
			return 1;
	return 0;
}
485
/*
 * Build boot_command_line from the architecture command line plus IPL
 * parameter and SCP data.
 */
static void __init setup_boot_command_line(void)
{
	/* Make sure the arch command line is NUL terminated. */
	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
	/* convert arch command line to ascii if necessary */
	if (has_ebcdic_char(COMMAND_LINE))
		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
	/* copy arch command line */
	strlcpy(boot_command_line, strstrip(COMMAND_LINE),
		ARCH_COMMAND_LINE_SIZE);

	/* append IPL PARM data to the boot command line */
	if (MACHINE_IS_VM)
		append_to_cmdline(append_ipl_vmparm);

	append_to_cmdline(append_ipl_scpdata);
}
502
/*
 * Save ipl parameters, clear bss memory, initialize storage keys
 * and create a kernel NSS at startup if the SAVESYS= parm is defined
 */
void __init startup_init(void)
{
	reset_tod_clock();
	ipl_save_parameters();
	rescue_initrd();
	clear_bss_section();
	ipl_verify_parameters();
	time_early_init();
	init_kernel_storage_key();
	/* Keep lockdep off for the rest of the early setup. */
	lockdep_off();
	setup_lowcore_early();
	setup_facility_list();
	detect_machine_type();
	setup_arch_string();
	ipl_update_parameters();
	setup_boot_command_line();
	create_kernel_nss();
	detect_diag9c();
	detect_diag44();
	detect_machine_facilities();
	save_vector_registers();
	setup_topology();
	sclp_early_detect();
	lockdep_on();
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright IBM Corp. 2007, 2009
4 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
5 * Heiko Carstens <heiko.carstens@de.ibm.com>
6 */
7
8#define KMSG_COMPONENT "setup"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/compiler.h>
12#include <linux/init.h>
13#include <linux/errno.h>
14#include <linux/string.h>
15#include <linux/ctype.h>
16#include <linux/lockdep.h>
17#include <linux/extable.h>
18#include <linux/pfn.h>
19#include <linux/uaccess.h>
20#include <linux/kernel.h>
21#include <asm/diag.h>
22#include <asm/ebcdic.h>
23#include <asm/ipl.h>
24#include <asm/lowcore.h>
25#include <asm/processor.h>
26#include <asm/sections.h>
27#include <asm/setup.h>
28#include <asm/sysinfo.h>
29#include <asm/cpcmd.h>
30#include <asm/sclp.h>
31#include <asm/facility.h>
32#include "entry.h"
33
34static void __init setup_boot_command_line(void);
35
36/*
37 * Get the TOD clock running.
38 */
39static void __init reset_tod_clock(void)
40{
41 u64 time;
42
43 if (store_tod_clock(&time) == 0)
44 return;
45 /* TOD clock not running. Set the clock to Unix Epoch. */
46 if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
47 disabled_wait(0);
48
49 memset(tod_clock_base, 0, 16);
50 *(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH;
51 S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
52}
53
/*
 * Clear bss memory
 */
static noinline __init void clear_bss_section(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}

/*
 * Initialize storage key for kernel pages
 */
static noinline __init void init_kernel_storage_key(void)
{
#if PAGE_DEFAULT_KEY
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(_end));

	/* Apply the default storage key to every page up to _end. */
	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT,
				     PAGE_DEFAULT_KEY, 0);
#endif
}

/* Scratch page shared by the store-system-information (stsi) users below. */
static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * Determine whether we run on LPAR, z/VM or KVM and set the
 * corresponding machine flag in the lowcore.
 */
static noinline __init void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;

	/* Running under KVM? If not we assume z/VM */
	/* "\xd2\xe5\xd4" is "KVM" in EBCDIC. */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
	else
		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
99
/* Remove leading, trailing and double whitespace. */
static inline void strim_all(char *str)
{
	char *s;

	s = strim(str);
	if (s != str)
		/*
		 * Copy the terminating NUL too.  Moving only strlen(s)
		 * bytes left stale bytes after the shifted text whenever
		 * the input had leading whitespace (e.g. "  ab" became
		 * "abab"); latent so far because current callers never
		 * pass strings with leading whitespace.
		 */
		memmove(str, s, strlen(s) + 1);
	while (*str) {
		if (!isspace(*str++))
			continue;
		/* Collapse a run of whitespace down to a single blank. */
		if (isspace(*str)) {
			s = skip_spaces(str);
			memmove(str, s, strlen(s) + 1);
		}
	}
}
117
/*
 * Build the architecture description string (shown in stack dumps)
 * from the stsi 1.1.1 machine data plus the hypervisor name from
 * stsi 3.2.2 when available.
 */
static noinline __init void setup_arch_string(void)
{
	struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;
	struct sysinfo_3_2_2 *vm = (struct sysinfo_3_2_2 *)&sysinfo_page;
	char mstr[80], hvstr[17];

	if (stsi(mach, 1, 1, 1))
		return;
	/* stsi data is EBCDIC encoded - convert in place for printing. */
	EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
	EBCASC(mach->type, sizeof(mach->type));
	EBCASC(mach->model, sizeof(mach->model));
	EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
	sprintf(mstr, "%-16.16s %-4.4s %-16.16s %-16.16s",
		mach->manufacturer, mach->type,
		mach->model, mach->model_capacity);
	strim_all(mstr);
	if (stsi(vm, 3, 2, 2) == 0 && vm->count) {
		/* Use the control-program identifier as hypervisor name. */
		EBCASC(vm->vm[0].cpi, sizeof(vm->vm[0].cpi));
		sprintf(hvstr, "%-16.16s", vm->vm[0].cpi);
		strim_all(hvstr);
	} else {
		sprintf(hvstr, "%s",
			MACHINE_IS_LPAR ? "LPAR" :
			MACHINE_IS_VM ? "z/VM" :
			MACHINE_IS_KVM ? "KVM" : "unknown");
	}
	dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
}

/*
 * Detect the configuration-topology facility and probe the highest
 * stsi 15.1.x nesting level the machine supports.
 */
static __init void setup_topology(void)
{
	int max_mnest;

	if (!test_facility(11))
		return;
	S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
	/* Try selector-2 values from 6 downwards until one succeeds. */
	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
			break;
	}
	topology_max_mnest = max_mnest;
}
160
/*
 * Early program-check handler: look up the faulting address in the
 * exception tables and rewrite the old PSW so execution resumes at the
 * fixup; enter a disabled wait if no fixup exists.
 */
static void early_pgm_check_handler(void)
{
	const struct exception_table_entry *fixup;
	unsigned long cr0, cr0_new;
	unsigned long addr;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr);
	if (!fixup)
		disabled_wait(0);
	/* Disable low address protection before storing into lowcore. */
	__ctl_store(cr0, 0, 0);
	cr0_new = cr0 & ~(1UL << 28);
	__ctl_load(cr0_new, 0, 0);
	S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
	__ctl_load(cr0, 0, 0);
}

/*
 * Install minimal new PSWs for external interrupts and program checks
 * so that early boot code can take and survive both.
 */
static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
	psw.addr = (unsigned long) s390_base_ext_handler;
	S390_lowcore.external_new_psw = psw;
	psw.addr = (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = early_pgm_check_handler;
	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}

/*
 * Cache the installed facility bits in the lowcore and build the
 * alternative facility list, from which facility 82 is removed when
 * the kernel is built without CONFIG_KERNEL_NOBP.
 */
static noinline __init void setup_facility_list(void)
{
	stfle(S390_lowcore.stfle_fac_list,
	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
	memcpy(S390_lowcore.alt_stfle_fac_list,
	       S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.alt_stfle_fac_list));
	if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
}
202
/* Probe whether diagnose 0x9c is available on this machine. */
static __init void detect_diag9c(void)
{
	unsigned int cpu_address;
	int rc;

	cpu_address = stap();
	diag_stat_inc(DIAG_STAT_X09C);
	/*
	 * A program check during the diagnose branches to label 1 via the
	 * exception table, leaving rc at -EOPNOTSUPP; otherwise rc = 0.
	 */
	asm volatile(
		" diag %2,0,0x9c\n"
		"0: la %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}

/* Probe whether diagnose 0x44 is available on this machine. */
static __init void detect_diag44(void)
{
	int rc;

	diag_stat_inc(DIAG_STAT_X044);
	/* Same exception-table trick as in detect_diag9c() above. */
	asm volatile(
		" diag 0,0,0x44\n"
		"0: la %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
}

/*
 * Translate installed facility bits into machine flags and enable the
 * corresponding control-register bits where required.
 */
static __init void detect_machine_facilities(void)
{
	if (test_facility(8)) {	/* EDAT-1 */
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
		__ctl_set_bit(0, 23);
	}
	if (test_facility(78))	/* EDAT-2 */
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
	if (test_facility(3))	/* IDTE */
		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
	if (test_facility(50) && test_facility(73)) {	/* transactional execution */
		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
		__ctl_set_bit(0, 55);
	}
	if (test_facility(51))
		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
	if (test_facility(129)) {	/* vector facility */
		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
		__ctl_set_bit(0, 17);
	}
	if (test_facility(130)) {	/* no-execute */
		S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
		__ctl_set_bit(0, 20);
	}
	if (test_facility(133))	/* guarded storage */
		S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
	if (test_facility(139) && (tod_clock_base[1] & 0x80)) {
		/* Enabled signed clock comparator comparisons */
		S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
		clock_comparator_max = -1ULL >> 1;
		__ctl_set_bit(0, 53);
	}
}

/*
 * Preserve the boot cpu's vector registers for crash dumps, if the
 * vector facility (129) is installed.
 */
static inline void save_vector_registers(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (test_facility(129))
		save_vx_regs(boot_cpu_vector_save_area);
#endif
}
276
/*
 * "novx" early parameter: disable use of the vector extension by
 * clearing the machine flag and control register 0, bit 17.
 */
static int __init disable_vector_extension(char *str)
{
	S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
	__ctl_clear_bit(0, 17);
	return 0;
}
early_param("novx", disable_vector_extension);

/*
 * "noexec=" early parameter: a false value disables no-execute
 * support (machine flag and control register 0, bit 20).
 */
static int __init noexec_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (!rc && !enabled) {
		/* Disable no-execute support */
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_NX;
		__ctl_clear_bit(0, 20);
	}
	return rc;
}
early_param("noexec", noexec_setup);

/*
 * "cad=" early parameter: a true value enables problem-state CAD when
 * facility 128 is installed.
 */
static int __init cad_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (!rc && enabled && test_facility(128))
		/* Enable problem state CAD. */
		__ctl_set_bit(2, 3);
	return rc;
}
early_param("cad", cad_setup);
312
/*
 * memmove() variant that tolerates program checks: a temporary
 * program-check PSW pointing behind the copy loop is installed, so a
 * fault inside the byte-wise mvc loop terminates the copy instead of
 * taking the regular program-check path.  Overlapping areas are
 * handled by copying backwards when dst > src.
 */
static __init void memmove_early(void *dst, const void *src, size_t n)
{
	unsigned long addr;
	long incr;
	psw_t old;

	if (!n)
		return;
	incr = 1;
	if (dst > src) {
		/* Overlap possible - copy from the last byte downwards. */
		incr = -incr;
		dst += n - 1;
		src += n - 1;
	}
	/* Redirect the program-check PSW to label 1 for the duration. */
	old = S390_lowcore.program_new_psw;
	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		" larl %[addr],1f\n"
		" stg %[addr],%[psw_pgm_addr]\n"
		"0: mvc 0(1,%[dst]),0(%[src])\n"
		" agr %[dst],%[incr]\n"
		" agr %[src],%[incr]\n"
		" brctg %[n],0b\n"
		"1:\n"
		: [addr] "=&d" (addr),
		  [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr),
		  [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
		: [incr] "d" (incr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
}

/* Move the initrd out of the way if it sits too close to the kernel. */
static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
	/*
	 * Just like in case of IPL from VM reader we make sure there is a
	 * gap of 4MB between end of kernel and start of initrd.
	 * That way we can also be sure that saving an NSS will succeed,
	 * which however only requires different segments.
	 */
	if (!INITRD_START || !INITRD_SIZE)
		return;
	if (INITRD_START >= min_initrd_addr)
		return;
	/* memmove_early() copes with the potentially overlapping ranges. */
	memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
	INITRD_START = min_initrd_addr;
#endif
}
363
/* Set up boot command line */
/*
 * Append data produced by @ipl_data to boot_command_line.  If the
 * produced text starts with '=' it replaces the whole command line
 * instead of being appended.
 */
static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
	char *parm, *delim;
	size_t rc, len;

	len = strlen(boot_command_line);

	delim = boot_command_line + len; /* '\0' character position */
	parm = boot_command_line + len + 1; /* append right after '\0' */

	/* rc is the number of characters @ipl_data wrote (0 = nothing). */
	rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
	if (rc) {
		if (*parm == '=')
			memmove(boot_command_line, parm + 1, rc);
		else
			*delim = ' '; /* replace '\0' with space */
	}
}
383
/*
 * Return 1 if the string contains a byte with the top bit set (such a
 * byte cannot be 7-bit ASCII and is taken as EBCDIC), otherwise 0.
 */
static inline int has_ebcdic_char(const char *str)
{
	int idx = 0;

	while (str[idx]) {
		if (str[idx] & 0x80)
			return 1;
		idx++;
	}
	return 0;
}
393
/*
 * Build boot_command_line from the architecture command line plus IPL
 * parameter and SCP data.
 */
static void __init setup_boot_command_line(void)
{
	/* Make sure the arch command line is NUL terminated. */
	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
	/* convert arch command line to ascii if necessary */
	if (has_ebcdic_char(COMMAND_LINE))
		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
	/* copy arch command line */
	strlcpy(boot_command_line, strstrip(COMMAND_LINE),
		ARCH_COMMAND_LINE_SIZE);

	/* append IPL PARM data to the boot command line */
	if (MACHINE_IS_VM)
		append_to_cmdline(append_ipl_vmparm);

	append_to_cmdline(append_ipl_scpdata);
}
410
/*
 * Early machine setup: bring up the TOD clock, clear bss, detect the
 * machine/hypervisor and its facilities, and assemble the boot command
 * line, all before the regular setup_arch() runs.
 */
void __init startup_init(void)
{
	reset_tod_clock();
	rescue_initrd();
	clear_bss_section();
	time_early_init();
	init_kernel_storage_key();
	/* Keep lockdep off for the rest of the early setup. */
	lockdep_off();
	setup_lowcore_early();
	setup_facility_list();
	detect_machine_type();
	setup_arch_string();
	ipl_store_parameters();
	setup_boot_command_line();
	detect_diag9c();
	detect_diag44();
	detect_machine_facilities();
	save_vector_registers();
	setup_topology();
	sclp_early_detect();
	lockdep_on();
}