// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/sched/debug.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/lockdep.h>
#include <linux/extable.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <asm/asm-extable.h>
#include <linux/memblock.h>
#include <asm/access-regs.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/fpu.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/boot_data.h>
#include "entry.h"

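/*
 * The following kernel parameters have already been evaluated by the
 * decompressor. Define dummy early_param() handlers for them so that the
 * kernel does not complain about unknown parameters.
 */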
#define decompressor_handled_param(param)			\
static int __init ignore_decompressor_param_##param(char *s)	\
{								\
	return 0;						\
}								\
early_param(#param, ignore_decompressor_param_##param)

decompressor_handled_param(mem);
decompressor_handled_param(vmalloc);
decompressor_handled_param(dfltcc);
decompressor_handled_param(facilities);
decompressor_handled_param(nokaslr);
decompressor_handled_param(cmma);
decompressor_handled_param(relocate_lowcore);
#if IS_ENABLED(CONFIG_KVM)
decompressor_handled_param(prot_virt);
#endif

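/*
 * With KASAN enabled, allow error reports for the init task (its kasan_depth
 * is non-zero until now to suppress reports during early boot) and announce
 * the initialization on the early SCLP console.
 */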
static void __init kasan_early_init(void)
{
#ifdef CONFIG_KASAN
	init_task.kasan_depth = 0;
	sclp_early_printk("KernelAddressSanitizer initialized\n");
#endif
}

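/*
 * Get the TOD clock running. If the clock is stopped, set it to the Unix
 * epoch so that timekeeping starts from a defined value.
 */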
static void __init reset_tod_clock(void)
{
	union tod_clock clk;

	if (store_tod_clock_ext_cc(&clk) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk))
		disabled_wait();

	memset(&tod_clock_base, 0, sizeof(tod_clock_base));
	tod_clock_base.tod = TOD_UNIX_EPOCH;
	get_lowcore()->last_update_clock = TOD_UNIX_EPOCH;
}

/*
 * Initialize storage key for kernel pages
 */
static noinline __init void init_kernel_storage_key(void)
{
#if PAGE_DEFAULT_KEY
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(_end));

	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT,
				     PAGE_DEFAULT_KEY, 0);
#endif
}

static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);

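/*
 * Use STSI to find out whether we run in an LPAR or under a hypervisor.
 * The control-program identifier is EBCDIC: "\xd2\xe5\xd4" is "KVM" and
 * "\xa9\x61\xe5\xd4" is "z/VM".
 */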
static noinline __init void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		get_lowcore()->machine_flags |= MACHINE_FLAG_LPAR;
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;

	/* Detect known hypervisors */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
		get_lowcore()->machine_flags |= MACHINE_FLAG_KVM;
	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
		get_lowcore()->machine_flags |= MACHINE_FLAG_VM;
}

/* Remove leading, trailing and double whitespace. */
static inline void strim_all(char *str)
{
	char *s;

	s = strim(str);
	if (s != str)
		memmove(str, s, strlen(s) + 1);
	while (*str) {
		if (!isspace(*str++))
			continue;
		if (isspace(*str)) {
			s = skip_spaces(str);
			memmove(str, s, strlen(s) + 1);
		}
	}
}

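/*
 * Build the machine and hypervisor description string that dump_stack()
 * prints, based on STSI 1.1.1 and 3.2.2 data converted from EBCDIC to ASCII.
 */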
static noinline __init void setup_arch_string(void)
{
	struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;
	struct sysinfo_3_2_2 *vm = (struct sysinfo_3_2_2 *)&sysinfo_page;
	char mstr[80], hvstr[17];

	if (stsi(mach, 1, 1, 1))
		return;
	EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
	EBCASC(mach->type, sizeof(mach->type));
	EBCASC(mach->model, sizeof(mach->model));
	EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
	sprintf(mstr, "%-16.16s %-4.4s %-16.16s %-16.16s",
		mach->manufacturer, mach->type,
		mach->model, mach->model_capacity);
	strim_all(mstr);
	if (stsi(vm, 3, 2, 2) == 0 && vm->count) {
		EBCASC(vm->vm[0].cpi, sizeof(vm->vm[0].cpi));
		sprintf(hvstr, "%-16.16s", vm->vm[0].cpi);
		strim_all(hvstr);
	} else {
		sprintf(hvstr, "%s",
			MACHINE_IS_LPAR ? "LPAR" :
			MACHINE_IS_VM ? "z/VM" :
			MACHINE_IS_KVM ? "KVM" : "unknown");
	}
	dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
}

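/*
 * If the configuration-topology facility (11) is available, find the highest
 * STSI 15.1.x nesting level (MNEST) the machine supports.
 */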
static __init void setup_topology(void)
{
	int max_mnest;

	if (!test_facility(11))
		return;
	get_lowcore()->machine_flags |= MACHINE_FLAG_TOPOLOGY;
	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
			break;
	}
	topology_max_mnest = max_mnest;
}

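/*
 * Early program check handler: WARN()-style monitor events and exceptions
 * covered by the exception table are handled, everything else is fatal at
 * this point in the boot process.
 */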
void __init __do_early_pgm_check(struct pt_regs *regs)
{
	struct lowcore *lc = get_lowcore();
	unsigned long ip;

	regs->int_code = lc->pgm_int_code;
	regs->int_parm_long = lc->trans_exc_code;
	ip = __rewind_psw(regs->psw, regs->int_code >> 16);

	/* Monitor Event? Might be a warning */
	if ((regs->int_code & PGM_INT_CODE_MASK) == 0x40) {
		if (report_bug(ip, regs) == BUG_TRAP_TYPE_WARN)
			return;
	}
	if (fixup_exception(regs))
		return;
	/*
	 * Unhandled exception - system cannot continue but try to get some
	 * helpful messages to the console. Use early_printk() to print
	 * some basic information in case it is too early for printk().
	 */
	register_early_console();
	early_printk("PANIC: early exception %04x PSW: %016lx %016lx\n",
		     regs->int_code & 0xffff, regs->psw.mask, regs->psw.addr);
	show_regs(regs);
	disabled_wait();
}

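/*
 * Install the early program check handler and initialize the lowcore fields
 * that are needed before the final lowcore setup is done.
 */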
static noinline __init void setup_lowcore_early(void)
{
	struct lowcore *lc = get_lowcore();
	psw_t psw;

	psw.addr = (unsigned long)early_pgm_check_handler;
	psw.mask = PSW_KERNEL_BITS;
	lc->program_new_psw = psw;
	lc->preempt_count = INIT_PREEMPT_COUNT;
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
}

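/*
 * Probe for DIAG 0x9c (directed yield to a single CPU) by issuing it for our
 * own CPU address. If the diagnose is not available, the exception table
 * entry catches the program check and rc stays -EOPNOTSUPP.
 */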
static __init void detect_diag9c(void)
{
	unsigned int cpu_address;
	int rc;

	cpu_address = stap();
	diag_stat_inc(DIAG_STAT_X09C);
	asm volatile(
		"	diag	%2,0,0x9c\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
	if (!rc)
		get_lowcore()->machine_flags |= MACHINE_FLAG_DIAG9C;
}

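/*
 * Translate installed facility bits into machine flags and set the control
 * register bits for facilities that must be enabled this early.
 */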
static __init void detect_machine_facilities(void)
{
	if (test_facility(8)) {
		get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT1;
		system_ctl_set_bit(0, CR0_EDAT_BIT);
	}
	if (test_facility(78))
		get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT2;
	if (test_facility(3))
		get_lowcore()->machine_flags |= MACHINE_FLAG_IDTE;
	if (test_facility(50) && test_facility(73)) {
		get_lowcore()->machine_flags |= MACHINE_FLAG_TE;
		system_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT);
	}
	if (test_facility(51))
		get_lowcore()->machine_flags |= MACHINE_FLAG_TLB_LC;
	if (test_facility(129))
		system_ctl_set_bit(0, CR0_VECTOR_BIT);
	if (test_facility(130))
		get_lowcore()->machine_flags |= MACHINE_FLAG_NX;
	if (test_facility(133))
		get_lowcore()->machine_flags |= MACHINE_FLAG_GS;
	if (test_facility(139) && (tod_clock_base.tod >> 63)) {
		/* Enable signed clock comparator comparisons */
		get_lowcore()->machine_flags |= MACHINE_FLAG_SCC;
		clock_comparator_max = -1ULL >> 1;
		system_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT);
	}
	if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
		get_lowcore()->machine_flags |= MACHINE_FLAG_PCI_MIO;
		/* the control bit is set during PCI initialization */
	}
	if (test_facility(194))
		get_lowcore()->machine_flags |= MACHINE_FLAG_RDP;
	if (test_facility(85))
		get_lowcore()->machine_flags |= MACHINE_FLAG_SEQ_INSN;
}

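/*
 * Save the boot CPU's vector registers so that they are available in a
 * crash dump.
 */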
static inline void save_vector_registers(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (test_facility(129))
		save_vx_regs(boot_cpu_vector_save_area);
#endif
}

static inline void setup_low_address_protection(void)
{
	system_ctl_set_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT);
}

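/* Put the access registers into a defined (zeroed) state. */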
static inline void setup_access_registers(void)
{
	unsigned int acrs[NUM_ACRS] = { 0 };

	restore_access_regs(acrs);
}

char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
static void __init setup_boot_command_line(void)
{
	/* copy arch command line */
	strscpy(boot_command_line, early_command_line, COMMAND_LINE_SIZE);
}

static void __init sort_amode31_extable(void)
{
	sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
}

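/*
 * Early machine setup, called from the startup code before the common kernel
 * initialization takes over: get the TOD clock running, install the early
 * program check handler, detect the machine type and facilities, and set up
 * the boot command line.
 */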
void __init startup_init(void)
{
	kasan_early_init();
	reset_tod_clock();
	time_early_init();
	init_kernel_storage_key();
	lockdep_off();
	sort_amode31_extable();
	setup_lowcore_early();
	detect_machine_type();
	setup_arch_string();
	setup_boot_command_line();
	detect_diag9c();
	detect_machine_facilities();
	save_vector_registers();
	setup_topology();
	sclp_early_detect();
	setup_low_address_protection();
	setup_access_registers();
	lockdep_on();
}