// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/lockdep.h>
#include <linux/extable.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <asm/asm-extable.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/boot_data.h>
#include <asm/switch_to.h>
#include "entry.h"

int __bootdata(is_full_image);

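/*
 * Make sure the TOD clock is running; if it is stopped, start it at the
 * Unix Epoch.
 */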
static void __init reset_tod_clock(void)
{
	union tod_clock clk;

	if (store_tod_clock_ext_cc(&clk) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk))
		disabled_wait();

	memset(&tod_clock_base, 0, sizeof(tod_clock_base));
	tod_clock_base.tod = TOD_UNIX_EPOCH;
	S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}

/*
 * Initialize storage key for kernel pages
 */
static noinline __init void init_kernel_storage_key(void)
{
#if PAGE_DEFAULT_KEY
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(_end));

	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT,
				     PAGE_DEFAULT_KEY, 0);
#endif
}

static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);

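/*
 * Find out whether the kernel runs in an LPAR or as a guest of a
 * hypervisor (z/VM or KVM), based on the STSI configuration level and
 * the EBCDIC control program identifier.
 */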
static noinline __init void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;

	/* Detect known hypervisors */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}

/* Remove leading, trailing and double whitespace. */
static inline void strim_all(char *str)
{
	char *s;

	s = strim(str);
	if (s != str)
		memmove(str, s, strlen(s) + 1);
	while (*str) {
		if (!isspace(*str++))
			continue;
		if (isspace(*str)) {
			s = skip_spaces(str);
			memmove(str, s, strlen(s) + 1);
		}
	}
}

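/*
 * Build the machine and hypervisor description string that is reported
 * via dump_stack() and friends.
 */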
static noinline __init void setup_arch_string(void)
{
	struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;
	struct sysinfo_3_2_2 *vm = (struct sysinfo_3_2_2 *)&sysinfo_page;
	char mstr[80], hvstr[17];

	if (stsi(mach, 1, 1, 1))
		return;
	EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
	EBCASC(mach->type, sizeof(mach->type));
	EBCASC(mach->model, sizeof(mach->model));
	EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
	sprintf(mstr, "%-16.16s %-4.4s %-16.16s %-16.16s",
		mach->manufacturer, mach->type,
		mach->model, mach->model_capacity);
	strim_all(mstr);
	if (stsi(vm, 3, 2, 2) == 0 && vm->count) {
		EBCASC(vm->vm[0].cpi, sizeof(vm->vm[0].cpi));
		sprintf(hvstr, "%-16.16s", vm->vm[0].cpi);
		strim_all(hvstr);
	} else {
		sprintf(hvstr, "%s",
			MACHINE_IS_LPAR ? "LPAR" :
			MACHINE_IS_VM ? "z/VM" :
			MACHINE_IS_KVM ? "KVM" : "unknown");
	}
	dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
}

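/*
 * If the configuration-topology facility is installed, determine the
 * highest STSI 15.1.x nesting level (between 2 and 6) that works.
 */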
static __init void setup_topology(void)
{
	int max_mnest;

	if (!test_facility(11))
		return;
	S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
			break;
	}
	topology_max_mnest = max_mnest;
}

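/*
 * Handle program checks that happen during early setup: apply an
 * exception table fixup if one exists, otherwise stop in a disabled wait.
 */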
void __do_early_pgm_check(struct pt_regs *regs)
{
	if (!fixup_exception(regs))
		disabled_wait();
}

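/*
 * Install the program-check new PSW used during early setup. With KASAN
 * the handler must run with DAT enabled.
 */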
static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.addr = (unsigned long)early_pgm_check_handler;
	psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
	if (IS_ENABLED(CONFIG_KASAN))
		psw.mask |= PSW_MASK_DAT;
	S390_lowcore.program_new_psw = psw;
	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}

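/*
 * Set up the alternative facility list; unless CONFIG_KERNEL_NOBP is
 * enabled, facility bit 82 is masked out of it.
 */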
static noinline __init void setup_facility_list(void)
{
	memcpy(alt_stfle_fac_list, stfle_fac_list, sizeof(alt_stfle_fac_list));
	if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
		__clear_facility(82, alt_stfle_fac_list);
}

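/*
 * Probe whether diagnose 0x9c (yield to a given CPU) is supported by
 * issuing it for this CPU's own address.
 */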
static __init void detect_diag9c(void)
{
	unsigned int cpu_address;
	int rc;

	cpu_address = stap();
	diag_stat_inc(DIAG_STAT_X09C);
	asm volatile(
		" diag %2,0,0x9c\n"
		"0: la %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}

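/*
 * Translate installed facility bits into machine flags and enable the
 * corresponding control register bits.
 */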
static __init void detect_machine_facilities(void)
{
	if (test_facility(8)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
		__ctl_set_bit(0, 23);
	}
	if (test_facility(78))
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
	if (test_facility(3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
	if (test_facility(50) && test_facility(73)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
		__ctl_set_bit(0, 55);
	}
	if (test_facility(51))
		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
	if (test_facility(129)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
		__ctl_set_bit(0, 17);
	}
	if (test_facility(130) && !noexec_disabled) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
		__ctl_set_bit(0, 20);
	}
	if (test_facility(133))
		S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
	if (test_facility(139) && (tod_clock_base.tod >> 63)) {
		/* Enable signed clock comparator comparisons */
		S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
		clock_comparator_max = -1ULL >> 1;
		__ctl_set_bit(0, 53);
	}
	if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
		/* the control bit is set during PCI initialization */
	}
}

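/* Save the boot CPU's vector registers for inclusion in a crash dump. */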
static inline void save_vector_registers(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (test_facility(129))
		save_vx_regs(boot_cpu_vector_save_area);
#endif
}

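/*
 * Enable low-address protection and the emergency-signal and external-call
 * interruption submasks in control register 0.
 */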
static inline void setup_control_registers(void)
{
	unsigned long reg;

	__ctl_store(reg, 0, 0);
	reg |= CR0_LOW_ADDRESS_PROTECTION;
	reg |= CR0_EMERGENCY_SIGNAL_SUBMASK;
	reg |= CR0_EXTERNAL_CALL_SUBMASK;
	__ctl_load(reg, 0, 0);
}

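/* Clear the access registers of the boot CPU. */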
static inline void setup_access_registers(void)
{
	unsigned int acrs[NUM_ACRS] = { 0 };

	restore_access_regs(acrs);
}

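/* "novx" kernel parameter: disable use of the vector facility. */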
static int __init disable_vector_extension(char *str)
{
	S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
	__ctl_clear_bit(0, 17);
	return 0;
}
early_param("novx", disable_vector_extension);

char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
static void __init setup_boot_command_line(void)
{
	/* copy arch command line */
	strscpy(boot_command_line, early_command_line, COMMAND_LINE_SIZE);
}

static void __init check_image_bootable(void)
{
	if (is_full_image)
		return;

	sclp_early_printk("Linux kernel boot failure: An attempt to boot a vmlinux ELF image failed.\n");
	sclp_early_printk("This image does not contain all parts necessary for starting up. Use\n");
	sclp_early_printk("bzImage or arch/s390/boot/compressed/vmlinux instead.\n");
	disabled_wait();
}

static void __init sort_amode31_extable(void)
{
	sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
}

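/*
 * Early machine setup, run once during boot before the common kernel
 * setup code takes over.
 */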
void __init startup_init(void)
{
	sclp_early_adjust_va();
	reset_tod_clock();
	check_image_bootable();
	time_early_init();
	init_kernel_storage_key();
	lockdep_off();
	sort_amode31_extable();
	setup_lowcore_early();
	setup_facility_list();
	detect_machine_type();
	setup_arch_string();
	setup_boot_command_line();
	detect_diag9c();
	detect_machine_facilities();
	save_vector_registers();
	setup_topology();
	sclp_early_detect();
	setup_control_registers();
	setup_access_registers();
	lockdep_on();
}

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2009
 * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/lockdep.h>
#include <linux/extable.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include "entry.h"

static void __init setup_boot_command_line(void);

/*
 * Get the TOD clock running.
 */
static void __init reset_tod_clock(void)
{
	u64 time;

	if (store_tod_clock(&time) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
		disabled_wait(0);

	memset(tod_clock_base, 0, 16);
	*(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH;
	S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}

/*
 * Clear bss memory
 */
static noinline __init void clear_bss_section(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}

/*
 * Initialize storage key for kernel pages
 */
static noinline __init void init_kernel_storage_key(void)
{
#if PAGE_DEFAULT_KEY
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(_end));

	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT,
				     PAGE_DEFAULT_KEY, 0);
#endif
}

static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);

static noinline __init void detect_machine_type(void)
{
	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;

	/* Check current-configuration-level */
	if (stsi(NULL, 0, 0, 0) <= 2) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(vmms, 3, 2, 2) || !vmms->count)
		return;

	/* Running under KVM? If not we assume z/VM */
	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
	else
		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}

/* Remove leading, trailing and double whitespace. */
static inline void strim_all(char *str)
{
	char *s;

	s = strim(str);
	if (s != str)
		memmove(str, s, strlen(s) + 1);
	while (*str) {
		if (!isspace(*str++))
			continue;
		if (isspace(*str)) {
			s = skip_spaces(str);
			memmove(str, s, strlen(s) + 1);
		}
	}
}

static noinline __init void setup_arch_string(void)
{
	struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;
	struct sysinfo_3_2_2 *vm = (struct sysinfo_3_2_2 *)&sysinfo_page;
	char mstr[80], hvstr[17];

	if (stsi(mach, 1, 1, 1))
		return;
	EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
	EBCASC(mach->type, sizeof(mach->type));
	EBCASC(mach->model, sizeof(mach->model));
	EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
	sprintf(mstr, "%-16.16s %-4.4s %-16.16s %-16.16s",
		mach->manufacturer, mach->type,
		mach->model, mach->model_capacity);
	strim_all(mstr);
	if (stsi(vm, 3, 2, 2) == 0 && vm->count) {
		EBCASC(vm->vm[0].cpi, sizeof(vm->vm[0].cpi));
		sprintf(hvstr, "%-16.16s", vm->vm[0].cpi);
		strim_all(hvstr);
	} else {
		sprintf(hvstr, "%s",
			MACHINE_IS_LPAR ? "LPAR" :
			MACHINE_IS_VM ? "z/VM" :
			MACHINE_IS_KVM ? "KVM" : "unknown");
	}
	dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
}

static __init void setup_topology(void)
{
	int max_mnest;

	if (!test_facility(11))
		return;
	S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
			break;
	}
	topology_max_mnest = max_mnest;
}

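/*
 * Early program check handler: resolve the failing instruction via the
 * exception tables or stop in a disabled wait. Low-address protection is
 * temporarily switched off while the old program PSW in the lowcore is
 * updated.
 */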
static void early_pgm_check_handler(void)
{
	const struct exception_table_entry *fixup;
	unsigned long cr0, cr0_new;
	unsigned long addr;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr);
	if (!fixup)
		disabled_wait(0);
	/* Disable low address protection before storing into lowcore. */
	__ctl_store(cr0, 0, 0);
	cr0_new = cr0 & ~(1UL << 28);
	__ctl_load(cr0_new, 0, 0);
	S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
	__ctl_load(cr0, 0, 0);
}

static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
	psw.addr = (unsigned long) s390_base_ext_handler;
	S390_lowcore.external_new_psw = psw;
	psw.addr = (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = early_pgm_check_handler;
	S390_lowcore.preempt_count = INIT_PREEMPT_COUNT;
}

static noinline __init void setup_facility_list(void)
{
	stfle(S390_lowcore.stfle_fac_list,
	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
	memcpy(S390_lowcore.alt_stfle_fac_list,
	       S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.alt_stfle_fac_list));
	if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
		__clear_facility(82, S390_lowcore.alt_stfle_fac_list);
}

static __init void detect_diag9c(void)
{
	unsigned int cpu_address;
	int rc;

	cpu_address = stap();
	diag_stat_inc(DIAG_STAT_X09C);
	asm volatile(
		" diag %2,0,0x9c\n"
		"0: la %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}

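/* Probe whether diagnose 0x44 (voluntary time slice end) is supported. */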
static __init void detect_diag44(void)
{
	int rc;

	diag_stat_inc(DIAG_STAT_X044);
	asm volatile(
		" diag 0,0,0x44\n"
		"0: la %0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
}

static __init void detect_machine_facilities(void)
{
	if (test_facility(8)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
		__ctl_set_bit(0, 23);
	}
	if (test_facility(78))
		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
	if (test_facility(3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
	if (test_facility(50) && test_facility(73)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
		__ctl_set_bit(0, 55);
	}
	if (test_facility(51))
		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
	if (test_facility(129)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
		__ctl_set_bit(0, 17);
	}
	if (test_facility(130)) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
		__ctl_set_bit(0, 20);
	}
	if (test_facility(133))
		S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
	if (test_facility(139) && (tod_clock_base[1] & 0x80)) {
		/* Enable signed clock comparator comparisons */
		S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
		clock_comparator_max = -1ULL >> 1;
		__ctl_set_bit(0, 53);
	}
}

static inline void save_vector_registers(void)
{
#ifdef CONFIG_CRASH_DUMP
	if (test_facility(129))
		save_vx_regs(boot_cpu_vector_save_area);
#endif
}

static int __init disable_vector_extension(char *str)
{
	S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
	__ctl_clear_bit(0, 17);
	return 0;
}
early_param("novx", disable_vector_extension);

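/* "noexec" kernel parameter: "noexec=off" disables no-execute protection. */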
static int __init noexec_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (!rc && !enabled) {
		/* Disable no-execute support */
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_NX;
		__ctl_clear_bit(0, 20);
	}
	return rc;
}
early_param("noexec", noexec_setup);

static int __init cad_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (!rc && enabled && test_facility(128))
		/* Enable problem state CAD. */
		__ctl_set_bit(2, 3);
	return rc;
}
early_param("cad", cad_setup);

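/*
 * Early byte-wise memmove that installs a temporary program check new PSW,
 * so that a program check during the copy resumes after the loop instead
 * of stopping the system.
 */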
static __init void memmove_early(void *dst, const void *src, size_t n)
{
	unsigned long addr;
	long incr;
	psw_t old;

	if (!n)
		return;
	incr = 1;
	if (dst > src) {
		incr = -incr;
		dst += n - 1;
		src += n - 1;
	}
	old = S390_lowcore.program_new_psw;
	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		" larl %[addr],1f\n"
		" stg %[addr],%[psw_pgm_addr]\n"
		"0: mvc 0(1,%[dst]),0(%[src])\n"
		" agr %[dst],%[incr]\n"
		" agr %[src],%[incr]\n"
		" brctg %[n],0b\n"
		"1:\n"
		: [addr] "=&d" (addr),
		  [psw_pgm_addr] "=Q" (S390_lowcore.program_new_psw.addr),
		  [dst] "+&a" (dst), [src] "+&a" (src), [n] "+d" (n)
		: [incr] "d" (incr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
}

static __init noinline void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
	/*
	 * Just like in case of IPL from VM reader we make sure there is a
	 * gap of 4MB between end of kernel and start of initrd.
	 * That way we can also be sure that saving an NSS will succeed,
	 * which however only requires different segments.
	 */
	if (!INITRD_START || !INITRD_SIZE)
		return;
	if (INITRD_START >= min_initrd_addr)
		return;
	memmove_early((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
	INITRD_START = min_initrd_addr;
#endif
}

/* Set up boot command line */
static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
	char *parm, *delim;
	size_t rc, len;

	len = strlen(boot_command_line);

	delim = boot_command_line + len;	/* '\0' character position */
	parm = boot_command_line + len + 1;	/* append right after '\0' */

	rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
	if (rc) {
		if (*parm == '=')
			memmove(boot_command_line, parm + 1, rc);
		else
			*delim = ' ';	/* replace '\0' with space */
	}
}

static inline int has_ebcdic_char(const char *str)
{
	int i;

	for (i = 0; str[i]; i++)
		if (str[i] & 0x80)
			return 1;
	return 0;
}

static void __init setup_boot_command_line(void)
{
	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
	/* convert arch command line to ascii if necessary */
	if (has_ebcdic_char(COMMAND_LINE))
		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
	/* copy arch command line */
	strlcpy(boot_command_line, strstrip(COMMAND_LINE),
		ARCH_COMMAND_LINE_SIZE);

	/* append IPL PARM data to the boot command line */
	if (MACHINE_IS_VM)
		append_to_cmdline(append_ipl_vmparm);

	append_to_cmdline(append_ipl_scpdata);
}

void __init startup_init(void)
{
	reset_tod_clock();
	rescue_initrd();
	clear_bss_section();
	time_early_init();
	init_kernel_storage_key();
	lockdep_off();
	setup_lowcore_early();
	setup_facility_list();
	detect_machine_type();
	setup_arch_string();
	ipl_store_parameters();
	setup_boot_command_line();
	detect_diag9c();
	detect_diag44();
	detect_machine_facilities();
	save_vector_registers();
	setup_topology();
	sclp_early_detect();
	lockdep_on();
}