arch/s390/kernel/early.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *    Copyright IBM Corp. 2007, 2009
  4 *    Author(s): Hongjie Yang <hongjie@us.ibm.com>,
  5 */
  6
  7#define KMSG_COMPONENT "setup"
  8#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  9
 10#include <linux/sched/debug.h>
 11#include <linux/compiler.h>
 12#include <linux/init.h>
 13#include <linux/errno.h>
 14#include <linux/string.h>
 15#include <linux/ctype.h>
 16#include <linux/lockdep.h>
 17#include <linux/extable.h>
 18#include <linux/pfn.h>
 19#include <linux/uaccess.h>
 20#include <linux/kernel.h>
 21#include <asm/asm-extable.h>
 22#include <linux/memblock.h>
 23#include <asm/access-regs.h>
 24#include <asm/diag.h>
 25#include <asm/ebcdic.h>
 26#include <asm/fpu.h>
 27#include <asm/ipl.h>
 28#include <asm/lowcore.h>
 29#include <asm/processor.h>
 30#include <asm/sections.h>
 31#include <asm/setup.h>
 32#include <asm/sysinfo.h>
 33#include <asm/cpcmd.h>
 34#include <asm/sclp.h>
 35#include <asm/facility.h>
 36#include <asm/boot_data.h>
 37#include "entry.h"
 38
 39#define decompressor_handled_param(param)			\
 40static int __init ignore_decompressor_param_##param(char *s)	\
 41{								\
 42	return 0;						\
 43}								\
 44early_param(#param, ignore_decompressor_param_##param)
 45
 46decompressor_handled_param(mem);
 47decompressor_handled_param(vmalloc);
 48decompressor_handled_param(dfltcc);
 49decompressor_handled_param(facilities);
 50decompressor_handled_param(nokaslr);
 51decompressor_handled_param(cmma);
 52decompressor_handled_param(relocate_lowcore);
 53#if IS_ENABLED(CONFIG_KVM)
 54decompressor_handled_param(prot_virt);
 55#endif
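/*
 * Illustrative expansion (not part of the kernel source): each
 * decompressor_handled_param() line above defines a no-op early_param()
 * handler. decompressor_handled_param(mem), for example, becomes roughly:
 *
 *	static int __init ignore_decompressor_param_mem(char *s)
 *	{
 *		return 0;
 *	}
 *	early_param("mem", ignore_decompressor_param_mem);
 *
 * so options that were already consumed by the boot decompressor are
 * accepted by the decompressed kernel without further processing.
 */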
 56
 57static void __init kasan_early_init(void)
 58{
 59#ifdef CONFIG_KASAN
 60	init_task.kasan_depth = 0;
 61	sclp_early_printk("KernelAddressSanitizer initialized\n");
 62#endif
 63}
 64
 65static void __init reset_tod_clock(void)
 66{
 67	union tod_clock clk;
 68
 69	if (store_tod_clock_ext_cc(&clk) == 0)
 70		return;
 71	/* TOD clock not running. Set the clock to Unix Epoch. */
 72	if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock_ext_cc(&clk))
 73		disabled_wait();
 74
 75	memset(&tod_clock_base, 0, sizeof(tod_clock_base));
 76	tod_clock_base.tod = TOD_UNIX_EPOCH;
 77	get_lowcore()->last_update_clock = TOD_UNIX_EPOCH;
 78}
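/*
 * Background note: the z/Architecture TOD clock counts from 1900-01-01,
 * so TOD_UNIX_EPOCH is the clock value corresponding to 1970-01-01.
 * Falling back to it keeps timestamps sane when the clock is found
 * not running.
 */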
 79
 80/*
 81 * Initialize storage key for kernel pages
 82 */
 83static noinline __init void init_kernel_storage_key(void)
 84{
 85#if PAGE_DEFAULT_KEY
 86	unsigned long end_pfn, init_pfn;
 87
 88	end_pfn = PFN_UP(__pa(_end));
 89
 90	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
 91		page_set_storage_key(init_pfn << PAGE_SHIFT,
 92				     PAGE_DEFAULT_KEY, 0);
 93#endif
 94}
 95
 96static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
 97
 98static noinline __init void detect_machine_type(void)
 99{
100	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;
101
102	/* Check current-configuration-level */
103	if (stsi(NULL, 0, 0, 0) <= 2) {
104		get_lowcore()->machine_flags |= MACHINE_FLAG_LPAR;
105		return;
106	}
107	/* Get virtual-machine cpu information. */
108	if (stsi(vmms, 3, 2, 2) || !vmms->count)
109		return;
110
111	/* Detect known hypervisors */
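	/*
	 * The control-program IDs are EBCDIC: "\xd2\xe5\xd4" decodes to
	 * "KVM" and "\xa9\x61\xe5\xd4" to "z/VM".
	 */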
112	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
113		get_lowcore()->machine_flags |= MACHINE_FLAG_KVM;
114	else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
115		get_lowcore()->machine_flags |= MACHINE_FLAG_VM;
116}
117
118/* Remove leading, trailing and double whitespace. */
119static inline void strim_all(char *str)
120{
121	char *s;
122
123	s = strim(str);
124	if (s != str)
125		memmove(str, s, strlen(s) + 1);
126	while (*str) {
127		if (!isspace(*str++))
128			continue;
129		if (isspace(*str)) {
130			s = skip_spaces(str);
131			memmove(str, s, strlen(s) + 1);
132		}
133	}
134}
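/*
 * Example (illustrative input): strim_all() turns "  IBM   8561    T01 "
 * into "IBM 8561 T01" - leading and trailing blanks go away and runs of
 * inner whitespace collapse to a single space.
 */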
135
136static noinline __init void setup_arch_string(void)
137{
138	struct sysinfo_1_1_1 *mach = (struct sysinfo_1_1_1 *)&sysinfo_page;
139	struct sysinfo_3_2_2 *vm = (struct sysinfo_3_2_2 *)&sysinfo_page;
140	char mstr[80], hvstr[17];
141
142	if (stsi(mach, 1, 1, 1))
143		return;
144	EBCASC(mach->manufacturer, sizeof(mach->manufacturer));
145	EBCASC(mach->type, sizeof(mach->type));
146	EBCASC(mach->model, sizeof(mach->model));
147	EBCASC(mach->model_capacity, sizeof(mach->model_capacity));
148	sprintf(mstr, "%-16.16s %-4.4s %-16.16s %-16.16s",
149		mach->manufacturer, mach->type,
150		mach->model, mach->model_capacity);
151	strim_all(mstr);
152	if (stsi(vm, 3, 2, 2) == 0 && vm->count) {
153		EBCASC(vm->vm[0].cpi, sizeof(vm->vm[0].cpi));
154		sprintf(hvstr, "%-16.16s", vm->vm[0].cpi);
155		strim_all(hvstr);
156	} else {
157		sprintf(hvstr, "%s",
158			MACHINE_IS_LPAR ? "LPAR" :
159			MACHINE_IS_VM ? "z/VM" :
160			MACHINE_IS_KVM ? "KVM" : "unknown");
161	}
162	dump_stack_set_arch_desc("%s (%s)", mstr, hvstr);
163}
164
165static __init void setup_topology(void)
166{
167	int max_mnest;
168
169	if (!test_facility(11))
170		return;
171	get_lowcore()->machine_flags |= MACHINE_FLAG_TOPOLOGY;
172	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
173		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
174			break;
175	}
176	topology_max_mnest = max_mnest;
177}
178
179void __init __do_early_pgm_check(struct pt_regs *regs)
180{
181	struct lowcore *lc = get_lowcore();
182	unsigned long ip;
183
184	regs->int_code = lc->pgm_int_code;
185	regs->int_parm_long = lc->trans_exc_code;
186	ip = __rewind_psw(regs->psw, regs->int_code >> 16);
187
188	/* Monitor Event? Might be a warning */
189	if ((regs->int_code & PGM_INT_CODE_MASK) == 0x40) {
190		if (report_bug(ip, regs) == BUG_TRAP_TYPE_WARN)
191			return;
192	}
193	if (fixup_exception(regs))
194		return;
195	/*
196	 * Unhandled exception - system cannot continue but try to get some
197	 * helpful messages to the console. Use early_printk() to print
198	 * some basic information in case it is too early for printk().
199	 */
200	register_early_console();
201	early_printk("PANIC: early exception %04x PSW: %016lx %016lx\n",
202		     regs->int_code & 0xffff, regs->psw.mask, regs->psw.addr);
203	show_regs(regs);
204	disabled_wait();
205}
206
207static noinline __init void setup_lowcore_early(void)
208{
209	struct lowcore *lc = get_lowcore();
210	psw_t psw;
211
212	psw.addr = (unsigned long)early_pgm_check_handler;
213	psw.mask = PSW_KERNEL_BITS;
214	lc->program_new_psw = psw;
215	lc->preempt_count = INIT_PREEMPT_COUNT;
216	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
217	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
218}
219
220static __init void detect_diag9c(void)
221{
222	unsigned int cpu_address;
223	int rc;
224
225	cpu_address = stap();
226	diag_stat_inc(DIAG_STAT_X09C);
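	/*
	 * Diagnose 0x9c is the hypervisor's directed time-slice yield
	 * (give up the time slice in favor of the CPU with the given
	 * address). If the diagnose is not provided, the resulting
	 * program check is caught via EX_TABLE and rc stays -EOPNOTSUPP.
	 */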
227	asm volatile(
228		"	diag	%2,0,0x9c\n"
229		"0:	la	%0,0\n"
230		"1:\n"
231		EX_TABLE(0b,1b)
232		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
233	if (!rc)
234		get_lowcore()->machine_flags |= MACHINE_FLAG_DIAG9C;
235}
236
237static __init void detect_machine_facilities(void)
238{
239	if (test_facility(8)) {
240		get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT1;
241		system_ctl_set_bit(0, CR0_EDAT_BIT);
242	}
243	if (test_facility(78))
244		get_lowcore()->machine_flags |= MACHINE_FLAG_EDAT2;
245	if (test_facility(3))
246		get_lowcore()->machine_flags |= MACHINE_FLAG_IDTE;
247	if (test_facility(50) && test_facility(73)) {
248		get_lowcore()->machine_flags |= MACHINE_FLAG_TE;
249		system_ctl_set_bit(0, CR0_TRANSACTIONAL_EXECUTION_BIT);
250	}
251	if (test_facility(51))
252		get_lowcore()->machine_flags |= MACHINE_FLAG_TLB_LC;
253	if (test_facility(129))
254		system_ctl_set_bit(0, CR0_VECTOR_BIT);
255	if (test_facility(130))
256		get_lowcore()->machine_flags |= MACHINE_FLAG_NX;
257	if (test_facility(133))
258		get_lowcore()->machine_flags |= MACHINE_FLAG_GS;
259	if (test_facility(139) && (tod_clock_base.tod >> 63)) {
260		/* Enabled signed clock comparator comparisons */
261		get_lowcore()->machine_flags |= MACHINE_FLAG_SCC;
262		clock_comparator_max = -1ULL >> 1;
263		system_ctl_set_bit(0, CR0_CLOCK_COMPARATOR_SIGN_BIT);
264	}
265	if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
266		get_lowcore()->machine_flags |= MACHINE_FLAG_PCI_MIO;
267		/* the control bit is set during PCI initialization */
268	}
269	if (test_facility(194))
270		get_lowcore()->machine_flags |= MACHINE_FLAG_RDP;
271	if (test_facility(85))
272		get_lowcore()->machine_flags |= MACHINE_FLAG_SEQ_INSN;
273}
274
275static inline void save_vector_registers(void)
276{
277#ifdef CONFIG_CRASH_DUMP
278	if (test_facility(129))
279		save_vx_regs(boot_cpu_vector_save_area);
280#endif
281}
282
283static inline void setup_low_address_protection(void)
284{
285	system_ctl_set_bit(0, CR0_LOW_ADDRESS_PROTECTION_BIT);
286}
287
288static inline void setup_access_registers(void)
289{
290	unsigned int acrs[NUM_ACRS] = { 0 };
291
292	restore_access_regs(acrs);
293}
294
295char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
296static void __init setup_boot_command_line(void)
297{
298	/* copy arch command line */
299	strscpy(boot_command_line, early_command_line, COMMAND_LINE_SIZE);
300}
301
302static void __init sort_amode31_extable(void)
303{
304	sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
305}
306
307void __init startup_init(void)
308{
309	kasan_early_init();
310	reset_tod_clock();
311	time_early_init();
312	init_kernel_storage_key();
313	lockdep_off();
314	sort_amode31_extable();
315	setup_lowcore_early();
316	detect_machine_type();
317	setup_arch_string();
318	setup_boot_command_line();
319	detect_diag9c();
320	detect_machine_facilities();
321	save_vector_registers();
322	setup_topology();
323	sclp_early_detect();
324	setup_low_address_protection();
325	setup_access_registers();
326	lockdep_on();
327}
arch/s390/kernel/early.c (v3.15)
 
  1/*
  2 *    Copyright IBM Corp. 2007, 2009
  3 *    Author(s): Hongjie Yang <hongjie@us.ibm.com>,
  4 *		 Heiko Carstens <heiko.carstens@de.ibm.com>
  5 */
  6
  7#define KMSG_COMPONENT "setup"
  8#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  9
 10#include <linux/compiler.h>
 11#include <linux/init.h>
 12#include <linux/errno.h>
 13#include <linux/string.h>
 14#include <linux/ctype.h>
 15#include <linux/ftrace.h>
 16#include <linux/lockdep.h>
 17#include <linux/module.h>
 18#include <linux/pfn.h>
 19#include <linux/uaccess.h>
 20#include <linux/kernel.h>
 21#include <asm/ebcdic.h>
 22#include <asm/ipl.h>
 23#include <asm/lowcore.h>
 24#include <asm/processor.h>
 25#include <asm/sections.h>
 26#include <asm/setup.h>
 27#include <asm/sysinfo.h>
 28#include <asm/cpcmd.h>
 29#include <asm/sclp.h>
 30#include <asm/facility.h>
 31#include "entry.h"
 32
 33/*
 34 * Create a Kernel NSS if the SAVESYS= parameter is defined
 35 */
 36#define DEFSYS_CMD_SIZE		128
 37#define SAVESYS_CMD_SIZE	32
 38
 39char kernel_nss_name[NSS_NAME_SIZE + 1];
 40
 41static void __init setup_boot_command_line(void);
 42
 43/*
 44 * Get the TOD clock running.
 45 */
 46static void __init reset_tod_clock(void)
 47{
 48	u64 time;
 49
 50	if (store_tod_clock(&time) == 0)
 51		return;
 52	/* TOD clock not running. Set the clock to Unix Epoch. */
 53	if (set_tod_clock(TOD_UNIX_EPOCH) != 0 || store_tod_clock(&time) != 0)
 54		disabled_wait(0);
 55
 56	sched_clock_base_cc = TOD_UNIX_EPOCH;
 57	S390_lowcore.last_update_clock = sched_clock_base_cc;
 58}
 59
 60#ifdef CONFIG_SHARED_KERNEL
 61int __init savesys_ipl_nss(char *cmd, const int cmdlen);
 62
 63asm(
 64	"	.section .init.text,\"ax\",@progbits\n"
 65	"	.align	4\n"
 66	"	.type	savesys_ipl_nss, @function\n"
 67	"savesys_ipl_nss:\n"
 68#ifdef CONFIG_64BIT
 69	"	stmg	6,15,48(15)\n"
 70	"	lgr	14,3\n"
 71	"	sam31\n"
 72	"	diag	2,14,0x8\n"
 73	"	sam64\n"
 74	"	lgr	2,14\n"
 75	"	lmg	6,15,48(15)\n"
 76#else
 77	"	stm	6,15,24(15)\n"
 78	"	lr	14,3\n"
 79	"	diag	2,14,0x8\n"
 80	"	lr	2,14\n"
 81	"	lm	6,15,24(15)\n"
 82#endif
 83	"	br	14\n"
 84	"	.size	savesys_ipl_nss, .-savesys_ipl_nss\n"
 85	"	.previous\n");
 86
 87static __initdata char upper_command_line[COMMAND_LINE_SIZE];
 88
 89static noinline __init void create_kernel_nss(void)
 90{
 91	unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
 92#ifdef CONFIG_BLK_DEV_INITRD
 93	unsigned int sinitrd_pfn, einitrd_pfn;
 94#endif
 95	int response;
 96	int hlen;
 97	size_t len;
 98	char *savesys_ptr;
 99	char defsys_cmd[DEFSYS_CMD_SIZE];
100	char savesys_cmd[SAVESYS_CMD_SIZE];
101
102	/* Do nothing if we are not running under VM */
103	if (!MACHINE_IS_VM)
104		return;
105
106	/* Convert COMMAND_LINE to upper case */
107	for (i = 0; i < strlen(boot_command_line); i++)
108		upper_command_line[i] = toupper(boot_command_line[i]);
109
110	savesys_ptr = strstr(upper_command_line, "SAVESYS=");
111
112	if (!savesys_ptr)
113		return;
114
115	savesys_ptr += 8;    /* Point to the beginning of the NSS name */
116	for (i = 0; i < NSS_NAME_SIZE; i++) {
117		if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
118			break;
119		kernel_nss_name[i] = savesys_ptr[i];
120	}
121
122	stext_pfn = PFN_DOWN(__pa(&_stext));
123	eshared_pfn = PFN_DOWN(__pa(&_eshared));
124	end_pfn = PFN_UP(__pa(&_end));
125	min_size = end_pfn << 2;
126
127	hlen = snprintf(defsys_cmd, DEFSYS_CMD_SIZE,
128			"DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
129			kernel_nss_name, stext_pfn - 1, stext_pfn,
130			eshared_pfn - 1, eshared_pfn, end_pfn);
131
132#ifdef CONFIG_BLK_DEV_INITRD
133	if (INITRD_START && INITRD_SIZE) {
134		sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
135		einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
136		min_size = einitrd_pfn << 2;
137		hlen += snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
138				 " EW %.5X-%.5X", sinitrd_pfn, einitrd_pfn);
139	}
140#endif
141
142	snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
143		 " EW MINSIZE=%.7iK PARMREGS=0-13", min_size);
144	defsys_cmd[DEFSYS_CMD_SIZE - 1] = '\0';
145	snprintf(savesys_cmd, SAVESYS_CMD_SIZE, "SAVESYS %s \n IPL %s",
146		 kernel_nss_name, kernel_nss_name);
147	savesys_cmd[SAVESYS_CMD_SIZE - 1] = '\0';
148
149	__cpcmd(defsys_cmd, NULL, 0, &response);
150
151	if (response != 0) {
152		pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
153			response);
154		kernel_nss_name[0] = '\0';
155		return;
156	}
157
158	len = strlen(savesys_cmd);
159	ASCEBC(savesys_cmd, len);
160	response = savesys_ipl_nss(savesys_cmd, len);
161
162	/* On success: response is equal to the command size,
163	 *	       max SAVESYS_CMD_SIZE
164	 * On error: response contains the numeric portion of cp error message.
165	 *	     for SAVESYS it will be >= 263
166	 *	     for missing privilege class, it will be 1
167	 */
168	if (response > SAVESYS_CMD_SIZE || response == 1) {
169		pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
170			response);
171		kernel_nss_name[0] = '\0';
172		return;
173	}
174
175	/* re-initialize cputime accounting. */
176	sched_clock_base_cc = get_tod_clock();
177	S390_lowcore.last_update_clock = sched_clock_base_cc;
178	S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
179	S390_lowcore.user_timer = 0;
180	S390_lowcore.system_timer = 0;
181	asm volatile("SPT 0(%0)" : : "a" (&S390_lowcore.last_update_timer));
182
183	/* re-setup boot command line with new ipl vm parms */
184	ipl_update_parameters();
185	setup_boot_command_line();
186
187	ipl_flags = IPL_NSS_VALID;
188}
189
190#else /* CONFIG_SHARED_KERNEL */
191
192static inline void create_kernel_nss(void) { }
193
194#endif /* CONFIG_SHARED_KERNEL */
195
196/*
197 * Clear bss memory
198 */
199static noinline __init void clear_bss_section(void)
200{
201	memset(__bss_start, 0, __bss_stop - __bss_start);
202}
203
204/*
205 * Initialize storage key for kernel pages
206 */
207static noinline __init void init_kernel_storage_key(void)
208{
209#if PAGE_DEFAULT_KEY
210	unsigned long end_pfn, init_pfn;
211
212	end_pfn = PFN_UP(__pa(&_end));
213
214	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
215		page_set_storage_key(init_pfn << PAGE_SHIFT,
216				     PAGE_DEFAULT_KEY, 0);
217#endif
218}
219
220static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
221
222static noinline __init void detect_machine_type(void)
223{
224	struct sysinfo_3_2_2 *vmms = (struct sysinfo_3_2_2 *)&sysinfo_page;
225
226	/* Check current-configuration-level */
227	if (stsi(NULL, 0, 0, 0) <= 2) {
228		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
229		return;
230	}
231	/* Get virtual-machine cpu information. */
232	if (stsi(vmms, 3, 2, 2) || !vmms->count)
233		return;
234
235	/* Running under KVM? If not we assume z/VM */
236	if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
237		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
238	else
239		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
240}
241
242static __init void setup_topology(void)
243{
244#ifdef CONFIG_64BIT
245	int max_mnest;
246
247	if (!test_facility(11))
248		return;
249	S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
250	for (max_mnest = 6; max_mnest > 1; max_mnest--) {
251		if (stsi(&sysinfo_page, 15, 1, max_mnest) == 0)
252			break;
253	}
254	topology_max_mnest = max_mnest;
255#endif
256}
257
258static void early_pgm_check_handler(void)
259{
260	const struct exception_table_entry *fixup;
261	unsigned long addr;
262
263	addr = S390_lowcore.program_old_psw.addr;
264	fixup = search_exception_tables(addr & PSW_ADDR_INSN);
265	if (!fixup)
266		disabled_wait(0);
267	S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE;
268}
269
270static noinline __init void setup_lowcore_early(void)
271{
272	psw_t psw;
273
274	psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
275	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
276	S390_lowcore.external_new_psw = psw;
277	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
278	S390_lowcore.program_new_psw = psw;
279	s390_base_pgm_handler_fn = early_pgm_check_handler;
280}
281
282static noinline __init void setup_facility_list(void)
283{
284	stfle(S390_lowcore.stfle_fac_list,
285	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
286}
287
288static __init void detect_mvpg(void)
289{
290#ifndef CONFIG_64BIT
291	int rc;
292
293	asm volatile(
294		"	la	0,0\n"
295		"	mvpg	%2,%2\n"
296		"0:	la	%0,0\n"
297		"1:\n"
298		EX_TABLE(0b,1b)
299		: "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0");
300	if (!rc)
301		S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG;
302#endif
303}
304
305static __init void detect_ieee(void)
306{
307#ifndef CONFIG_64BIT
308	int rc, tmp;
309
310	asm volatile(
311		"	efpc	%1,0\n"
312		"0:	la	%0,0\n"
313		"1:\n"
314		EX_TABLE(0b,1b)
315		: "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc");
316	if (!rc)
317		S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE;
318#endif
319}
320
321static __init void detect_csp(void)
322{
323#ifndef CONFIG_64BIT
324	int rc;
325
326	asm volatile(
327		"	la	0,0\n"
328		"	la	1,0\n"
329		"	la	2,4\n"
330		"	csp	0,2\n"
331		"0:	la	%0,0\n"
332		"1:\n"
333		EX_TABLE(0b,1b)
334		: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2");
335	if (!rc)
336		S390_lowcore.machine_flags |= MACHINE_FLAG_CSP;
337#endif
338}
339
340static __init void detect_diag9c(void)
341{
342	unsigned int cpu_address;
343	int rc;
344
345	cpu_address = stap();
346	asm volatile(
347		"	diag	%2,0,0x9c\n"
348		"0:	la	%0,0\n"
349		"1:\n"
350		EX_TABLE(0b,1b)
351		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
352	if (!rc)
353		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
354}
355
356static __init void detect_diag44(void)
357{
358#ifdef CONFIG_64BIT
359	int rc;
360
361	asm volatile(
362		"	diag	0,0,0x44\n"
363		"0:	la	%0,0\n"
364		"1:\n"
365		EX_TABLE(0b,1b)
366		: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
367	if (!rc)
368		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
369#endif
370}
371
372static __init void detect_machine_facilities(void)
373{
374#ifdef CONFIG_64BIT
375	if (test_facility(8)) {
376		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
377		__ctl_set_bit(0, 23);
378	}
379	if (test_facility(78))
380		S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
381	if (test_facility(3))
382		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
383	if (test_facility(40))
384		S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
385	if (test_facility(50) && test_facility(73))
386		S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
387	if (test_facility(66))
388		S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
389	if (test_facility(51))
390		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
391#endif
392}
393
394static __init void rescue_initrd(void)
395{
396#ifdef CONFIG_BLK_DEV_INITRD
397	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
398	/*
399	 * Just like in case of IPL from VM reader we make sure there is a
400	 * gap of 4MB between end of kernel and start of initrd.
401	 * That way we can also be sure that saving an NSS will succeed,
402	 * which however only requires different segments.
403	 */
404	if (!INITRD_START || !INITRD_SIZE)
405		return;
406	if (INITRD_START >= min_initrd_addr)
407		return;
408	memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
409	INITRD_START = min_initrd_addr;
410#endif
411}
412
413/* Set up boot command line */
414static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
415{
416	char *parm, *delim;
417	size_t rc, len;
418
419	len = strlen(boot_command_line);
420
421	delim = boot_command_line + len;	/* '\0' character position */
422	parm  = boot_command_line + len + 1;	/* append right after '\0' */
423
424	rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
425	if (rc) {
426		if (*parm == '=')
427			memmove(boot_command_line, parm + 1, rc);
428		else
429			*delim = ' ';		/* replace '\0' with space */
430	}
431}
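/*
 * The ipl_data() callback (append_ipl_vmparm or append_ipl_scpdata)
 * writes its data right behind the current terminating '\0'. If that
 * data starts with '=', it replaces the whole command line; otherwise
 * the '\0' is turned into a space so the data is appended.
 */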
432
433static inline int has_ebcdic_char(const char *str)
434{
435	int i;
436
437	for (i = 0; str[i]; i++)
438		if (str[i] & 0x80)
439			return 1;
440	return 0;
441}
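/*
 * Note: plain 7-bit ASCII never has the top bit set, while EBCDIC
 * letters and digits do (e.g. 'A' is 0xC1, '0' is 0xF0), so a single
 * byte with bit 7 set is enough to decide that the command line still
 * needs the EBCDIC to ASCII conversion below.
 */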
442
443static void __init setup_boot_command_line(void)
444{
445	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
446	/* convert arch command line to ascii if necessary */
447	if (has_ebcdic_char(COMMAND_LINE))
448		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
449	/* copy arch command line */
450	strlcpy(boot_command_line, strstrip(COMMAND_LINE),
451		ARCH_COMMAND_LINE_SIZE);
452
453	/* append IPL PARM data to the boot command line */
454	if (MACHINE_IS_VM)
455		append_to_cmdline(append_ipl_vmparm);
456
457	append_to_cmdline(append_ipl_scpdata);
458}
459
460/*
461 * Save ipl parameters, clear bss memory, initialize storage keys
462 * and create a kernel NSS at startup if the SAVESYS= parm is defined
463 */
464void __init startup_init(void)
465{
466	reset_tod_clock();
467	ipl_save_parameters();
468	rescue_initrd();
469	clear_bss_section();
470	init_kernel_storage_key();
471	lockdep_init();
472	lockdep_off();
473	setup_lowcore_early();
474	setup_facility_list();
475	detect_machine_type();
476	ipl_update_parameters();
477	setup_boot_command_line();
478	create_kernel_nss();
479	detect_mvpg();
480	detect_ieee();
481	detect_csp();
482	detect_diag9c();
483	detect_diag44();
484	detect_machine_facilities();
485	setup_topology();
486	sclp_early_detect();
487#ifdef CONFIG_DYNAMIC_FTRACE
488	S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
489#endif
490	lockdep_on();
491}