v6.8
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Code to handle transition of Linux booting another kernel.
  4 *
  5 * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
  6 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
  7 * Copyright (C) 2005 IBM Corporation.
  8 */
  9
 10#include <linux/kexec.h>
 11#include <linux/reboot.h>
 12#include <linux/threads.h>
 13#include <linux/memblock.h>
 14#include <linux/of.h>
 15#include <linux/irq.h>
 16#include <linux/ftrace.h>
 17
 18#include <asm/kdump.h>
 19#include <asm/machdep.h>
  20#include <asm/pgalloc.h>
 21#include <asm/sections.h>
 22#include <asm/setup.h>
 23#include <asm/firmware.h>
 24
 25void machine_kexec_mask_interrupts(void) {
 26	unsigned int i;
 27	struct irq_desc *desc;
 28
 29	for_each_irq_desc(i, desc) {
 30		struct irq_chip *chip;
 31
 32		chip = irq_desc_get_chip(desc);
 33		if (!chip)
 34			continue;
 35
 36		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
 37			chip->irq_eoi(&desc->irq_data);
 38
 39		if (chip->irq_mask)
 40			chip->irq_mask(&desc->irq_data);
 41
 42		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
 43			chip->irq_disable(&desc->irq_data);
 44	}
 45}
 46
 47void machine_crash_shutdown(struct pt_regs *regs)
 48{
 49	default_machine_crash_shutdown(regs);
 50}
  51
 52void machine_kexec_cleanup(struct kimage *image)
 53{
 54}
 55
 56void arch_crash_save_vmcoreinfo(void)
 57{
 58
 59#ifdef CONFIG_NUMA
 60	VMCOREINFO_SYMBOL(node_data);
 61	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
 62#endif
 63#ifndef CONFIG_NUMA
 64	VMCOREINFO_SYMBOL(contig_page_data);
 65#endif
 66#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
 67	VMCOREINFO_SYMBOL(vmemmap_list);
 68	VMCOREINFO_SYMBOL(mmu_vmemmap_psize);
 69	VMCOREINFO_SYMBOL(mmu_psize_defs);
 70	VMCOREINFO_STRUCT_SIZE(vmemmap_backing);
 71	VMCOREINFO_OFFSET(vmemmap_backing, list);
 72	VMCOREINFO_OFFSET(vmemmap_backing, phys);
 73	VMCOREINFO_OFFSET(vmemmap_backing, virt_addr);
 74	VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
 75	VMCOREINFO_OFFSET(mmu_psize_def, shift);
 76#endif
 77	VMCOREINFO_SYMBOL(cur_cpu_spec);
 78	VMCOREINFO_OFFSET(cpu_spec, cpu_features);
 79	VMCOREINFO_OFFSET(cpu_spec, mmu_features);
 80	vmcoreinfo_append_str("NUMBER(RADIX_MMU)=%d\n", early_radix_enabled());
 81	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
 82}
 83
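/*
 * Editor's note (not part of the upstream file): the VMCOREINFO_* macros and
 * vmcoreinfo_append_str() calls above emit plain "NAME=value" text lines into
 * the vmcoreinfo ELF note of the crash dump, e.g. roughly:
 *
 *	SYMBOL(cur_cpu_spec)=c000000001234567
 *	NUMBER(RADIX_MMU)=1
 *	KERNELOFFSET=0
 *
 * Dump tools such as makedumpfile and crash parse these strings to learn
 * symbol addresses, the MMU mode and the KASLR offset; the values shown here
 * are illustrative only.
 */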
 84/*
 85 * Do not allocate memory (or fail in any way) in machine_kexec().
 86 * We are past the point of no return, committed to rebooting now.
 87 */
 88void machine_kexec(struct kimage *image)
 89{
 90	int save_ftrace_enabled;
 91
 92	save_ftrace_enabled = __ftrace_enabled_save();
 93	this_cpu_disable_ftrace();
 94
 95	if (ppc_md.machine_kexec)
 96		ppc_md.machine_kexec(image);
 97	else
 98		default_machine_kexec(image);
 99
100	this_cpu_enable_ftrace();
101	__ftrace_enabled_restore(save_ftrace_enabled);
102
103	/* Fall back to normal restart if we're still alive. */
104	machine_restart(NULL);
105	for(;;);
106}
107
108void __init reserve_crashkernel(void)
109{
110	unsigned long long crash_size, crash_base, total_mem_sz;
111	int ret;
112
113	total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
114	/* use common parsing */
115	ret = parse_crashkernel(boot_command_line, total_mem_sz,
116			&crash_size, &crash_base, NULL, NULL);
117	if (ret == 0 && crash_size > 0) {
118		crashk_res.start = crash_base;
119		crashk_res.end = crash_base + crash_size - 1;
120	}
121
122	if (crashk_res.end == crashk_res.start) {
123		crashk_res.start = crashk_res.end = 0;
124		return;
125	}
126
 127	/* We might have got these values via the command line or the
 128	 * device tree; either way, sanitise them now. */
129
130	crash_size = resource_size(&crashk_res);
131
132#ifndef CONFIG_NONSTATIC_KERNEL
133	if (crashk_res.start != KDUMP_KERNELBASE)
134		printk("Crash kernel location must be 0x%x\n",
135				KDUMP_KERNELBASE);
136
137	crashk_res.start = KDUMP_KERNELBASE;
138#else
139	if (!crashk_res.start) {
140#ifdef CONFIG_PPC64
 141		/*
 142		 * On LPAR platforms, place the crash kernel at the middle
 143		 * of the RMA size (capped at 512MB) so that it has enough
 144		 * room for itself and some stack in the first segment,
 145		 * while the normal kernel still has enough space in the
 146		 * first segment to allocate essential system resources.
 147		 * On other platforms, keep the crash kernel start at a
 148		 * 128MB offset.
 149		 */
150		if (firmware_has_feature(FW_FEATURE_LPAR))
151			crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_512M);
152		else
153			crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_128M);
154#else
155		crashk_res.start = KDUMP_KERNELBASE;
156#endif
157	}
158
159	crash_base = PAGE_ALIGN(crashk_res.start);
160	if (crash_base != crashk_res.start) {
161		printk("Crash kernel base must be aligned to 0x%lx\n",
162				PAGE_SIZE);
163		crashk_res.start = crash_base;
164	}
165
166#endif
167	crash_size = PAGE_ALIGN(crash_size);
168	crashk_res.end = crashk_res.start + crash_size - 1;
169
170	/* The crash region must not overlap the current kernel */
171	if (overlaps_crashkernel(__pa(_stext), _end - _stext)) {
172		printk(KERN_WARNING
173			"Crash kernel can not overlap current kernel\n");
174		crashk_res.start = crashk_res.end = 0;
175		return;
176	}
177
178	/* Crash kernel trumps memory limit */
179	if (memory_limit && memory_limit <= crashk_res.end) {
180		memory_limit = crashk_res.end + 1;
181		total_mem_sz = memory_limit;
182		printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
183		       memory_limit);
184	}
185
186	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
187			"for crashkernel (System RAM: %ldMB)\n",
188			(unsigned long)(crash_size >> 20),
189			(unsigned long)(crashk_res.start >> 20),
190			(unsigned long)(total_mem_sz >> 20));
191
192	if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
193	    memblock_reserve(crashk_res.start, crash_size)) {
194		pr_err("Failed to reserve memory for crashkernel!\n");
195		crashk_res.start = crashk_res.end = 0;
196		return;
197	}
198}
199
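/*
 * Editor's note (not part of the upstream file): parse_crashkernel() above
 * consumes the standard crashkernel= boot parameter, for example:
 *
 *	crashkernel=256M		reserve 256MB, base chosen by this code
 *	crashkernel=512M@256M		reserve 512MB starting at 256MB
 *	crashkernel=2G-4G:256M,4G-:512M	size chosen by total system RAM
 *
 * When no @offset is given, crash_base comes back as 0 and the
 * CONFIG_PPC64 branch above picks a default start inside the RMA.
 */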
200int __init overlaps_crashkernel(unsigned long start, unsigned long size)
201{
202	return (start + size) > crashk_res.start && start <= crashk_res.end;
203}
204
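/*
 * Editor's note (not part of the upstream file), a quick worked example of
 * the overlap test above with crashk_res spanning [0x8000000, 0xfffffff]:
 *
 *	start = 0x0,       size = 0x2000000 -> 0x2000000 > 0x8000000 is false,
 *	                                        so no overlap
 *	start = 0x7000000, size = 0x2000000 -> both conditions true,
 *	                                        so it overlaps
 */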
205/* Values we need to export to the second kernel via the device tree. */
206static phys_addr_t kernel_end;
207static phys_addr_t crashk_base;
208static phys_addr_t crashk_size;
209static unsigned long long mem_limit;
210
211static struct property kernel_end_prop = {
212	.name = "linux,kernel-end",
213	.length = sizeof(phys_addr_t),
214	.value = &kernel_end,
215};
216
217static struct property crashk_base_prop = {
218	.name = "linux,crashkernel-base",
219	.length = sizeof(phys_addr_t),
220	.value = &crashk_base
221};
222
223static struct property crashk_size_prop = {
224	.name = "linux,crashkernel-size",
225	.length = sizeof(phys_addr_t),
226	.value = &crashk_size,
227};
228
229static struct property memory_limit_prop = {
230	.name = "linux,memory-limit",
231	.length = sizeof(unsigned long long),
232	.value = &mem_limit,
233};
234
235#define cpu_to_be_ulong	__PASTE(cpu_to_be, BITS_PER_LONG)
236
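/*
 * Editor's note (not part of the upstream file): with BITS_PER_LONG == 64
 * the macro above pastes to
 *
 *	#define cpu_to_be_ulong	cpu_to_be64
 *
 * and to cpu_to_be32 on 32-bit. Either way the exported property values are
 * stored big-endian, which is the byte order device tree consumers expect.
 */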
237static void __init export_crashk_values(struct device_node *node)
238{
239	/* There might be existing crash kernel properties, but we can't
240	 * be sure what's in them, so remove them. */
241	of_remove_property(node, of_find_property(node,
242				"linux,crashkernel-base", NULL));
243	of_remove_property(node, of_find_property(node,
244				"linux,crashkernel-size", NULL));
245
246	if (crashk_res.start != 0) {
 247		crashk_base = cpu_to_be_ulong(crashk_res.start);
248		of_add_property(node, &crashk_base_prop);
249		crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
250		of_add_property(node, &crashk_size_prop);
251	}
252
253	/*
254	 * memory_limit is required by the kexec-tools to limit the
255	 * crash regions to the actual memory used.
256	 */
257	mem_limit = cpu_to_be_ulong(memory_limit);
258	of_update_property(node, &memory_limit_prop);
259}
260
261static int __init kexec_setup(void)
262{
263	struct device_node *node;
264
265	node = of_find_node_by_path("/chosen");
266	if (!node)
267		return -ENOENT;
268
269	/* remove any stale properties so ours can be found */
270	of_remove_property(node, of_find_property(node, kernel_end_prop.name, NULL));
271
272	/* information needed by userspace when using default_machine_kexec */
273	kernel_end = cpu_to_be_ulong(__pa(_end));
274	of_add_property(node, &kernel_end_prop);
275
276	export_crashk_values(node);
277
278	of_node_put(node);
279	return 0;
280}
281late_initcall(kexec_setup);
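
The properties written by kexec_setup() and export_crashk_values() above are read back by userspace (kexec-tools) through /proc/device-tree. Below is a minimal userspace sketch of that read, assuming a 64-bit phys_addr_t and the standard /proc/device-tree mount, with simplified error handling; it is not taken from kexec-tools itself.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/device-tree/chosen/linux,crashkernel-base";
	uint64_t be_base;
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fread(&be_base, sizeof(be_base), 1, f) != 1) {
		fprintf(stderr, "short read from %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);

	/* The kernel stored the value with cpu_to_be_ulong(), so swap back. */
	printf("crashkernel base: 0x%llx\n",
	       (unsigned long long)be64toh(be_base));
	return 0;
}

be64toh() here mirrors the cpu_to_be_ulong() conversion done on the kernel side.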
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Code to handle transition of Linux booting another kernel.
  4 *
  5 * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
  6 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
  7 * Copyright (C) 2005 IBM Corporation.
  8 */
  9
 10#include <linux/kexec.h>
 11#include <linux/reboot.h>
 12#include <linux/threads.h>
 13#include <linux/memblock.h>
 14#include <linux/of.h>
 15#include <linux/irq.h>
 16#include <linux/ftrace.h>
 17
 18#include <asm/kdump.h>
 19#include <asm/machdep.h>
 20#include <asm/pgalloc.h>
 21#include <asm/prom.h>
  22#include <asm/sections.h>
 23
 24void machine_kexec_mask_interrupts(void) {
 25	unsigned int i;
 26	struct irq_desc *desc;
 27
 28	for_each_irq_desc(i, desc) {
 29		struct irq_chip *chip;
 30
 31		chip = irq_desc_get_chip(desc);
 32		if (!chip)
 33			continue;
 34
 35		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
 36			chip->irq_eoi(&desc->irq_data);
 37
 38		if (chip->irq_mask)
 39			chip->irq_mask(&desc->irq_data);
 40
 41		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
 42			chip->irq_disable(&desc->irq_data);
 43	}
 44}
 45
 46void machine_crash_shutdown(struct pt_regs *regs)
 47{
 48	default_machine_crash_shutdown(regs);
 49}
 50
  51/*
  52 * Do whatever setup is needed on the image and the
  53 * reboot code buffer so that we can avoid
  54 * allocations later.
  55 */
 56int machine_kexec_prepare(struct kimage *image)
 57{
 58	if (ppc_md.machine_kexec_prepare)
 59		return ppc_md.machine_kexec_prepare(image);
 60	else
 61		return default_machine_kexec_prepare(image);
 62}
 63
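/*
 * Editor's sketch (hypothetical, not part of the upstream file): a platform
 * overrides the generic path above by filling in
 * ppc_md.machine_kexec_prepare through its machine description. The
 * "myplat" names below are invented purely for illustration.
 */
static int myplat_kexec_prepare(struct kimage *image)
{
	/* Platform-specific checks or fixups would go here. */
	return default_machine_kexec_prepare(image);
}

define_machine(myplat) {
	.name			= "myplat",
	.machine_kexec_prepare	= myplat_kexec_prepare,
	/* a real machine description also sets .probe and friends */
};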
 64void machine_kexec_cleanup(struct kimage *image)
 65{
 66}
 67
 68void arch_crash_save_vmcoreinfo(void)
 69{
 70
 71#ifdef CONFIG_NEED_MULTIPLE_NODES
 72	VMCOREINFO_SYMBOL(node_data);
 73	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
 74#endif
 75#ifndef CONFIG_NEED_MULTIPLE_NODES
 76	VMCOREINFO_SYMBOL(contig_page_data);
 77#endif
 78#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
 79	VMCOREINFO_SYMBOL(vmemmap_list);
 80	VMCOREINFO_SYMBOL(mmu_vmemmap_psize);
 81	VMCOREINFO_SYMBOL(mmu_psize_defs);
 82	VMCOREINFO_STRUCT_SIZE(vmemmap_backing);
 83	VMCOREINFO_OFFSET(vmemmap_backing, list);
 84	VMCOREINFO_OFFSET(vmemmap_backing, phys);
 85	VMCOREINFO_OFFSET(vmemmap_backing, virt_addr);
 86	VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
 87	VMCOREINFO_OFFSET(mmu_psize_def, shift);
  88#endif
 89	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
 90}
 91
 92/*
 93 * Do not allocate memory (or fail in any way) in machine_kexec().
 94 * We are past the point of no return, committed to rebooting now.
 95 */
 96void machine_kexec(struct kimage *image)
 97{
 98	int save_ftrace_enabled;
 99
100	save_ftrace_enabled = __ftrace_enabled_save();
101	this_cpu_disable_ftrace();
102
103	if (ppc_md.machine_kexec)
104		ppc_md.machine_kexec(image);
105	else
106		default_machine_kexec(image);
107
108	this_cpu_enable_ftrace();
109	__ftrace_enabled_restore(save_ftrace_enabled);
110
111	/* Fall back to normal restart if we're still alive. */
112	machine_restart(NULL);
113	for(;;);
114}
115
116void __init reserve_crashkernel(void)
117{
118	unsigned long long crash_size, crash_base, total_mem_sz;
119	int ret;
120
121	total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
122	/* use common parsing */
123	ret = parse_crashkernel(boot_command_line, total_mem_sz,
124			&crash_size, &crash_base);
125	if (ret == 0 && crash_size > 0) {
126		crashk_res.start = crash_base;
127		crashk_res.end = crash_base + crash_size - 1;
128	}
129
130	if (crashk_res.end == crashk_res.start) {
131		crashk_res.start = crashk_res.end = 0;
132		return;
133	}
134
 135	/* We might have got these values via the command line or the
 136	 * device tree; either way, sanitise them now. */
137
138	crash_size = resource_size(&crashk_res);
139
140#ifndef CONFIG_NONSTATIC_KERNEL
141	if (crashk_res.start != KDUMP_KERNELBASE)
142		printk("Crash kernel location must be 0x%x\n",
143				KDUMP_KERNELBASE);
144
145	crashk_res.start = KDUMP_KERNELBASE;
146#else
147	if (!crashk_res.start) {
148#ifdef CONFIG_PPC64
 149		/*
 150		 * On 64-bit we split the RMO in half but cap it at half of
 151		 * a small SLB (128MB), since the crash kernel needs to place
 152		 * itself and some stacks in the first segment.
 153		 */
 154		crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
155#else
156		crashk_res.start = KDUMP_KERNELBASE;
157#endif
158	}
159
160	crash_base = PAGE_ALIGN(crashk_res.start);
161	if (crash_base != crashk_res.start) {
162		printk("Crash kernel base must be aligned to 0x%lx\n",
163				PAGE_SIZE);
164		crashk_res.start = crash_base;
165	}
166
167#endif
168	crash_size = PAGE_ALIGN(crash_size);
169	crashk_res.end = crashk_res.start + crash_size - 1;
170
171	/* The crash region must not overlap the current kernel */
172	if (overlaps_crashkernel(__pa(_stext), _end - _stext)) {
173		printk(KERN_WARNING
174			"Crash kernel can not overlap current kernel\n");
175		crashk_res.start = crashk_res.end = 0;
176		return;
177	}
178
179	/* Crash kernel trumps memory limit */
180	if (memory_limit && memory_limit <= crashk_res.end) {
181		memory_limit = crashk_res.end + 1;
182		total_mem_sz = memory_limit;
183		printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
184		       memory_limit);
185	}
186
187	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
188			"for crashkernel (System RAM: %ldMB)\n",
189			(unsigned long)(crash_size >> 20),
190			(unsigned long)(crashk_res.start >> 20),
191			(unsigned long)(total_mem_sz >> 20));
192
193	if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
194	    memblock_reserve(crashk_res.start, crash_size)) {
195		pr_err("Failed to reserve memory for crashkernel!\n");
196		crashk_res.start = crashk_res.end = 0;
197		return;
198	}
199}
200
201int overlaps_crashkernel(unsigned long start, unsigned long size)
202{
203	return (start + size) > crashk_res.start && start <= crashk_res.end;
204}
205
206/* Values we need to export to the second kernel via the device tree. */
207static phys_addr_t kernel_end;
208static phys_addr_t crashk_base;
209static phys_addr_t crashk_size;
210static unsigned long long mem_limit;
211
212static struct property kernel_end_prop = {
213	.name = "linux,kernel-end",
214	.length = sizeof(phys_addr_t),
215	.value = &kernel_end,
216};
217
218static struct property crashk_base_prop = {
219	.name = "linux,crashkernel-base",
220	.length = sizeof(phys_addr_t),
221	.value = &crashk_base
222};
223
224static struct property crashk_size_prop = {
225	.name = "linux,crashkernel-size",
226	.length = sizeof(phys_addr_t),
227	.value = &crashk_size,
228};
229
230static struct property memory_limit_prop = {
231	.name = "linux,memory-limit",
232	.length = sizeof(unsigned long long),
233	.value = &mem_limit,
234};
235
236#define cpu_to_be_ulong	__PASTE(cpu_to_be, BITS_PER_LONG)
237
238static void __init export_crashk_values(struct device_node *node)
239{
240	/* There might be existing crash kernel properties, but we can't
241	 * be sure what's in them, so remove them. */
242	of_remove_property(node, of_find_property(node,
243				"linux,crashkernel-base", NULL));
244	of_remove_property(node, of_find_property(node,
245				"linux,crashkernel-size", NULL));
246
247	if (crashk_res.start != 0) {
 248		crashk_base = cpu_to_be_ulong(crashk_res.start);
249		of_add_property(node, &crashk_base_prop);
250		crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
251		of_add_property(node, &crashk_size_prop);
252	}
253
254	/*
255	 * memory_limit is required by the kexec-tools to limit the
256	 * crash regions to the actual memory used.
257	 */
258	mem_limit = cpu_to_be_ulong(memory_limit);
259	of_update_property(node, &memory_limit_prop);
260}
261
262static int __init kexec_setup(void)
263{
264	struct device_node *node;
265
266	node = of_find_node_by_path("/chosen");
267	if (!node)
268		return -ENOENT;
269
270	/* remove any stale properties so ours can be found */
271	of_remove_property(node, of_find_property(node, kernel_end_prop.name, NULL));
272
273	/* information needed by userspace when using default_machine_kexec */
274	kernel_end = cpu_to_be_ulong(__pa(_end));
275	of_add_property(node, &kernel_end_prop);
276
277	export_crashk_values(node);
278
279	of_node_put(node);
280	return 0;
281}
282late_initcall(kexec_setup);
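
One practical difference between the two versions above is the default crash kernel base when no @offset is given: v5.9 always caps ppc64_rma_size / 2 at 128MB, while v6.8 allows up to 512MB on LPAR and 128MB elsewhere. A small standalone sketch of that arithmetic follows; the 768MB RMA size is just an example value.

#include <stdio.h>

#define MB(x)	((unsigned long long)(x) << 20)

static unsigned long long min_ull(unsigned long long a, unsigned long long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long long rma = MB(768);	/* example ppc64_rma_size */

	printf("v5.9 default base       : %lluMB\n", min_ull(rma / 2, MB(128)) >> 20);
	printf("v6.8 default base, LPAR : %lluMB\n", min_ull(rma / 2, MB(512)) >> 20);
	printf("v6.8 default base, other: %lluMB\n", min_ull(rma / 2, MB(128)) >> 20);
	return 0;
}

With the example 768MB RMA this prints 128MB, 384MB and 128MB respectively.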