arch/sh/kernel/machine_kexec.c, v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
 *
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * LANDISK/sh4 supported by kogiidena
 */
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/sh_bios.h>
#include <asm/reboot.h>

typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
				      unsigned long reboot_code_buffer,
				      unsigned long start_address);

extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
extern void *vbr_base;

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* Nothing to do for UP, but definitely broken for SMP.. */
}

/*
 * Do whatever setup is needed on the image and the reboot code
 * buffer to allow us to avoid allocations later.
 */
int machine_kexec_prepare(struct kimage *image)
{
	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
}

static void kexec_info(struct kimage *image)
{
	int i;

	printk("kexec information\n");
	for (i = 0; i < image->nr_segments; i++) {
		printk("  segment[%d]: 0x%08x - 0x%08x (0x%08x)\n",
		       i,
		       (unsigned int)image->segment[i].mem,
		       (unsigned int)image->segment[i].mem +
				     image->segment[i].memsz,
		       (unsigned int)image->segment[i].memsz);
	}
	printk("  start     : 0x%08x\n\n", (unsigned int)image->start);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer;
	relocate_new_kernel_t rnk;
	unsigned long entry;
	unsigned long *ptr;
	int save_ftrace_enabled;

	/*
	 * Nicked from the mips version of machine_kexec():
	 * The generic kexec code builds a page list with physical
	 * addresses. Use phys_to_virt() to convert them to virtual.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
	       phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		save_processor_state();
#endif

	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	page_list = image->head;

	/* we need both effective and real address here */
	reboot_code_buffer =
			(unsigned long)page_address(image->control_code_page);

	/* copy our kernel relocation code to the control code page */
	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
						relocate_new_kernel_size);

	kexec_info(image);
	flush_cache_all();

	sh_bios_vbr_reload();

	/* now call it */
	rnk = (relocate_new_kernel_t) reboot_code_buffer;
	(*rnk)(page_list, reboot_code_buffer,
	       (unsigned long)phys_to_virt(image->start));

#ifdef CONFIG_KEXEC_JUMP
	asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");

	if (image->preserve_context)
		restore_processor_state();

	/* Convert page list back to physical addresses, what a mess. */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (*ptr & IND_INDIRECTION) ?
	       phys_to_virt(*ptr & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = virt_to_phys(*ptr);
	}
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}

void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
#ifdef CONFIG_X2TLB
	VMCOREINFO_CONFIG(X2TLB);
#endif
}

void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
			&crash_size, &crash_base);
	if (ret == 0 && crash_size > 0) {
		crashk_res.start = crash_base;
		crashk_res.end = crash_base + crash_size - 1;
	}

	if (crashk_res.end == crashk_res.start)
		goto disable;

	crash_size = PAGE_ALIGN(resource_size(&crashk_res));
	if (!crashk_res.start) {
		unsigned long max = memblock_end_of_DRAM() - memory_limit;
		crashk_res.start = memblock_phys_alloc_range(crash_size,
							     PAGE_SIZE, 0, max);
		if (!crashk_res.start) {
			pr_err("crashkernel allocation failed\n");
			goto disable;
		}
	} else {
		ret = memblock_reserve(crashk_res.start, crash_size);
		if (unlikely(ret < 0)) {
			pr_err("crashkernel reservation failed - "
			       "memory is in use\n");
			goto disable;
		}
	}

	crashk_res.end = crashk_res.start + crash_size - 1;

	/*
	 * Crash kernel trumps memory limit
	 */
	if ((memblock_end_of_DRAM() - memory_limit) <= crashk_res.end) {
		memory_limit = 0;
		pr_info("Disabled memory limit for crashkernel\n");
	}

	pr_info("Reserving %ldMB of memory at 0x%08lx "
		"for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crashk_res.start),
		(unsigned long)(memblock_phys_mem_size() >> 20));

	return;

disable:
	crashk_res.start = crashk_res.end = 0;
}
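
The first loop in machine_kexec() above walks the kexec indirection list: image->head points at a chain of unsigned long entries whose low bits carry an IND_* flag (IND_DESTINATION, IND_SOURCE, IND_INDIRECTION, IND_DONE, as defined in include/linux/kexec.h) and whose upper bits hold a page address, which this architecture converts with phys_to_virt() before jumping to the relocation code. The sketch below is a user-space illustration only, not kernel code: EX_PAGE_SIZE/EX_PAGE_MASK are local stand-ins for the kernel's page constants, ordinary aligned allocations replace physical pages, and the walk mirrors the loop structure above while skipping the phys_to_virt() conversion.

/*
 * Minimal user-space sketch of the kexec indirection list layout.
 * The IND_* flag values mirror include/linux/kexec.h; everything
 * else (EX_PAGE_SIZE, the allocations) is illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

#define EX_PAGE_SIZE	4096UL
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))

#define IND_DESTINATION	0x1UL
#define IND_INDIRECTION	0x2UL
#define IND_DONE	0x4UL
#define IND_SOURCE	0x8UL

int main(void)
{
	/* One "indirection page" holding a tiny example list. */
	unsigned long *ind = aligned_alloc(EX_PAGE_SIZE, EX_PAGE_SIZE);
	unsigned long *src = aligned_alloc(EX_PAGE_SIZE, EX_PAGE_SIZE);
	unsigned long *dst = aligned_alloc(EX_PAGE_SIZE, EX_PAGE_SIZE);
	unsigned long head, entry, *ptr;

	if (!ind || !src || !dst)
		return 1;

	ind[0] = (unsigned long)dst | IND_DESTINATION;
	ind[1] = (unsigned long)src | IND_SOURCE;
	ind[2] = IND_DONE;

	/* head normally points at the first indirection page. */
	head = (unsigned long)ind | IND_INDIRECTION;

	/*
	 * Same walk as machine_kexec(): follow IND_INDIRECTION links,
	 * otherwise step to the next entry, and stop at IND_DONE.
	 */
	for (ptr = &head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
	       (unsigned long *)(entry & EX_PAGE_MASK) : ptr + 1) {
		if (entry & IND_DESTINATION)
			printf("destination page at %p\n",
			       (void *)(entry & EX_PAGE_MASK));
		else if (entry & IND_SOURCE)
			printf("source page at      %p\n",
			       (void *)(entry & EX_PAGE_MASK));
		else if (entry & IND_INDIRECTION)
			printf("indirection page at %p\n",
			       (void *)(entry & EX_PAGE_MASK));
	}

	free(ind);
	free(src);
	free(dst);
	return 0;
}

The same traversal, with virt_to_phys() applied instead, is what the CONFIG_KEXEC_JUMP path in machine_kexec() uses to restore the list after the relocation code returns.
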
arch/sh/kernel/machine_kexec.c, v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * machine_kexec.c - handle transition of Linux booting another kernel
 * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
 *
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * LANDISK/sh4 supported by kogiidena
 */
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <asm/sh_bios.h>
#include <asm/reboot.h>

typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
				      unsigned long reboot_code_buffer,
				      unsigned long start_address);

extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
extern void *vbr_base;

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* Nothing to do for UP, but definitely broken for SMP.. */
}

/*
 * Do whatever setup is needed on the image and the reboot code
 * buffer to allow us to avoid allocations later.
 */
int machine_kexec_prepare(struct kimage *image)
{
	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
}

static void kexec_info(struct kimage *image)
{
	int i;

	printk("kexec information\n");
	for (i = 0; i < image->nr_segments; i++) {
		printk("  segment[%d]: 0x%08x - 0x%08x (0x%08x)\n",
		       i,
		       (unsigned int)image->segment[i].mem,
		       (unsigned int)image->segment[i].mem +
				     image->segment[i].memsz,
		       (unsigned int)image->segment[i].memsz);
	}
	printk("  start     : 0x%08x\n\n", (unsigned int)image->start);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer;
	relocate_new_kernel_t rnk;
	unsigned long entry;
	unsigned long *ptr;
	int save_ftrace_enabled;

	/*
	 * Nicked from the mips version of machine_kexec():
	 * The generic kexec code builds a page list with physical
	 * addresses. Use phys_to_virt() to convert them to virtual.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
	       phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		save_processor_state();
#endif

	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	page_list = image->head;

	/* we need both effective and real address here */
	reboot_code_buffer =
			(unsigned long)page_address(image->control_code_page);

	/* copy our kernel relocation code to the control code page */
	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
						relocate_new_kernel_size);

	kexec_info(image);
	flush_cache_all();

	sh_bios_vbr_reload();

	/* now call it */
	rnk = (relocate_new_kernel_t) reboot_code_buffer;
	(*rnk)(page_list, reboot_code_buffer,
	       (unsigned long)phys_to_virt(image->start));

#ifdef CONFIG_KEXEC_JUMP
	asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory");

	if (image->preserve_context)
		restore_processor_state();

	/* Convert page list back to physical addresses, what a mess. */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (*ptr & IND_INDIRECTION) ?
	       phys_to_virt(*ptr & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = virt_to_phys(*ptr);
	}
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}

void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
#ifdef CONFIG_X2TLB
	VMCOREINFO_CONFIG(X2TLB);
#endif
}

void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
			&crash_size, &crash_base, NULL, NULL);
	if (ret == 0 && crash_size > 0) {
		crashk_res.start = crash_base;
		crashk_res.end = crash_base + crash_size - 1;
	}

	if (crashk_res.end == crashk_res.start)
		goto disable;

	crash_size = PAGE_ALIGN(resource_size(&crashk_res));
	if (!crashk_res.start) {
		unsigned long max = memblock_end_of_DRAM() - memory_limit;
		crashk_res.start = memblock_phys_alloc_range(crash_size,
							     PAGE_SIZE, 0, max);
		if (!crashk_res.start) {
			pr_err("crashkernel allocation failed\n");
			goto disable;
		}
	} else {
		ret = memblock_reserve(crashk_res.start, crash_size);
		if (unlikely(ret < 0)) {
			pr_err("crashkernel reservation failed - "
			       "memory is in use\n");
			goto disable;
		}
	}

	crashk_res.end = crashk_res.start + crash_size - 1;

	/*
	 * Crash kernel trumps memory limit
	 */
	if ((memblock_end_of_DRAM() - memory_limit) <= crashk_res.end) {
		memory_limit = 0;
		pr_info("Disabled memory limit for crashkernel\n");
	}

	pr_info("Reserving %ldMB of memory at 0x%08lx "
		"for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crashk_res.start),
		(unsigned long)(memblock_phys_mem_size() >> 20));

	return;

disable:
	crashk_res.start = crashk_res.end = 0;
}
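
Apart from the dropped asm/pgtable.h and asm/pgalloc.h includes, the only difference between the v5.4 and v6.8 listings above is the parse_crashkernel() call, which gains two trailing NULL arguments; these appear to correspond to the optional low/high crashkernel reservation outputs added to the common parser in newer kernels. In both versions reserve_crashkernel() ultimately consumes the crashkernel=size[@offset] boot option. The sketch below is a user-space illustration only: parse_size() and parse_crashkernel_arg() are hypothetical stand-ins for the kernel's memparse() and parse_crashkernel(), showing how the size and optional base that reserve_crashkernel() then reserves with memblock are split out of the option string.

/*
 * User-space sketch of the crashkernel=size[@offset] syntax that
 * reserve_crashkernel() consumes.  parse_size() is a simplified
 * stand-in for the kernel's memparse(); only K/M/G suffixes are
 * handled, and error handling is kept minimal.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long long parse_size(const char *s, const char **end)
{
	char *e;
	unsigned long long v = strtoull(s, &e, 0);

	switch (*e) {
	case 'G': case 'g': v <<= 30; e++; break;
	case 'M': case 'm': v <<= 20; e++; break;
	case 'K': case 'k': v <<= 10; e++; break;
	}
	*end = e;
	return v;
}

/* Split "size[@offset]" into the crash_size/crash_base pair. */
static int parse_crashkernel_arg(const char *arg,
				 unsigned long long *crash_size,
				 unsigned long long *crash_base)
{
	const char *p;

	*crash_base = 0;
	*crash_size = parse_size(arg, &p);
	if (!*crash_size)
		return -1;
	if (*p == '@')
		*crash_base = parse_size(p + 1, &p);
	return 0;
}

int main(void)
{
	unsigned long long size, base;

	/* e.g. crashkernel=64M@32M on the kernel command line */
	if (parse_crashkernel_arg("64M@32M", &size, &base) == 0)
		printf("crash_size=0x%llx crash_base=0x%llx\n", size, base);

	/* With no @offset the base stays 0. */
	if (parse_crashkernel_arg("128M", &size, &base) == 0)
		printf("crash_size=0x%llx crash_base=0x%llx\n", size, base);

	return 0;
}

Run as-is it prints the size/base pairs for crashkernel=64M@32M and crashkernel=128M; in the latter case the base stays 0 and reserve_crashkernel() picks one itself via memblock_phys_alloc_range(), which is exactly the !crashk_res.start branch in the listings above.
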