v5.4: arch/x86/kernel/crash.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the processor. The
 * callback function pointer is assigned when the kvm_intel module
 * is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}
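/*
 * Illustrative sketch (not part of this file): the kvm_intel module is
 * expected to publish its handler on load and retract it on unload,
 * with RCU ensuring a crashing CPU sees either the old pointer or the
 * new one. The exact code lives under arch/x86/kvm/; names may differ.
 *
 *	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 *			   crash_vmclear_local_loaded_vmcss);
 *	...
 *	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
 *	synchronize_rcu();
 */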

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shoot down */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE

static unsigned long crash_zero_bytes;

static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges,
				get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

	/*
	 * Exclusion of the crash region and/or crashk_low_res may cause
	 * another range split. So add two extra slots here.
	 */
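	/*
	 * Worked example (illustrative addresses): excluding crashk_res
	 * [0x60000000, 0x6fffffff] from a single RAM range
	 * [0x00000000, 0x7fffffff] splits it into [0x00000000, 0x5fffffff]
	 * and [0x70000000, 0x7fffffff]. Each exclusion grows the array by
	 * at most one range, hence the two extra slots.
	 */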
	nr_ranges += 2;
	cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}

/*
 * Look for any unwanted ranges between mstart, mend and remove them. This
 * might lead to splits; the split ranges are put in the cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
					unsigned long *sz)
{
	struct crash_mem *cmem;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	int ret, i;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem,
				prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* By default prepare 64bit headers */
	ret = crash_prepare_elf64_headers(cmem,
				IS_ENABLED(CONFIG_X86_64), addr, sz);
	if (ret)
		goto out;

	/*
	 * If a range matches the backup region, adjust its offset to
	 * point at the backup segment.
	 */
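	/*
	 * Background (inferred from the comments in crash_load_segments()
	 * below): purgatory copies the first 640K
	 * (KEXEC_BACKUP_SRC_START..KEXEC_BACKUP_SRC_END) into the backup
	 * segment at crash time, because the kdump kernel may reuse low
	 * memory while booting. Pointing p_offset at the backup copy lets
	 * the dump read the pre-crash contents of that range.
	 */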
	ehdr = (Elf64_Ehdr *)*addr;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
		if (phdr->p_type == PT_LOAD &&
				phdr->p_paddr == image->arch.backup_src_start &&
				phdr->p_memsz == image->arch.backup_src_sz) {
			phdr->p_offset = image->arch.backup_load_addr;
			break;
		}
out:
	vfree(cmem);
	return ret;
}

static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
			sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = crash_exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add e820 reserved ranges */
	cmd.type = E820_TYPE_RESERVED;
	flags = IORESOURCE_MEM;
	walk_iomem_res_desc(IORES_DESC_RESERVED, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add the rest to the memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
						crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

static int determine_backup_region(struct resource *res, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = res->start;
	image->arch.backup_src_sz = resource_size(res);

	/* Expecting only one range for backup region */
	return 1;
}

int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for the backup area. The first
	 * 640K RAM region is the backup source.
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for the backup segment; it is
		 * filled in by purgatory after the crash. Just add a
		 * zero-filled segment for now so the checksum logic works.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */
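
For context (usage sketch, not part of the file): this CONFIG_KEXEC_FILE path runs when userspace loads the crash kernel through the kexec_file_load(2) syscall. With kexec-tools the invocation typically looks like the following, where -s selects the file-based syscall, -p loads a panic (crash) kernel, and the paths and command line are illustrative:

    kexec -s -p /boot/vmlinuz-$(uname -r) \
          --initrd=/boot/initramfs-$(uname -r).img \
          --append="root=/dev/sda1 maxcpus=1 reset_devices"

crash_load_segments() then adds the backup and elfcorehdr segments alongside the kernel and initrd segments supplied by userspace.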
v6.9.4: arch/x86/kernel/crash.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <asm/bootparam.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>
#include <asm/cmdline.h>
#include <asm/sev.h>

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	kdump_sev_callback();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shoot down */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	cpu_emergency_disable_virtualization();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_HOTPLUG)
static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

	/*
	 * Exclusion of the crash region and/or crashk_low_res may cause
	 * another range split. So add two extra slots here.
	 */
	nr_ranges += 2;
	cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}

/*
 * Look for any unwanted ranges between mstart, mend and remove them. This
 * might lead to splits; the split ranges are put in the cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude the low 1M because it is always reserved */
	ret = crash_exclude_mem_range(cmem, 0, SZ_1M - 1);
	if (ret)
		return ret;

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end)
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);

	return ret;
}

static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(void **addr, unsigned long *sz,
			       unsigned long *nr_mem_ranges)
{
	struct crash_mem *cmem;
	int ret;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* Return the computed number of memory ranges, for hotplug usage */
	*nr_mem_ranges = cmem->nr_ranges;

	/* By default prepare 64bit headers */
	ret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);

out:
	vfree(cmem);
	return ret;
}
#endif

#ifdef CONFIG_KEXEC_FILE
static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry, sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude elf header region */
	start = image->elf_load_addr;
	end = start + image->elf_headers_sz - 1;
	return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(struct_size(cmem, ranges, 1));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add the low 1M */
	cmd.type = E820_TYPE_RAM;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_NONE, flags, 0, (1<<20)-1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add e820 reserved ranges */
	cmd.type = E820_TYPE_RESERVED;
	flags = IORESOURCE_MEM;
	walk_iomem_res_desc(IORES_DESC_RESERVED, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = resource_size(&crashk_low_res);
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add the rest to the memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

int crash_load_segments(struct kimage *image)
{
	int ret;
	unsigned long pnum = 0;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(&kbuf.buffer, &kbuf.bufsz, &pnum);
	if (ret)
		return ret;

	image->elf_headers	= kbuf.buffer;
	image->elf_headers_sz	= kbuf.bufsz;
	kbuf.memsz		= kbuf.bufsz;

#ifdef CONFIG_CRASH_HOTPLUG
	/*
	 * The elfcorehdr segment size accounts for VMCOREINFO, kernel_map,
	 * maximum CPUs and maximum memory ranges.
	 */
	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
		pnum = 2 + CONFIG_NR_CPUS_DEFAULT + CONFIG_CRASH_MAX_MEMORY_RANGES;
	else
		pnum += 2 + CONFIG_NR_CPUS_DEFAULT;

	if (pnum < (unsigned long)PN_XNUM) {
		kbuf.memsz = pnum * sizeof(Elf64_Phdr);
		kbuf.memsz += sizeof(Elf64_Ehdr);

		image->elfcorehdr_index = image->nr_segments;

		/* Mark the whole padded size usable by the crash kernel, else it fails on boot */
		image->elf_headers_sz = kbuf.memsz;
	} else {
		pr_err("number of Phdrs %lu exceeds max\n", pnum);
	}
#endif

	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	ret = kexec_add_buffer(&kbuf);
	if (ret)
		return ret;
	image->elf_load_addr = kbuf.mem;
	kexec_dprintk("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		      image->elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */

#ifdef CONFIG_CRASH_HOTPLUG

#undef pr_fmt
#define pr_fmt(fmt) "crash hp: " fmt

/* These functions provide the value for the sysfs crash_hotplug nodes */
#ifdef CONFIG_HOTPLUG_CPU
int arch_crash_hotplug_cpu_support(void)
{
	return crash_check_update_elfcorehdr();
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_crash_hotplug_memory_support(void)
{
	return crash_check_update_elfcorehdr();
}
#endif

unsigned int arch_crash_get_elfcorehdr_size(void)
{
	unsigned int sz;

	/* kernel_map, VMCOREINFO and maximum CPUs */
	sz = 2 + CONFIG_NR_CPUS_DEFAULT;
	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
		sz += CONFIG_CRASH_MAX_MEMORY_RANGES;
	sz *= sizeof(Elf64_Phdr);
	return sz;
}
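/*
 * Worked example (illustrative config values, not defaults taken from
 * this file): with CONFIG_NR_CPUS_DEFAULT=64,
 * CONFIG_CRASH_MAX_MEMORY_RANGES=8192 and sizeof(Elf64_Phdr) == 56,
 * this returns (2 + 64 + 8192) * 56 = 462448 bytes of program-header
 * space, enough for any later CPU or memory hotplug event without
 * moving the elfcorehdr segment.
 */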

/**
 * arch_crash_handle_hotplug_event() - Handle hotplug elfcorehdr changes
 * @image: a pointer to kexec_crash_image
 *
 * Prepare the new elfcorehdr and replace the existing elfcorehdr.
 */
void arch_crash_handle_hotplug_event(struct kimage *image)
{
	void *elfbuf = NULL, *old_elfcorehdr;
	unsigned long nr_mem_ranges;
	unsigned long mem, memsz;
	unsigned long elfsz = 0;

	/*
	 * As crash_prepare_elf64_headers() has already described all
	 * possible CPUs, there is no need to update the elfcorehdr
	 * for additional CPU changes.
	 */
	if ((image->file_mode || image->elfcorehdr_updated) &&
		((image->hp_action == KEXEC_CRASH_HP_ADD_CPU) ||
		(image->hp_action == KEXEC_CRASH_HP_REMOVE_CPU)))
		return;

	/*
	 * Create the new elfcorehdr reflecting the changes to CPU and/or
	 * memory resources.
	 */
	if (prepare_elf_headers(&elfbuf, &elfsz, &nr_mem_ranges)) {
		pr_err("unable to create new elfcorehdr\n");
		goto out;
	}

	/*
	 * Obtain address and size of the elfcorehdr segment, and
	 * check it against the new elfcorehdr buffer.
	 */
	mem = image->segment[image->elfcorehdr_index].mem;
	memsz = image->segment[image->elfcorehdr_index].memsz;
	if (elfsz > memsz) {
		pr_err("update elfcorehdr elfsz %lu > memsz %lu\n",
			elfsz, memsz);
		goto out;
	}

	/*
	 * Copy new elfcorehdr over the old elfcorehdr at destination.
	 */
	old_elfcorehdr = kmap_local_page(pfn_to_page(mem >> PAGE_SHIFT));
	if (!old_elfcorehdr) {
		pr_err("mapping elfcorehdr segment failed\n");
		goto out;
	}

	/*
	 * Temporarily invalidate the crash image while the
	 * elfcorehdr is updated.
	 */
	xchg(&kexec_crash_image, NULL);
	memcpy_flushcache(old_elfcorehdr, elfbuf, elfsz);
	xchg(&kexec_crash_image, image);
	kunmap_local(old_elfcorehdr);
	pr_debug("updated elfcorehdr\n");

out:
	vfree(elfbuf);
}
#endif
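
Usage note (an assumption about the sysfs layout, based on the crash_hotplug nodes mentioned in the comment above): userspace kdump tooling can query whether the kernel updates the elfcorehdr itself on hotplug, and so whether it must reload the crash kernel after a CPU or memory change:

    cat /sys/devices/system/cpu/crash_hotplug
    cat /sys/devices/system/memory/crash_hotplug

A value of 1 indicates these arch hooks handle the elfcorehdr update in place.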