v5.4 (arch/x86/kernel/crash.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>

/* Used while preparing memory map entries for the second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * Used to VMCLEAR all VMCSs loaded on the processor. The callback
 * function pointer is assigned when the kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}
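For context, the producer side of this RCU-protected hook lives in the hypervisor module. A minimal sketch of how a module such as kvm_intel might publish and later withdraw the callback (the helper names here are illustrative, not part of this file):

/* Sketch: publishing the VMCLEAR callback from a hypervisor module. */
static void crash_vmclear_local_loaded_vmcss(void)
{
	/* VMCLEAR every VMCS loaded on this CPU (illustrative stub). */
}

static void crash_vmclear_register(void)
{
	/* Pairs with the rcu_dereference() in the reader above. */
	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
			   crash_vmclear_local_loaded_vmcss);
}

static void crash_vmclear_unregister(void)
{
	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
	/* Wait for in-flight readers before the callback can go away. */
	synchronize_rcu();
}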
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting the kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
	restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE

static unsigned long crash_zero_bytes;

static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
	unsigned int nr_ranges = 0;
	struct crash_mem *cmem;

	walk_system_ram_res(0, -1, &nr_ranges,
				get_nr_ram_ranges_callback);
	if (!nr_ranges)
		return NULL;

	/*
	 * Exclusion of the crash region and/or crashk_low_res may cause
	 * another range split, so add two extra slots here.
	 */
	nr_ranges += 2;
	cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}

/*
 * Look for any unwanted ranges in cmem and remove them. This might lead
 * to splits; the resulting sub-ranges are put in the cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);
	}

	return ret;
}
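To see why fill_up_crash_elf_data() reserves two extra slots: each exclusion splits at most one existing range into two, so the two calls above can grow the array by at most two entries. A hedged illustration of the generic helper's behavior (all addresses are made up):

/* Sketch: one exclusion grows the range array by at most one entry. */
static void crash_exclude_demo(struct crash_mem *cmem)
{
	/* Assume cmem holds the single range [0x0, 0xffffffff] and the
	 * crashkernel region is [0x60000000, 0x67ffffff]. */
	crash_exclude_mem_range(cmem, 0x60000000, 0x67ffffff);

	/* cmem now holds [0x0, 0x5fffffff] and [0x68000000, 0xffffffff];
	 * a second exclusion (crashk_low_res) can add one more range the
	 * same way, hence nr_ranges += 2 above. */
}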
static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
					unsigned long *sz)
{
	struct crash_mem *cmem;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	int ret, i;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem,
				prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* By default prepare 64bit headers */
	ret = crash_prepare_elf64_headers(cmem,
				IS_ENABLED(CONFIG_X86_64), addr, sz);
	if (ret)
		goto out;

	/*
	 * If a range matches backup region, adjust offset to backup
	 * segment.
	 */
	ehdr = (Elf64_Ehdr *)*addr;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; phdr++, i++)
		if (phdr->p_type == PT_LOAD &&
				phdr->p_paddr == image->arch.backup_src_start &&
				phdr->p_memsz == image->arch.backup_src_sz) {
			phdr->p_offset = image->arch.backup_load_addr;
			break;
		}
out:
	vfree(cmem);
	return ret;
}

static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
			sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = crash_exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
		       memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			memmap_entry_callback);

	/* Add e820 reserved ranges */
	cmd.type = E820_TYPE_RESERVED;
	flags = IORESOURCE_MEM;
	walk_iomem_res_desc(IORES_DESC_RESERVED, flags, 0, -1, &cmd,
			   memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
						crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}
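The table built above is what the kdump kernel will consume as its e820 map via boot_params. A purely hypothetical debug helper (not part of this file) that prints what was just assembled:

/* Sketch: dump the crash kernel's memory map for debugging. */
static void dump_crash_memmap(struct boot_params *params)
{
	unsigned int i;

	for (i = 0; i < params->e820_entries; i++)
		pr_debug("memmap: [0x%llx-0x%llx] type %u\n",
			 params->e820_table[i].addr,
			 params->e820_table[i].addr +
				params->e820_table[i].size - 1,
			 params->e820_table[i].type);
}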
static int determine_backup_region(struct resource *res, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = res->start;
	image->arch.backup_src_sz = resource_size(res);

	/* Expecting only one range for backup region */
	return 1;
}

int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for the backup area. The first
	 * 640K RAM region is the backup source.
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for the backup segment; it is
		 * copied in purgatory after the crash. Just add a zero-filled
		 * segment for now to make sure the checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */
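For reference, crash_load_segments() is reached from the kexec_file_load() path when a crash (kdump) image is being prepared. A minimal sketch of the call site, modeled on the bzImage loader in arch/x86/kernel/kexec-bzimage64.c (the wrapper name is illustrative):

/* Sketch: the image loader pulls in crash segments for kdump images. */
static int load_crash_segments_if_needed(struct kimage *image)
{
	if (image->type == KEXEC_TYPE_CRASH)
		return crash_load_segments(image);

	return 0;
}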
v4.10.11 (arch/x86/kernel/crash.c)
 
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>

/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN   4096

/* This primarily represents number of split ranges due to exclusion */
#define CRASH_MAX_RANGES	16

struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int nr_ranges;
	struct crash_mem_range ranges[CRASH_MAX_RANGES];
};

/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
	struct kimage *image;
	/*
	 * Total number of ram ranges we have after various adjustments for
	 * crash reserved region, etc.
	 */
	unsigned int max_nr_ranges;

	/* Pointer to elf header */
	void *ehdr;
	/* Pointer to next phdr */
	void *bufp;
	struct crash_mem mem;
};

/* Used while preparing memory map entries for the second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * Used to VMCLEAR all VMCSs loaded on the processor. The callback
 * function pointer is assigned when the kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Disable VMX or SVM if needed.
	 *
	 * We need to disable virtualization on all CPUs.
	 * Having VMX or SVM enabled on any CPU may break rebooting
	 * after the kdump kernel has finished its task.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/* Booting the kdump kernel with VMX or SVM enabled won't work,
	 * because (among other limitations) we can't disable paging
	 * with the virt flags.
	 */
	cpu_emergency_vmxoff();
	cpu_emergency_svm_disable();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	disable_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
				   struct kimage *image)
{
	unsigned int nr_ranges = 0;

	ced->image = image;

	walk_system_ram_res(0, -1, &nr_ranges,
				get_nr_ram_ranges_callback);

	ced->max_nr_ranges = nr_ranges;

	/* Exclusion of crash region could split memory ranges */
	ced->max_nr_ranges++;

	/* If crashk_low_res is not 0, another range split is possible */
	if (crashk_low_res.end)
		ced->max_nr_ranges++;
}

static int exclude_mem_range(struct crash_mem *mem,
		unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If no split happened, we are done */
	if (!temp_range.end)
		return 0;

	/* A split happened; add the new range to the array */
	if (i == CRASH_MAX_RANGES - 1) {
		pr_err("Too many crash ranges after split\n");
		return -ENOMEM;
	}

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
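To make the branches above concrete, carving a window out of the middle of a single range exercises the split path. A standalone sketch with made-up addresses:

/* Sketch: exercising the split path of exclude_mem_range(). */
static void exclude_mem_range_demo(void)
{
	struct crash_mem mem = {
		.nr_ranges = 1,
		.ranges = { { .start = 0x0, .end = 0x7fffffff } },
	};

	exclude_mem_range(&mem, 0x10000000, 0x1fffffff);

	/*
	 * Result: two ranges, [0x0, 0xfffffff] and [0x20000000, 0x7fffffff].
	 * An overlap at the start or end of a range only truncates it, and
	 * an exact cover drops the range and shifts the rest left.
	 */
}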
/*
 * Look for any unwanted ranges between mstart, mend and remove them. This
 * might lead to splits; the split ranges are put in the ced->mem.ranges[]
 * array.
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced,
		unsigned long long mstart, unsigned long long mend)
{
	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

	memset(cmem->ranges, 0, sizeof(cmem->ranges));

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long mstart, mend;
	struct kimage *image = ced->image;
	struct crash_mem *cmem;
	int ret, i;

	ehdr = ced->ehdr;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(ced, start, end);
	if (ret)
		return ret;

	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
	cmem = &ced->mem;

	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;

		phdr = ced->bufp;
		ced->bufp += sizeof(Elf64_Phdr);

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		/*
		 * If a range matches backup region, adjust offset to backup
		 * segment.
		 */
		if (mstart == image->arch.backup_src_start &&
		    (mend - mstart + 1) == image->arch.backup_src_sz)
			phdr->p_offset = image->arch.backup_load_addr;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			ehdr->e_phnum, phdr->p_offset);
	}

	return ret;
}

static int prepare_elf64_headers(struct crash_elf_data *ced,
		void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu;
	unsigned long long notes_addr;
	int ret;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += ced->max_nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for the kernel text
	 * mapping area on x86_64 (ffffffff80000000 - ffffffffa0000000),
	 * which tools like gdb appear to require. So the same physical
	 * memory is mapped by two elf headers: one with kernel text
	 * virtual addresses and the other with __va(physical) addresses.
	 */
	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note);
	(ehdr->e_phnum)++;

#ifdef CONFIG_X86_64
	/* Prepare PT_LOAD type program header for kernel text region */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R|PF_W|PF_X;
	phdr->p_vaddr = (Elf64_Addr)_text;
	phdr->p_filesz = phdr->p_memsz = _end - _text;
	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
	(ehdr->e_phnum)++;
#endif

	/* Prepare PT_LOAD headers for system ram chunks. */
	ced->ehdr = ehdr;
	ced->bufp = bufp;
	ret = walk_system_ram_res(0, -1, ced,
			prepare_elf64_ram_headers_callback);
	if (ret < 0)
		return ret;

	*addr = buf;
	*sz = elf_sz;
	return 0;
}
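As a worked example of the sizing arithmetic above: with, say, 8 possible CPUs and 4 ram ranges (illustrative numbers only), nr_phdr = 8 + 1 (vmcoreinfo) + 4 + 1 (kernel text) = 14, so elf_sz = 64 + 14 * 56 = 848 bytes, rounded up to one 4 KiB page:

/* Sketch: header buffer sizing for illustrative CPU/range counts. */
static unsigned long elf_headers_sz_demo(void)
{
	unsigned long nr_phdr = 8 + 1;	/* per-cpu PT_NOTEs + vmcoreinfo */

	nr_phdr += 4;			/* one PT_LOAD per ram range */
	nr_phdr++;			/* extra PT_LOAD for kernel text */

	return ALIGN(sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr),
		     ELF_CORE_HEADER_ALIGN);	/* 848 -> 4096 */
}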
/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
					unsigned long *sz)
{
	struct crash_elf_data *ced;
	int ret;

	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
	if (!ced)
		return -ENOMEM;

	fill_up_crash_elf_data(ced, image);

	/* By default prepare 64bit headers */
	ret = prepare_elf64_headers(ced, addr, sz);
	kfree(ced);
	return ret;
}

static int add_e820_entry(struct boot_params *params, struct e820entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820MAX)
		return 1;

	memcpy(&params->e820_map[nr_e820_entries], entry,
			sizeof(struct e820entry));
	params->e820_entries++;
	return 0;
}

static int memmap_entry_callback(u64 start, u64 end, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820entry ei;

	ei.addr = start;
	ei.size = end - start + 1;
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret = 0;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(sizeof(struct crash_mem));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
		       memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
						crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}

static int determine_backup_region(u64 start, u64 end, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = start;
	image->arch.backup_src_sz = end - start + 1;

	/* Expecting only one range for backup region */
	return 1;
}

int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for the backup area. The first
	 * 640K RAM region is the backup source.
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for the backup segment; it is
		 * copied in purgatory after the crash. Just add a zero-filled
		 * segment for now to make sure the checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
#endif /* CONFIG_KEXEC_FILE */