// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *        Vivek Goyal <vgoyal@redhat.com>
 *
 */

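/* Prefix all pr_*() output from this file with "kexec: " */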
#define pr_fmt(fmt) "kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
        struct boot_params *params;
        /* Type of memory */
        unsigned int type;
};

/*
 * This is used to VMCLEAR all the VMCSs loaded on the processor. The
 * callback pointer is assigned when the kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
        crash_vmclear_fn *do_vmclear_operation = NULL;

        rcu_read_lock();
        do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
        if (do_vmclear_operation)
                do_vmclear_operation();
        rcu_read_unlock();
}
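
/*
 * A minimal sketch (assumed, not part of this file) of how a hypervisor
 * module such as kvm_intel would publish its callback through the RCU
 * pointer above; crash_vmclear_local_loaded_vmcss() is a hypothetical
 * local handler name:
 *
 *        // on module load
 *        rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 *                           crash_vmclear_local_loaded_vmcss);
 *
 *        // on module unload, before the handler code goes away
 *        RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
 *        synchronize_rcu();
 */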

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
        crash_save_cpu(regs, cpu);

        /*
         * VMCLEAR VMCSs loaded on all cpus if needed.
         */
        cpu_crash_vmclear_loaded_vmcss();

        /*
         * Disable VMX or SVM if needed.
         *
         * We need to disable virtualization on all CPUs.
         * Having VMX or SVM enabled on any CPU may break rebooting
         * after the kdump kernel has finished its task.
         */
        cpu_emergency_vmxoff();
        cpu_emergency_svm_disable();

        /*
         * Disable Intel PT to stop its logging
         */
        cpu_emergency_stop_pt();

        disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
        nmi_shootdown_cpus(kdump_nmi_callback);

        disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
        static int cpus_stopped;

        if (cpus_stopped)
                return;

        if (smp_ops.crash_stop_other_cpus)
                smp_ops.crash_stop_other_cpus();
        else
                smp_send_stop();

        cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
        /* There are no cpus to shoot down */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
        /*
         * This function is only called after the system
         * has panicked or is otherwise in a critical state.
         * The minimum amount of code to allow a kexec'd kernel
         * to run successfully needs to happen here.
         *
         * In practice this means shooting down the other cpus in
         * an SMP system.
         */
        /* The kernel is broken so disable interrupts */
        local_irq_disable();

        crash_smp_send_stop();

        /*
         * VMCLEAR VMCSs loaded on this cpu if needed.
         */
        cpu_crash_vmclear_loaded_vmcss();

        /*
         * Booting kdump kernel with VMX or SVM enabled won't work,
         * because (among other limitations) we can't disable paging
         * with the virt flags.
         */
        cpu_emergency_vmxoff();
        cpu_emergency_svm_disable();

        /*
         * Disable Intel PT to stop its logging
         */
        cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
        /* Prevent crash_kexec() from deadlocking on ioapic_lock. */
        ioapic_zap_locks();
        clear_IO_APIC();
#endif
        lapic_shutdown();
        restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
        hpet_disable();
#endif
        crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE

static unsigned long crash_zero_bytes;

static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
        unsigned int *nr_ranges = arg;

        (*nr_ranges)++;
        return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
        unsigned int nr_ranges = 0;
        struct crash_mem *cmem;

        walk_system_ram_res(0, -1, &nr_ranges,
                            get_nr_ram_ranges_callback);
        if (!nr_ranges)
                return NULL;

        /*
         * Exclusion of crash region and/or crashk_low_res may cause
         * another range split. So add extra two slots here.
         */
        nr_ranges += 2;
        cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
        if (!cmem)
                return NULL;

        cmem->max_nr_ranges = nr_ranges;
        cmem->nr_ranges = 0;

        return cmem;
}

/*
 * Look for any unwanted ranges between mstart and mend and remove them.
 * This may cause ranges to be split; the resulting sub-ranges are left
 * in the cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
        int ret = 0;

        /* Exclude crashkernel region */
        ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
        if (ret)
                return ret;

        if (crashk_low_res.end) {
                ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
                                              crashk_low_res.end);
        }

        return ret;
}
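
/*
 * A worked example of the splitting behavior above: if cmem holds the
 * single range [0x1000, 0xffff] and crashk_res is [0x4000, 0x7fff],
 * crash_exclude_mem_range() leaves two ranges, [0x1000, 0x3fff] and
 * [0x8000, 0xffff]. Each exclusion can add at most one extra range this
 * way, which is why fill_up_crash_elf_data() reserves two spare slots.
 */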

static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
        struct crash_mem *cmem = arg;

        cmem->ranges[cmem->nr_ranges].start = res->start;
        cmem->ranges[cmem->nr_ranges].end = res->end;
        cmem->nr_ranges++;

        return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
                               unsigned long *sz)
{
        struct crash_mem *cmem;
        Elf64_Ehdr *ehdr;
        Elf64_Phdr *phdr;
        int ret, i;

        cmem = fill_up_crash_elf_data();
        if (!cmem)
                return -ENOMEM;

        ret = walk_system_ram_res(0, -1, cmem,
                                  prepare_elf64_ram_headers_callback);
        if (ret)
                goto out;

        /* Exclude unwanted mem ranges */
        ret = elf_header_exclude_ranges(cmem);
        if (ret)
                goto out;

        /* By default prepare 64bit headers */
        ret = crash_prepare_elf64_headers(cmem,
                                          IS_ENABLED(CONFIG_X86_64), addr, sz);
        if (ret)
                goto out;

        /*
         * If a range matches backup region, adjust offset to backup
         * segment.
         */
        ehdr = (Elf64_Ehdr *)*addr;
        phdr = (Elf64_Phdr *)(ehdr + 1);
        for (i = 0; i < ehdr->e_phnum; phdr++, i++)
                if (phdr->p_type == PT_LOAD &&
                    phdr->p_paddr == image->arch.backup_src_start &&
                    phdr->p_memsz == image->arch.backup_src_sz) {
                        phdr->p_offset = image->arch.backup_load_addr;
                        break;
                }
out:
        vfree(cmem);
        return ret;
}
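
/*
 * Rough layout of the header buffer returned above, as produced by
 * crash_prepare_elf64_headers() (a sketch, not a byte-exact map):
 *
 *        Elf64_Ehdr
 *        Elf64_Phdr PT_NOTE   - per-CPU crash notes, one per present CPU
 *        Elf64_Phdr PT_NOTE   - vmcoreinfo
 *        Elf64_Phdr PT_LOAD   - kernel text mapping (when kernel_map is set)
 *        Elf64_Phdr PT_LOAD   - one per RAM range remaining in cmem
 */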

static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
        unsigned int nr_e820_entries;

        nr_e820_entries = params->e820_entries;
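        /*
         * The e820 table in the zero page is a fixed array with room for
         * E820_MAX_ENTRIES_ZEROPAGE (128) entries.
         */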
        if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
                return 1;

        memcpy(&params->e820_table[nr_e820_entries], entry,
               sizeof(struct e820_entry));
        params->e820_entries++;
        return 0;
}

static int memmap_entry_callback(struct resource *res, void *arg)
{
        struct crash_memmap_data *cmd = arg;
        struct boot_params *params = cmd->params;
        struct e820_entry ei;

        ei.addr = res->start;
        ei.size = resource_size(res);
        ei.type = cmd->type;
        add_e820_entry(params, &ei);

        return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
                                 unsigned long long mstart,
                                 unsigned long long mend)
{
        unsigned long start, end;
        int ret = 0;

        cmem->ranges[0].start = mstart;
        cmem->ranges[0].end = mend;
        cmem->nr_ranges = 1;

        /* Exclude Backup region */
        start = image->arch.backup_load_addr;
        end = start + image->arch.backup_src_sz - 1;
        ret = crash_exclude_mem_range(cmem, start, end);
        if (ret)
                return ret;

        /* Exclude elf header region */
        start = image->arch.elf_load_addr;
        end = start + image->arch.elf_headers_sz - 1;
        return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
        int i, ret = 0;
        unsigned long flags;
        struct e820_entry ei;
        struct crash_memmap_data cmd;
        struct crash_mem *cmem;

        cmem = vzalloc(sizeof(struct crash_mem));
        if (!cmem)
                return -ENOMEM;

        memset(&cmd, 0, sizeof(struct crash_memmap_data));
        cmd.params = params;

        /* Add first 640K segment */
        ei.addr = image->arch.backup_src_start;
        ei.size = image->arch.backup_src_sz;
        ei.type = E820_TYPE_RAM;
        add_e820_entry(params, &ei);

        /* Add ACPI tables */
        cmd.type = E820_TYPE_ACPI;
        flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
                            memmap_entry_callback);

        /* Add ACPI Non-volatile Storage */
        cmd.type = E820_TYPE_NVS;
        walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
                            memmap_entry_callback);

        /* Add e820 reserved ranges */
        cmd.type = E820_TYPE_RESERVED;
        flags = IORESOURCE_MEM;
        walk_iomem_res_desc(IORES_DESC_RESERVED, flags, 0, -1, &cmd,
                            memmap_entry_callback);

        /* Add crashk_low_res region */
        if (crashk_low_res.end) {
                ei.addr = crashk_low_res.start;
                ei.size = crashk_low_res.end - crashk_low_res.start + 1;
                ei.type = E820_TYPE_RAM;
                add_e820_entry(params, &ei);
        }

        /* Exclude some ranges from crashk_res and add rest to memmap */
        ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
                                    crashk_res.end);
        if (ret)
                goto out;

        for (i = 0; i < cmem->nr_ranges; i++) {
                ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

                /* If entry is less than a page, skip it */
                if (ei.size < PAGE_SIZE)
                        continue;
                ei.addr = cmem->ranges[i].start;
                ei.type = E820_TYPE_RAM;
                add_e820_entry(params, &ei);
        }

out:
        vfree(cmem);
        return ret;
}

static int determine_backup_region(struct resource *res, void *arg)
{
        struct kimage *image = arg;

        image->arch.backup_src_start = res->start;
        image->arch.backup_src_sz = resource_size(res);

        /* Expecting only one range for backup region */
        return 1;
}
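
/*
 * Returning a non-zero value above makes walk_system_ram_res() stop
 * after the first matching range instead of continuing the iteration.
 */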

int crash_load_segments(struct kimage *image)
{
        int ret;
        struct kexec_buf kbuf = { .image = image, .buf_min = 0,
                                  .buf_max = ULONG_MAX, .top_down = false };

        /*
         * Determine and load a segment for backup area. First 640K RAM
         * region is backup source
         */

        ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
                                  image, determine_backup_region);

        /* Zero or positive return values are ok */
        if (ret < 0)
                return ret;

        /* Add backup segment. */
        if (image->arch.backup_src_sz) {
                kbuf.buffer = &crash_zero_bytes;
                kbuf.bufsz = sizeof(crash_zero_bytes);
                kbuf.memsz = image->arch.backup_src_sz;
                kbuf.buf_align = PAGE_SIZE;
                /*
                 * Ideally there is no source for backup segment. This is
                 * copied in purgatory after crash. Just add a zero filled
                 * segment for now to make sure checksum logic works fine.
                 */
                ret = kexec_add_buffer(&kbuf);
                if (ret)
                        return ret;
                image->arch.backup_load_addr = kbuf.mem;
                pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
                         image->arch.backup_load_addr,
                         image->arch.backup_src_start, kbuf.memsz);
        }

        /* Prepare elf headers and add a segment */
        ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
        if (ret)
                return ret;

        image->arch.elf_headers = kbuf.buffer;
        image->arch.elf_headers_sz = kbuf.bufsz;

        kbuf.memsz = kbuf.bufsz;
        kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
        kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
        ret = kexec_add_buffer(&kbuf);
        if (ret) {
                vfree((void *)image->arch.elf_headers);
                return ret;
        }
        image->arch.elf_load_addr = kbuf.mem;
        pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
                 image->arch.elf_load_addr, kbuf.bufsz, kbuf.memsz);

        return ret;
}
#endif /* CONFIG_KEXEC_FILE */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *        Vivek Goyal <vgoyal@redhat.com>
 *
 */

#define pr_fmt(fmt) "kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
#include <asm/crash.h>
#include <asm/cmdline.h>

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
        struct boot_params *params;
        /* Type of memory */
        unsigned int type;
};

/*
 * This is used to VMCLEAR all the VMCSs loaded on the processor. The
 * callback pointer is assigned when the kvm_intel module is loaded.
 *
 * Protected by RCU.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
        crash_vmclear_fn *do_vmclear_operation = NULL;

        rcu_read_lock();
        do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
        if (do_vmclear_operation)
                do_vmclear_operation();
        rcu_read_unlock();
}

/*
 * When the crashkernel option is specified, only use the low
 * 1M for the real mode trampoline.
 */
void __init crash_reserve_low_1M(void)
{
        if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0)
                return;

        memblock_reserve(0, 1<<20);
        pr_info("Reserving the low 1M of memory for crashkernel\n");
}
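
/*
 * cmdline_find_option() returns -1 when the option is absent, so the
 * early return above fires only if "crashkernel" was not given. For
 * example, booting with "crashkernel=256M" makes the call return the
 * argument's length (4 here) and the low 1M gets reserved.
 */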

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
        crash_save_cpu(regs, cpu);

        /*
         * VMCLEAR VMCSs loaded on all cpus if needed.
         */
        cpu_crash_vmclear_loaded_vmcss();

        /*
         * Disable VMX or SVM if needed.
         *
         * We need to disable virtualization on all CPUs.
         * Having VMX or SVM enabled on any CPU may break rebooting
         * after the kdump kernel has finished its task.
         */
        cpu_emergency_vmxoff();
        cpu_emergency_svm_disable();

        /*
         * Disable Intel PT to stop its logging
         */
        cpu_emergency_stop_pt();

        disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
        nmi_shootdown_cpus(kdump_nmi_callback);

        disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
        static int cpus_stopped;

        if (cpus_stopped)
                return;

        if (smp_ops.crash_stop_other_cpus)
                smp_ops.crash_stop_other_cpus();
        else
                smp_send_stop();

        cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
        /* There are no cpus to shoot down */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
        /*
         * This function is only called after the system
         * has panicked or is otherwise in a critical state.
         * The minimum amount of code to allow a kexec'd kernel
         * to run successfully needs to happen here.
         *
         * In practice this means shooting down the other cpus in
         * an SMP system.
         */
        /* The kernel is broken so disable interrupts */
        local_irq_disable();

        crash_smp_send_stop();

        /*
         * VMCLEAR VMCSs loaded on this cpu if needed.
         */
        cpu_crash_vmclear_loaded_vmcss();

        /*
         * Booting kdump kernel with VMX or SVM enabled won't work,
         * because (among other limitations) we can't disable paging
         * with the virt flags.
         */
        cpu_emergency_vmxoff();
        cpu_emergency_svm_disable();

        /*
         * Disable Intel PT to stop its logging
         */
        cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
        /* Prevent crash_kexec() from deadlocking on ioapic_lock. */
        ioapic_zap_locks();
        clear_IO_APIC();
#endif
        lapic_shutdown();
        restore_boot_irq_mode();
#ifdef CONFIG_HPET_TIMER
        hpet_disable();
#endif
        crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE

static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
{
        unsigned int *nr_ranges = arg;

        (*nr_ranges)++;
        return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static struct crash_mem *fill_up_crash_elf_data(void)
{
        unsigned int nr_ranges = 0;
        struct crash_mem *cmem;

        walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback);
        if (!nr_ranges)
                return NULL;

        /*
         * Exclusion of crash region and/or crashk_low_res may cause
         * another range split. So add extra two slots here.
         */
        nr_ranges += 2;
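        /*
         * struct_size() computes sizeof(*cmem) plus nr_ranges times
         * sizeof(cmem->ranges[0]), saturating instead of wrapping on
         * overflow, so an absurd nr_ranges makes the allocation fail.
         */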
        cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
        if (!cmem)
                return NULL;

        cmem->max_nr_ranges = nr_ranges;
        cmem->nr_ranges = 0;

        return cmem;
}

/*
 * Look for any unwanted ranges between mstart and mend and remove them.
 * This may cause ranges to be split; the resulting sub-ranges are left
 * in the cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
        int ret = 0;

        /* Exclude the low 1M because it is always reserved */
        ret = crash_exclude_mem_range(cmem, 0, (1<<20)-1);
        if (ret)
                return ret;

        /* Exclude crashkernel region */
        ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
        if (ret)
                return ret;

        if (crashk_low_res.end)
                ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
                                              crashk_low_res.end);

        return ret;
}

static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
        struct crash_mem *cmem = arg;

        cmem->ranges[cmem->nr_ranges].start = res->start;
        cmem->ranges[cmem->nr_ranges].end = res->end;
        cmem->nr_ranges++;

        return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
                               unsigned long *sz)
{
        struct crash_mem *cmem;
        int ret;

        cmem = fill_up_crash_elf_data();
        if (!cmem)
                return -ENOMEM;

        ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);
        if (ret)
                goto out;

        /* Exclude unwanted mem ranges */
        ret = elf_header_exclude_ranges(cmem);
        if (ret)
                goto out;

        /* By default prepare 64bit headers */
        ret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);

out:
        vfree(cmem);
        return ret;
}

static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
        unsigned int nr_e820_entries;

        nr_e820_entries = params->e820_entries;
        if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
                return 1;

        memcpy(&params->e820_table[nr_e820_entries], entry, sizeof(struct e820_entry));
        params->e820_entries++;
        return 0;
}

static int memmap_entry_callback(struct resource *res, void *arg)
{
        struct crash_memmap_data *cmd = arg;
        struct boot_params *params = cmd->params;
        struct e820_entry ei;

        ei.addr = res->start;
        ei.size = resource_size(res);
        ei.type = cmd->type;
        add_e820_entry(params, &ei);

        return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
                                 unsigned long long mstart,
                                 unsigned long long mend)
{
        unsigned long start, end;

        cmem->ranges[0].start = mstart;
        cmem->ranges[0].end = mend;
        cmem->nr_ranges = 1;

        /* Exclude elf header region */
        start = image->arch.elf_load_addr;
        end = start + image->arch.elf_headers_sz - 1;
        return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
        int i, ret = 0;
        unsigned long flags;
        struct e820_entry ei;
        struct crash_memmap_data cmd;
        struct crash_mem *cmem;

        cmem = vzalloc(sizeof(struct crash_mem));
        if (!cmem)
                return -ENOMEM;

        memset(&cmd, 0, sizeof(struct crash_memmap_data));
        cmd.params = params;

        /* Add the low 1M */
        cmd.type = E820_TYPE_RAM;
        flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
        walk_iomem_res_desc(IORES_DESC_NONE, flags, 0, (1<<20)-1, &cmd,
                            memmap_entry_callback);
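
        /*
         * Note: crash_reserve_low_1M() memblock-reserves the low 1M, but
         * it is still "System RAM" in the iomem tree, and the kdump
         * kernel needs it described as RAM (e.g. for the real-mode
         * trampoline), so it is added back to the memory map above.
         */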

        /* Add ACPI tables */
        cmd.type = E820_TYPE_ACPI;
        flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
                            memmap_entry_callback);

        /* Add ACPI Non-volatile Storage */
        cmd.type = E820_TYPE_NVS;
        walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
                            memmap_entry_callback);

        /* Add e820 reserved ranges */
        cmd.type = E820_TYPE_RESERVED;
        flags = IORESOURCE_MEM;
        walk_iomem_res_desc(IORES_DESC_RESERVED, flags, 0, -1, &cmd,
                            memmap_entry_callback);

        /* Add crashk_low_res region */
        if (crashk_low_res.end) {
                ei.addr = crashk_low_res.start;
                ei.size = resource_size(&crashk_low_res);
                ei.type = E820_TYPE_RAM;
                add_e820_entry(params, &ei);
        }

        /* Exclude some ranges from crashk_res and add rest to memmap */
        ret = memmap_exclude_ranges(image, cmem, crashk_res.start, crashk_res.end);
        if (ret)
                goto out;

        for (i = 0; i < cmem->nr_ranges; i++) {
                ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

                /* If entry is less than a page, skip it */
                if (ei.size < PAGE_SIZE)
                        continue;
                ei.addr = cmem->ranges[i].start;
                ei.type = E820_TYPE_RAM;
                add_e820_entry(params, &ei);
        }

out:
        vfree(cmem);
        return ret;
}

int crash_load_segments(struct kimage *image)
{
        int ret;
        struct kexec_buf kbuf = { .image = image, .buf_min = 0,
                                  .buf_max = ULONG_MAX, .top_down = false };

        /* Prepare elf headers and add a segment */
        ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
        if (ret)
                return ret;

        image->arch.elf_headers = kbuf.buffer;
        image->arch.elf_headers_sz = kbuf.bufsz;

        kbuf.memsz = kbuf.bufsz;
        kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
        kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
        ret = kexec_add_buffer(&kbuf);
        if (ret) {
                vfree((void *)image->arch.elf_headers);
                return ret;
        }
        image->arch.elf_load_addr = kbuf.mem;
        pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
                 image->arch.elf_load_addr, kbuf.bufsz, kbuf.memsz);

        return ret;
}
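
/*
 * Usage note (behavior of the x86 bzImage kexec loader, not of this
 * function): kexec-bzimage64.c appends "elfcorehdr=<elf_load_addr>" to
 * the crash kernel's command line, which is how the second kernel finds
 * these headers and exposes the old memory as /proc/vmcore.
 */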
#endif /* CONFIG_KEXEC_FILE */