/*
 * fs/proc/vmcore.c Interface for accessing the crash
 *                  dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
        if (oldmem_pfn_is_ram)
                return -EBUSY;
        oldmem_pfn_is_ram = fn;
        return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
        oldmem_pfn_is_ram = NULL;
        wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
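
/*
 * Example (illustrative only, not part of the original file): a
 * hypervisor guest driver would register its page-type check at probe
 * time and drop it again on removal.  The my_balloon_* names below are
 * hypothetical; the inner helper returns 0 for a ballooned-out page and
 * 1 for a page backed by real memory.
 *
 *	static int my_balloon_pfn_is_ram(unsigned long pfn)
 *	{
 *		return my_balloon_page_is_populated(pfn) ? 1 : 0;
 *	}
 *
 *	static int __init my_balloon_init(void)
 *	{
 *		return register_oldmem_pfn_is_ram(&my_balloon_pfn_is_ram);
 *	}
 *
 *	static void __exit my_balloon_exit(void)
 *	{
 *		unregister_oldmem_pfn_is_ram();
 *	}
 */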

static int pfn_is_ram(unsigned long pfn)
{
        int (*fn)(unsigned long pfn);
        /* pfn is ram unless fn() checks pagetype */
        int ret = 1;

        /*
         * Ask hypervisor if the pfn is really ram.
         * A ballooned page contains no data and reading from such a page
         * will cause high load in the hypervisor.
         */
        fn = oldmem_pfn_is_ram;
        if (fn)
                ret = fn(pfn);

        return ret;
}

/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
                                u64 *ppos, int userbuf)
{
        unsigned long pfn, offset;
        size_t nr_bytes;
        ssize_t read = 0, tmp;

        if (!count)
                return 0;

        offset = (unsigned long)(*ppos % PAGE_SIZE);
        pfn = (unsigned long)(*ppos / PAGE_SIZE);

        do {
                if (count > (PAGE_SIZE - offset))
                        nr_bytes = PAGE_SIZE - offset;
                else
                        nr_bytes = count;

                /* If pfn is not ram, return zeros for sparse dump files */
                if (pfn_is_ram(pfn) == 0)
                        memset(buf, 0, nr_bytes);
                else {
                        tmp = copy_oldmem_page(pfn, buf, nr_bytes,
                                               offset, userbuf);
                        if (tmp < 0)
                                return tmp;
                }
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
                read += nr_bytes;
                ++pfn;
                offset = 0;
        } while (count);

        return read;
}
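
/*
 * Worked example for read_from_oldmem() (illustrative, assuming 4 KiB
 * pages): a read of count=0x3000 at *ppos=0x1ff8 starts at pfn=1,
 * offset=0xff8.  The first iteration copies PAGE_SIZE - 0xff8 = 8
 * bytes and resets offset to 0, so the remaining 0x2ff8 bytes come
 * from pfn 2 onwards as two whole pages plus a final 0xff8-byte chunk.
 */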

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
        return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
        return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
                                  unsigned long from, unsigned long pfn,
                                  unsigned long size, pgprot_t prot)
{
        return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
        if (userbuf) {
                if (copy_to_user((char __user *) target, src, size))
                        return -EFAULT;
        } else {
                memcpy(target, src, size);
        }
        return 0;
}

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
                             int userbuf)
{
        ssize_t acc = 0, tmp;
        size_t tsz;
        u64 start;
        struct vmcore *m = NULL;

        if (buflen == 0 || *fpos >= vmcore_size)
                return 0;

        /* trim buflen to not go beyond EOF */
        if (buflen > vmcore_size - *fpos)
                buflen = vmcore_size - *fpos;

        /* Read ELF core header */
        if (*fpos < elfcorebuf_sz) {
                tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
                if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        /* Read Elf note segment */
        if (*fpos < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
                kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
                if (copy_to(buffer, kaddr, tsz, userbuf))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (*fpos < m->offset + m->size) {
                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - *fpos,
                                            buflen);
                        start = m->paddr + *fpos - m->offset;
                        tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
                        if (tmp < 0)
                                return tmp;
                        buflen -= tsz;
                        *fpos += tsz;
                        buffer += tsz;
                        acc += tsz;

                        /* leave now if filled buffer already */
                        if (buflen == 0)
                                return acc;
                }
        }

        return acc;
}
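
/*
 * File layout handled by __read_vmcore() above (illustrative):
 *
 *	[0, elfcorebuf_sz)                ELF header + program headers
 *	[elfcorebuf_sz, +elfnotes_sz)     merged ELF note segment
 *	[m->offset, m->offset + m->size)  one range per struct vmcore,
 *	                                  backed by old memory at m->paddr
 *
 * A read that crosses a boundary is satisfied piecewise: each branch
 * consumes tsz bytes and falls through to the next region.
 */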

static ssize_t read_vmcore(struct file *file, char __user *buffer,
                           size_t buflen, loff_t *fpos)
{
        return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#ifdef CONFIG_S390
        struct address_space *mapping = vma->vm_file->f_mapping;
        pgoff_t index = vmf->pgoff;
        struct page *page;
        loff_t offset;
        char *buf;
        int rc;

        page = find_or_create_page(mapping, index, GFP_KERNEL);
        if (!page)
                return VM_FAULT_OOM;
        if (!PageUptodate(page)) {
                offset = (loff_t) index << PAGE_SHIFT;
                buf = __va((page_to_pfn(page) << PAGE_SHIFT));
                rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
                if (rc < 0) {
                        unlock_page(page);
                        put_page(page);
                        return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
                }
                SetPageUptodate(page);
        }
        unlock_page(page);
        vmf->page = page;
        return 0;
#else
        return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
        .fault = mmap_vmcore_fault,
};

/**
 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
 *                      vmalloc memory
 *
 * @notes_sz: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *alloc_elfnotes_buf(size_t notes_sz)
{
#ifdef CONFIG_MMU
        return vmalloc_user(notes_sz);
#else
        return vzalloc(notes_sz);
#endif
}
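
/*
 * Design note: vmalloc_user() both zeroes the area and sets VM_USERMAP,
 * which remap_vmalloc_range_partial() insists on; plain vzalloc() memory
 * cannot be handed out to user space that way, which is why the two
 * configurations are kept separate.
 */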

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
                                    unsigned long from, unsigned long pfn,
                                    unsigned long size, pgprot_t prot)
{
        unsigned long map_size;
        unsigned long pos_start, pos_end, pos;
        unsigned long zeropage_pfn = my_zero_pfn(0);
        size_t len = 0;

        pos_start = pfn;
        pos_end = pfn + (size >> PAGE_SHIFT);

        for (pos = pos_start; pos < pos_end; ++pos) {
                if (!pfn_is_ram(pos)) {
                        /*
                         * We hit a page which is not ram. Remap the continuous
                         * region between pos_start and pos-1 and replace
                         * the non-ram page at pos with the zero page.
                         */
                        if (pos > pos_start) {
                                /* Remap continuous region */
                                map_size = (pos - pos_start) << PAGE_SHIFT;
                                if (remap_oldmem_pfn_range(vma, from + len,
                                                           pos_start, map_size,
                                                           prot))
                                        goto fail;
                                len += map_size;
                        }
                        /* Remap the zero page */
                        if (remap_oldmem_pfn_range(vma, from + len,
                                                   zeropage_pfn,
                                                   PAGE_SIZE, prot))
                                goto fail;
                        len += PAGE_SIZE;
                        pos_start = pos + 1;
                }
        }
        if (pos > pos_start) {
                /* Remap the rest */
                map_size = (pos - pos_start) << PAGE_SHIFT;
                if (remap_oldmem_pfn_range(vma, from + len, pos_start,
                                           map_size, prot))
                        goto fail;
        }
        return 0;
fail:
        do_munmap(vma->vm_mm, from, len);
        return -EAGAIN;
}
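
/*
 * Illustrative walk (not from the source): remapping pfns 10..13 where
 * pfn 12 is reported as not ram first maps [10,11] via
 * remap_oldmem_pfn_range(), then maps the shared zero page in place of
 * pfn 12, and finally maps [13,13] in the trailing "remap the rest"
 * step, so the user mapping stays virtually contiguous throughout.
 */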

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
                                   unsigned long from, unsigned long pfn,
                                   unsigned long size, pgprot_t prot)
{
        /*
         * Check if oldmem_pfn_is_ram was registered to avoid
         * looping over all pages without a reason.
         */
        if (oldmem_pfn_is_ram)
                return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
        else
                return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        u64 start, end, len, tsz;
        struct vmcore *m;

        start = (u64)vma->vm_pgoff << PAGE_SHIFT;
        end = start + size;

        if (size > vmcore_size || end > vmcore_size)
                return -EINVAL;

        if (vma->vm_flags & (VM_WRITE | VM_EXEC))
                return -EPERM;

        vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_ops = &vmcore_mmap_ops;

        len = 0;

        if (start < elfcorebuf_sz) {
                u64 pfn;

                tsz = min(elfcorebuf_sz - (size_t)start, size);
                pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
                if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
                                    vma->vm_page_prot))
                        return -EAGAIN;
                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        if (start < elfcorebuf_sz + elfnotes_sz) {
                void *kaddr;

                tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
                kaddr = elfnotes_buf + start - elfcorebuf_sz;
                if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
                                                kaddr, tsz))
                        goto fail;
                size -= tsz;
                start += tsz;
                len += tsz;

                if (size == 0)
                        return 0;
        }

        list_for_each_entry(m, &vmcore_list, list) {
                if (start < m->offset + m->size) {
                        u64 paddr = 0;

                        tsz = (size_t)min_t(unsigned long long,
                                            m->offset + m->size - start, size);
                        paddr = m->paddr + start - m->offset;
                        if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
                                                    paddr >> PAGE_SHIFT, tsz,
                                                    vma->vm_page_prot))
                                goto fail;
                        size -= tsz;
                        start += tsz;
                        len += tsz;

                        if (size == 0)
                                return 0;
                }
        }

        return 0;
fail:
        do_munmap(vma->vm_mm, vma->vm_start, len);
        return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
#endif

static const struct file_operations proc_vmcore_operations = {
        .read = read_vmcore,
        .llseek = default_llseek,
        .mmap = mmap_vmcore,
};
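
/*
 * Typical user-space consumer (illustrative sketch, error handling
 * trimmed).  Dump filtering tools such as makedumpfile map the file
 * read-only:
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	struct stat st;
 *
 *	fstat(fd, &st);
 *	void *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * Requesting PROT_WRITE or PROT_EXEC is rejected by mmap_vmcore()
 * above with -EPERM.
 */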

static struct vmcore *__init get_new_element(void)
{
        return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
                                  struct list_head *vc_list)
{
        u64 size;
        struct vmcore *m;

        size = elfsz + elfnotesegsz;
        list_for_each_entry(m, vc_list, list) {
                size += m->size;
        }
        return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
        int i, rc = 0;
        Elf64_Phdr *phdr_ptr;
        Elf64_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;

                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf64_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0) {
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
                }
        }

        return 0;
}
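
/*
 * Illustrative size computation: for a note with n_namesz = 5 and
 * n_descsz = 6, both values round up to the next multiple of 4, so
 * sz = sizeof(Elf64_Nhdr) + 8 + 8 = 12 + 16 = 28 bytes, and nhdr_ptr
 * advances by 28 bytes to the next note header.
 */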

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to
 * by @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf64_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments from the 1st
 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
 * that the size of the buffer @notes_buf is equal to or larger than the
 * sum of the real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to
 * by @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc = 0;
        Elf64_Phdr *phdr_ptr;

        phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;

                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;

        rc = update_note_header_size_elf64(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = alloc_elfnotes_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type = PT_NOTE;
        phdr.p_flags = 0;
        note_off = sizeof(Elf64_Ehdr) +
                   (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
        phdr.p_offset = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr = phdr.p_paddr = 0;
        phdr.p_filesz = phdr.p_memsz = phdr_sz;
        phdr.p_align = 0;

        /* Add merged PT_NOTE program header */
        tmp = elfptr + sizeof(Elf64_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp + i,
                ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        return 0;
}
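
/*
 * Effect of the merge above (illustrative, n PT_NOTE entries):
 *
 *	before: Ehdr | PT_NOTE * n     | PT_LOAD ...
 *	after:  Ehdr | merged PT_NOTE  | PT_LOAD ... | zero padding
 *
 * e_phnum shrinks by n - 1 and the header buffer is re-rounded to
 * PAGE_SIZE so that mmap_vmcore() can remap it page-wise.
 */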

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
        int i, rc = 0;
        Elf32_Phdr *phdr_ptr;
        Elf32_Nhdr *nhdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                void *notes_section;
                u64 offset, max_sz, sz, real_sz = 0;

                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                max_sz = phdr_ptr->p_memsz;
                offset = phdr_ptr->p_offset;
                notes_section = kmalloc(max_sz, GFP_KERNEL);
                if (!notes_section)
                        return -ENOMEM;
                rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
                if (rc < 0) {
                        kfree(notes_section);
                        return rc;
                }
                nhdr_ptr = notes_section;
                while (nhdr_ptr->n_namesz != 0) {
                        sz = sizeof(Elf32_Nhdr) +
                                (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
                                (((u64)nhdr_ptr->n_descsz + 3) & ~3);
                        if ((real_sz + sz) > max_sz) {
                                pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
                                        nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
                                break;
                        }
                        real_sz += sz;
                        nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
                }
                kfree(notes_section);
                phdr_ptr->p_memsz = real_sz;
                if (real_sz == 0) {
                        pr_warn("Warning: Zero PT_NOTE entries found\n");
                }
        }

        return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to
 * by @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
                                                 int *nr_ptnote, u64 *sz_ptnote)
{
        int i;
        Elf32_Phdr *phdr_ptr;

        *nr_ptnote = *sz_ptnote = 0;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                *nr_ptnote += 1;
                *sz_ptnote += phdr_ptr->p_memsz;
        }

        return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments from the 1st
 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
 * that the size of the buffer @notes_buf is equal to or larger than the
 * sum of the real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to
 * by @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
        int i, rc = 0;
        Elf32_Phdr *phdr_ptr;

        phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 offset;

                if (phdr_ptr->p_type != PT_NOTE)
                        continue;
                offset = phdr_ptr->p_offset;
                rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
                                           &offset);
                if (rc < 0)
                        return rc;
                notes_buf += phdr_ptr->p_memsz;
        }

        return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
                                           char **notes_buf, size_t *notes_sz)
{
        int i, nr_ptnote = 0, rc = 0;
        char *tmp;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr phdr;
        u64 phdr_sz = 0, note_off;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;

        rc = update_note_header_size_elf32(ehdr_ptr);
        if (rc < 0)
                return rc;

        rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
        if (rc < 0)
                return rc;

        *notes_sz = roundup(phdr_sz, PAGE_SIZE);
        *notes_buf = alloc_elfnotes_buf(*notes_sz);
        if (!*notes_buf)
                return -ENOMEM;

        rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
        if (rc < 0)
                return rc;

        /* Prepare merged PT_NOTE program header. */
        phdr.p_type = PT_NOTE;
        phdr.p_flags = 0;
        note_off = sizeof(Elf32_Ehdr) +
                   (ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
        phdr.p_offset = roundup(note_off, PAGE_SIZE);
        phdr.p_vaddr = phdr.p_paddr = 0;
        phdr.p_filesz = phdr.p_memsz = phdr_sz;
        phdr.p_align = 0;

        /* Add merged PT_NOTE program header */
        tmp = elfptr + sizeof(Elf32_Ehdr);
        memcpy(tmp, &phdr, sizeof(phdr));
        tmp += sizeof(phdr);

        /* Remove unwanted PT_NOTE program headers. */
        i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
        *elfsz = *elfsz - i;
        memmove(tmp, tmp + i,
                ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
        memset(elfptr + *elfsz, 0, i);
        *elfsz = roundup(*elfsz, PAGE_SIZE);

        /* Modify e_phnum to reflect merged headers. */
        ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

        return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
                                                       size_t elfsz,
                                                       size_t elfnotes_sz,
                                                       struct list_head *vc_list)
{
        int i;
        Elf64_Ehdr *ehdr_ptr;
        Elf64_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf64_Ehdr *)elfptr;
        phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list. */
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset. */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}
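
/*
 * Offset example (illustrative, 4 KiB pages): a PT_LOAD entry whose
 * old physical address, read from p_offset as the code above assumes,
 * is 0x10000100 with p_memsz 0x200 expands to the page-aligned chunk
 * [0x10000000, 0x10001000).  Its exported file offset becomes
 * vmcore_off + 0x100, so readers still find the segment at the right
 * byte inside the page-aligned chunk.
 */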

static int __init process_ptload_program_headers_elf32(char *elfptr,
                                                       size_t elfsz,
                                                       size_t elfnotes_sz,
                                                       struct list_head *vc_list)
{
        int i;
        Elf32_Ehdr *ehdr_ptr;
        Elf32_Phdr *phdr_ptr;
        loff_t vmcore_off;
        struct vmcore *new;

        ehdr_ptr = (Elf32_Ehdr *)elfptr;
        phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
                u64 paddr, start, end, size;

                if (phdr_ptr->p_type != PT_LOAD)
                        continue;

                paddr = phdr_ptr->p_offset;
                start = rounddown(paddr, PAGE_SIZE);
                end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
                size = end - start;

                /* Add this contiguous chunk of memory to vmcore list. */
                new = get_new_element();
                if (!new)
                        return -ENOMEM;
                new->paddr = start;
                new->size = size;
                list_add_tail(&new->list, vc_list);

                /* Update the program header offset */
                phdr_ptr->p_offset = vmcore_off + (paddr - start);
                vmcore_off = vmcore_off + size;
        }
        return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
                                           struct list_head *vc_list)
{
        loff_t vmcore_off;
        struct vmcore *m;

        /* Skip Elf header, program headers and Elf note segment. */
        vmcore_off = elfsz + elfnotes_sz;

        list_for_each_entry(m, vc_list, list) {
                m->offset = vmcore_off;
                vmcore_off += m->size;
        }
}

static void free_elfcorebuf(void)
{
        free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
        elfcorebuf = NULL;
        vfree(elfnotes_buf);
        elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
        int rc = 0;
        Elf64_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
            (ehdr.e_type != ET_CORE) ||
            !vmcore_elf64_check_arch(&ehdr) ||
            ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
            ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
            ehdr.e_version != EV_CURRENT ||
            ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
            ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
            ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
                             ehdr.e_phnum * sizeof(Elf64_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}

static int __init parse_crash_elf32_headers(void)
{
        int rc = 0;
        Elf32_Ehdr ehdr;
        u64 addr;

        addr = elfcorehdr_addr;

        /* Read Elf header */
        rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
        if (rc < 0)
                return rc;

        /* Do some basic verification. */
        if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
            (ehdr.e_type != ET_CORE) ||
            !elf_check_arch(&ehdr) ||
            ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
            ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
            ehdr.e_version != EV_CURRENT ||
            ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
            ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
            ehdr.e_phnum == 0) {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Read in all elf headers. */
        elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) +
                             ehdr.e_phnum * sizeof(Elf32_Phdr);
        elfcorebuf_sz = elfcorebuf_sz_orig;
        elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                              get_order(elfcorebuf_sz_orig));
        if (!elfcorebuf)
                return -ENOMEM;
        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
        if (rc < 0)
                goto fail;

        /* Merge all PT_NOTE headers into one. */
        rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
                                      &elfnotes_buf, &elfnotes_sz);
        if (rc)
                goto fail;
        rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
                                                  elfnotes_sz, &vmcore_list);
        if (rc)
                goto fail;
        set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
        return 0;
fail:
        free_elfcorebuf();
        return rc;
}

static int __init parse_crash_elf_headers(void)
{
        unsigned char e_ident[EI_NIDENT];
        u64 addr;
        int rc = 0;

        addr = elfcorehdr_addr;
        rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
        if (rc < 0)
                return rc;
        if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
                pr_warn("Warning: Core image elf header not found\n");
                return -EINVAL;
        }

        if (e_ident[EI_CLASS] == ELFCLASS64) {
                rc = parse_crash_elf64_headers();
                if (rc)
                        return rc;
        } else if (e_ident[EI_CLASS] == ELFCLASS32) {
                rc = parse_crash_elf32_headers();
                if (rc)
                        return rc;
        } else {
                pr_warn("Warning: Core image elf header is not sane\n");
                return -EINVAL;
        }

        /* Determine vmcore size. */
        vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
                                      &vmcore_list);

        return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
        int rc = 0;

        /* Allow architectures to allocate ELF header in 2nd kernel */
        rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
        if (rc)
                return rc;
        /*
         * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
         * then capture the dump.
         */
        if (!(is_vmcore_usable()))
                return rc;
        rc = parse_crash_elf_headers();
        if (rc) {
                pr_warn("Kdump: vmcore not initialized\n");
                return rc;
        }
        elfcorehdr_free(elfcorehdr_addr);
        elfcorehdr_addr = ELFCORE_ADDR_ERR;

        proc_vmcore = proc_create("vmcore", S_IRUSR, NULL,
                                  &proc_vmcore_operations);
        if (proc_vmcore)
                proc_vmcore->size = vmcore_size;
        return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
        struct list_head *pos, *next;

        if (proc_vmcore) {
                proc_remove(proc_vmcore);
                proc_vmcore = NULL;
        }

        /* clear the vmcore list. */
        list_for_each_safe(pos, next, &vmcore_list) {
                struct vmcore *m;

                m = list_entry(pos, struct vmcore, list);
                list_del(&m->list);
                kfree(m);
        }
        free_elfcorebuf();
}
1/*
2 * fs/proc/vmcore.c Interface for accessing the crash
3 * dump from the system's previous life.
4 * Heavily borrowed from fs/proc/kcore.c
5 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
6 * Copyright (C) IBM Corporation, 2004. All rights reserved
7 *
8 */
9
10#include <linux/mm.h>
11#include <linux/kcore.h>
12#include <linux/user.h>
13#include <linux/elf.h>
14#include <linux/elfcore.h>
15#include <linux/export.h>
16#include <linux/slab.h>
17#include <linux/highmem.h>
18#include <linux/printk.h>
19#include <linux/bootmem.h>
20#include <linux/init.h>
21#include <linux/crash_dump.h>
22#include <linux/list.h>
23#include <linux/vmalloc.h>
24#include <linux/pagemap.h>
25#include <asm/uaccess.h>
26#include <asm/io.h>
27#include "internal.h"
28
29/* List representing chunks of contiguous memory areas and their offsets in
30 * vmcore file.
31 */
32static LIST_HEAD(vmcore_list);
33
34/* Stores the pointer to the buffer containing kernel elf core headers. */
35static char *elfcorebuf;
36static size_t elfcorebuf_sz;
37static size_t elfcorebuf_sz_orig;
38
39static char *elfnotes_buf;
40static size_t elfnotes_sz;
41
42/* Total size of vmcore file. */
43static u64 vmcore_size;
44
45static struct proc_dir_entry *proc_vmcore = NULL;
46
47/*
48 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
49 * The called function has to take care of module refcounting.
50 */
51static int (*oldmem_pfn_is_ram)(unsigned long pfn);
52
53int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
54{
55 if (oldmem_pfn_is_ram)
56 return -EBUSY;
57 oldmem_pfn_is_ram = fn;
58 return 0;
59}
60EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);
61
62void unregister_oldmem_pfn_is_ram(void)
63{
64 oldmem_pfn_is_ram = NULL;
65 wmb();
66}
67EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
68
69static int pfn_is_ram(unsigned long pfn)
70{
71 int (*fn)(unsigned long pfn);
72 /* pfn is ram unless fn() checks pagetype */
73 int ret = 1;
74
75 /*
76 * Ask hypervisor if the pfn is really ram.
77 * A ballooned page contains no data and reading from such a page
78 * will cause high load in the hypervisor.
79 */
80 fn = oldmem_pfn_is_ram;
81 if (fn)
82 ret = fn(pfn);
83
84 return ret;
85}
86
87/* Reads a page from the oldmem device from given offset. */
88static ssize_t read_from_oldmem(char *buf, size_t count,
89 u64 *ppos, int userbuf)
90{
91 unsigned long pfn, offset;
92 size_t nr_bytes;
93 ssize_t read = 0, tmp;
94
95 if (!count)
96 return 0;
97
98 offset = (unsigned long)(*ppos % PAGE_SIZE);
99 pfn = (unsigned long)(*ppos / PAGE_SIZE);
100
101 do {
102 if (count > (PAGE_SIZE - offset))
103 nr_bytes = PAGE_SIZE - offset;
104 else
105 nr_bytes = count;
106
107 /* If pfn is not ram, return zeros for sparse dump files */
108 if (pfn_is_ram(pfn) == 0)
109 memset(buf, 0, nr_bytes);
110 else {
111 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
112 offset, userbuf);
113 if (tmp < 0)
114 return tmp;
115 }
116 *ppos += nr_bytes;
117 count -= nr_bytes;
118 buf += nr_bytes;
119 read += nr_bytes;
120 ++pfn;
121 offset = 0;
122 } while (count);
123
124 return read;
125}
126
127/*
128 * Architectures may override this function to allocate ELF header in 2nd kernel
129 */
130int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
131{
132 return 0;
133}
134
135/*
136 * Architectures may override this function to free header
137 */
138void __weak elfcorehdr_free(unsigned long long addr)
139{}
140
141/*
142 * Architectures may override this function to read from ELF header
143 */
144ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
145{
146 return read_from_oldmem(buf, count, ppos, 0);
147}
148
149/*
150 * Architectures may override this function to read from notes sections
151 */
152ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
153{
154 return read_from_oldmem(buf, count, ppos, 0);
155}
156
157/*
158 * Architectures may override this function to map oldmem
159 */
160int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
161 unsigned long from, unsigned long pfn,
162 unsigned long size, pgprot_t prot)
163{
164 return remap_pfn_range(vma, from, pfn, size, prot);
165}
166
167/*
168 * Copy to either kernel or user space
169 */
170static int copy_to(void *target, void *src, size_t size, int userbuf)
171{
172 if (userbuf) {
173 if (copy_to_user((char __user *) target, src, size))
174 return -EFAULT;
175 } else {
176 memcpy(target, src, size);
177 }
178 return 0;
179}
180
181/* Read from the ELF header and then the crash dump. On error, negative value is
182 * returned otherwise number of bytes read are returned.
183 */
184static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
185 int userbuf)
186{
187 ssize_t acc = 0, tmp;
188 size_t tsz;
189 u64 start;
190 struct vmcore *m = NULL;
191
192 if (buflen == 0 || *fpos >= vmcore_size)
193 return 0;
194
195 /* trim buflen to not go beyond EOF */
196 if (buflen > vmcore_size - *fpos)
197 buflen = vmcore_size - *fpos;
198
199 /* Read ELF core header */
200 if (*fpos < elfcorebuf_sz) {
201 tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
202 if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
203 return -EFAULT;
204 buflen -= tsz;
205 *fpos += tsz;
206 buffer += tsz;
207 acc += tsz;
208
209 /* leave now if filled buffer already */
210 if (buflen == 0)
211 return acc;
212 }
213
214 /* Read Elf note segment */
215 if (*fpos < elfcorebuf_sz + elfnotes_sz) {
216 void *kaddr;
217
218 tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
219 kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
220 if (copy_to(buffer, kaddr, tsz, userbuf))
221 return -EFAULT;
222 buflen -= tsz;
223 *fpos += tsz;
224 buffer += tsz;
225 acc += tsz;
226
227 /* leave now if filled buffer already */
228 if (buflen == 0)
229 return acc;
230 }
231
232 list_for_each_entry(m, &vmcore_list, list) {
233 if (*fpos < m->offset + m->size) {
234 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
235 start = m->paddr + *fpos - m->offset;
236 tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
237 if (tmp < 0)
238 return tmp;
239 buflen -= tsz;
240 *fpos += tsz;
241 buffer += tsz;
242 acc += tsz;
243
244 /* leave now if filled buffer already */
245 if (buflen == 0)
246 return acc;
247 }
248 }
249
250 return acc;
251}
252
253static ssize_t read_vmcore(struct file *file, char __user *buffer,
254 size_t buflen, loff_t *fpos)
255{
256 return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
257}
258
259/*
260 * The vmcore fault handler uses the page cache and fills data using the
261 * standard __vmcore_read() function.
262 *
263 * On s390 the fault handler is used for memory regions that can't be mapped
264 * directly with remap_pfn_range().
265 */
266static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
267{
268#ifdef CONFIG_S390
269 struct address_space *mapping = vma->vm_file->f_mapping;
270 pgoff_t index = vmf->pgoff;
271 struct page *page;
272 loff_t offset;
273 char *buf;
274 int rc;
275
276 page = find_or_create_page(mapping, index, GFP_KERNEL);
277 if (!page)
278 return VM_FAULT_OOM;
279 if (!PageUptodate(page)) {
280 offset = (loff_t) index << PAGE_CACHE_SHIFT;
281 buf = __va((page_to_pfn(page) << PAGE_SHIFT));
282 rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
283 if (rc < 0) {
284 unlock_page(page);
285 page_cache_release(page);
286 return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
287 }
288 SetPageUptodate(page);
289 }
290 unlock_page(page);
291 vmf->page = page;
292 return 0;
293#else
294 return VM_FAULT_SIGBUS;
295#endif
296}
297
298static const struct vm_operations_struct vmcore_mmap_ops = {
299 .fault = mmap_vmcore_fault,
300};
301
302/**
303 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
304 * vmalloc memory
305 *
306 * @notes_sz: size of buffer
307 *
308 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
309 * the buffer to user-space by means of remap_vmalloc_range().
310 *
311 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
312 * disabled and there's no need to allow users to mmap the buffer.
313 */
314static inline char *alloc_elfnotes_buf(size_t notes_sz)
315{
316#ifdef CONFIG_MMU
317 return vmalloc_user(notes_sz);
318#else
319 return vzalloc(notes_sz);
320#endif
321}
322
323/*
324 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
325 * essential for mmap_vmcore() in order to map physically
326 * non-contiguous objects (ELF header, ELF note segment and memory
327 * regions in the 1st kernel pointed to by PT_LOAD entries) into
328 * virtually contiguous user-space in ELF layout.
329 */
330#ifdef CONFIG_MMU
331static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
332{
333 size_t size = vma->vm_end - vma->vm_start;
334 u64 start, end, len, tsz;
335 struct vmcore *m;
336
337 start = (u64)vma->vm_pgoff << PAGE_SHIFT;
338 end = start + size;
339
340 if (size > vmcore_size || end > vmcore_size)
341 return -EINVAL;
342
343 if (vma->vm_flags & (VM_WRITE | VM_EXEC))
344 return -EPERM;
345
346 vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
347 vma->vm_flags |= VM_MIXEDMAP;
348 vma->vm_ops = &vmcore_mmap_ops;
349
350 len = 0;
351
352 if (start < elfcorebuf_sz) {
353 u64 pfn;
354
355 tsz = min(elfcorebuf_sz - (size_t)start, size);
356 pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
357 if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
358 vma->vm_page_prot))
359 return -EAGAIN;
360 size -= tsz;
361 start += tsz;
362 len += tsz;
363
364 if (size == 0)
365 return 0;
366 }
367
368 if (start < elfcorebuf_sz + elfnotes_sz) {
369 void *kaddr;
370
371 tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
372 kaddr = elfnotes_buf + start - elfcorebuf_sz;
373 if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
374 kaddr, tsz))
375 goto fail;
376 size -= tsz;
377 start += tsz;
378 len += tsz;
379
380 if (size == 0)
381 return 0;
382 }
383
384 list_for_each_entry(m, &vmcore_list, list) {
385 if (start < m->offset + m->size) {
386 u64 paddr = 0;
387
388 tsz = min_t(size_t, m->offset + m->size - start, size);
389 paddr = m->paddr + start - m->offset;
390 if (remap_oldmem_pfn_range(vma, vma->vm_start + len,
391 paddr >> PAGE_SHIFT, tsz,
392 vma->vm_page_prot))
393 goto fail;
394 size -= tsz;
395 start += tsz;
396 len += tsz;
397
398 if (size == 0)
399 return 0;
400 }
401 }
402
403 return 0;
404fail:
405 do_munmap(vma->vm_mm, vma->vm_start, len);
406 return -EAGAIN;
407}
408#else
409static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
410{
411 return -ENOSYS;
412}
413#endif
414
415static const struct file_operations proc_vmcore_operations = {
416 .read = read_vmcore,
417 .llseek = default_llseek,
418 .mmap = mmap_vmcore,
419};
420
421static struct vmcore* __init get_new_element(void)
422{
423 return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
424}
425
426static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
427 struct list_head *vc_list)
428{
429 u64 size;
430 struct vmcore *m;
431
432 size = elfsz + elfnotesegsz;
433 list_for_each_entry(m, vc_list, list) {
434 size += m->size;
435 }
436 return size;
437}
438
439/**
440 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
441 *
442 * @ehdr_ptr: ELF header
443 *
444 * This function updates p_memsz member of each PT_NOTE entry in the
445 * program header table pointed to by @ehdr_ptr to real size of ELF
446 * note segment.
447 */
448static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
449{
450 int i, rc=0;
451 Elf64_Phdr *phdr_ptr;
452 Elf64_Nhdr *nhdr_ptr;
453
454 phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
455 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
456 void *notes_section;
457 u64 offset, max_sz, sz, real_sz = 0;
458 if (phdr_ptr->p_type != PT_NOTE)
459 continue;
460 max_sz = phdr_ptr->p_memsz;
461 offset = phdr_ptr->p_offset;
462 notes_section = kmalloc(max_sz, GFP_KERNEL);
463 if (!notes_section)
464 return -ENOMEM;
465 rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
466 if (rc < 0) {
467 kfree(notes_section);
468 return rc;
469 }
470 nhdr_ptr = notes_section;
471 while (nhdr_ptr->n_namesz != 0) {
472 sz = sizeof(Elf64_Nhdr) +
473 ((nhdr_ptr->n_namesz + 3) & ~3) +
474 ((nhdr_ptr->n_descsz + 3) & ~3);
475 if ((real_sz + sz) > max_sz) {
476 pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
477 nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
478 break;
479 }
480 real_sz += sz;
481 nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
482 }
483 kfree(notes_section);
484 phdr_ptr->p_memsz = real_sz;
485 if (real_sz == 0) {
486 pr_warn("Warning: Zero PT_NOTE entries found\n");
487 }
488 }
489
490 return 0;
491}
492
493/**
494 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
495 * headers and sum of real size of their ELF note segment headers and
496 * data.
497 *
498 * @ehdr_ptr: ELF header
499 * @nr_ptnote: buffer for the number of PT_NOTE program headers
500 * @sz_ptnote: buffer for size of unique PT_NOTE program header
501 *
502 * This function is used to merge multiple PT_NOTE program headers
503 * into a unique single one. The resulting unique entry will have
504 * @sz_ptnote in its phdr->p_mem.
505 *
506 * It is assumed that program headers with PT_NOTE type pointed to by
507 * @ehdr_ptr has already been updated by update_note_header_size_elf64
508 * and each of PT_NOTE program headers has actual ELF note segment
509 * size in its p_memsz member.
510 */
511static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
512 int *nr_ptnote, u64 *sz_ptnote)
513{
514 int i;
515 Elf64_Phdr *phdr_ptr;
516
517 *nr_ptnote = *sz_ptnote = 0;
518
519 phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
520 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
521 if (phdr_ptr->p_type != PT_NOTE)
522 continue;
523 *nr_ptnote += 1;
524 *sz_ptnote += phdr_ptr->p_memsz;
525 }
526
527 return 0;
528}
529
530/**
531 * copy_notes_elf64 - copy ELF note segments in a given buffer
532 *
533 * @ehdr_ptr: ELF header
534 * @notes_buf: buffer into which ELF note segments are copied
535 *
536 * This function is used to copy ELF note segment in the 1st kernel
537 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
538 * size of the buffer @notes_buf is equal to or larger than sum of the
539 * real ELF note segment headers and data.
540 *
541 * It is assumed that program headers with PT_NOTE type pointed to by
542 * @ehdr_ptr has already been updated by update_note_header_size_elf64
543 * and each of PT_NOTE program headers has actual ELF note segment
544 * size in its p_memsz member.
545 */
546static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
547{
548 int i, rc=0;
549 Elf64_Phdr *phdr_ptr;
550
551 phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);
552
553 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
554 u64 offset;
555 if (phdr_ptr->p_type != PT_NOTE)
556 continue;
557 offset = phdr_ptr->p_offset;
558 rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
559 &offset);
560 if (rc < 0)
561 return rc;
562 notes_buf += phdr_ptr->p_memsz;
563 }
564
565 return 0;
566}
567
568/* Merges all the PT_NOTE headers into one. */
569static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
570 char **notes_buf, size_t *notes_sz)
571{
572 int i, nr_ptnote=0, rc=0;
573 char *tmp;
574 Elf64_Ehdr *ehdr_ptr;
575 Elf64_Phdr phdr;
576 u64 phdr_sz = 0, note_off;
577
578 ehdr_ptr = (Elf64_Ehdr *)elfptr;
579
580 rc = update_note_header_size_elf64(ehdr_ptr);
581 if (rc < 0)
582 return rc;
583
584 rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
585 if (rc < 0)
586 return rc;
587
588 *notes_sz = roundup(phdr_sz, PAGE_SIZE);
589 *notes_buf = alloc_elfnotes_buf(*notes_sz);
590 if (!*notes_buf)
591 return -ENOMEM;
592
593 rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
594 if (rc < 0)
595 return rc;
596
597 /* Prepare merged PT_NOTE program header. */
598 phdr.p_type = PT_NOTE;
599 phdr.p_flags = 0;
600 note_off = sizeof(Elf64_Ehdr) +
601 (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
602 phdr.p_offset = roundup(note_off, PAGE_SIZE);
603 phdr.p_vaddr = phdr.p_paddr = 0;
604 phdr.p_filesz = phdr.p_memsz = phdr_sz;
605 phdr.p_align = 0;
606
607 /* Add merged PT_NOTE program header*/
608 tmp = elfptr + sizeof(Elf64_Ehdr);
609 memcpy(tmp, &phdr, sizeof(phdr));
610 tmp += sizeof(phdr);
611
612 /* Remove unwanted PT_NOTE program headers. */
613 i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
614 *elfsz = *elfsz - i;
615 memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
616 memset(elfptr + *elfsz, 0, i);
617 *elfsz = roundup(*elfsz, PAGE_SIZE);
618
619 /* Modify e_phnum to reflect merged headers. */
620 ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
621
622 return 0;
623}
624
625/**
626 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
627 *
628 * @ehdr_ptr: ELF header
629 *
630 * This function updates p_memsz member of each PT_NOTE entry in the
631 * program header table pointed to by @ehdr_ptr to real size of ELF
632 * note segment.
633 */
634static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
635{
636 int i, rc=0;
637 Elf32_Phdr *phdr_ptr;
638 Elf32_Nhdr *nhdr_ptr;
639
640 phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
641 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
642 void *notes_section;
643 u64 offset, max_sz, sz, real_sz = 0;
644 if (phdr_ptr->p_type != PT_NOTE)
645 continue;
646 max_sz = phdr_ptr->p_memsz;
647 offset = phdr_ptr->p_offset;
648 notes_section = kmalloc(max_sz, GFP_KERNEL);
649 if (!notes_section)
650 return -ENOMEM;
651 rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
652 if (rc < 0) {
653 kfree(notes_section);
654 return rc;
655 }
656 nhdr_ptr = notes_section;
657 while (nhdr_ptr->n_namesz != 0) {
658 sz = sizeof(Elf32_Nhdr) +
659 ((nhdr_ptr->n_namesz + 3) & ~3) +
660 ((nhdr_ptr->n_descsz + 3) & ~3);
661 if ((real_sz + sz) > max_sz) {
662 pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
663 nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
664 break;
665 }
666 real_sz += sz;
667 nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
668 }
669 kfree(notes_section);
670 phdr_ptr->p_memsz = real_sz;
671 if (real_sz == 0) {
672 pr_warn("Warning: Zero PT_NOTE entries found\n");
673 }
674 }
675
676 return 0;
677}
678
679/**
680 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
681 * headers and sum of real size of their ELF note segment headers and
682 * data.
683 *
684 * @ehdr_ptr: ELF header
685 * @nr_ptnote: buffer for the number of PT_NOTE program headers
686 * @sz_ptnote: buffer for size of unique PT_NOTE program header
687 *
688 * This function is used to merge multiple PT_NOTE program headers
689 * into a unique single one. The resulting unique entry will have
690 * @sz_ptnote in its phdr->p_mem.
691 *
692 * It is assumed that program headers with PT_NOTE type pointed to by
693 * @ehdr_ptr has already been updated by update_note_header_size_elf32
694 * and each of PT_NOTE program headers has actual ELF note segment
695 * size in its p_memsz member.
696 */
697static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
698 int *nr_ptnote, u64 *sz_ptnote)
699{
700 int i;
701 Elf32_Phdr *phdr_ptr;
702
703 *nr_ptnote = *sz_ptnote = 0;
704
705 phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
706 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
707 if (phdr_ptr->p_type != PT_NOTE)
708 continue;
709 *nr_ptnote += 1;
710 *sz_ptnote += phdr_ptr->p_memsz;
711 }
712
713 return 0;
714}
715
716/**
717 * copy_notes_elf32 - copy ELF note segments in a given buffer
718 *
719 * @ehdr_ptr: ELF header
720 * @notes_buf: buffer into which ELF note segments are copied
721 *
722 * This function is used to copy ELF note segment in the 1st kernel
723 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
724 * size of the buffer @notes_buf is equal to or larger than sum of the
725 * real ELF note segment headers and data.
726 *
727 * It is assumed that program headers with PT_NOTE type pointed to by
728 * @ehdr_ptr has already been updated by update_note_header_size_elf32
729 * and each of PT_NOTE program headers has actual ELF note segment
730 * size in its p_memsz member.
731 */
732static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
733{
734 int i, rc=0;
735 Elf32_Phdr *phdr_ptr;
736
737 phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);
738
739 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
740 u64 offset;
741 if (phdr_ptr->p_type != PT_NOTE)
742 continue;
743 offset = phdr_ptr->p_offset;
744 rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
745 &offset);
746 if (rc < 0)
747 return rc;
748 notes_buf += phdr_ptr->p_memsz;
749 }
750
751 return 0;
752}
753
754/* Merges all the PT_NOTE headers into one. */
755static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
756 char **notes_buf, size_t *notes_sz)
757{
758 int i, nr_ptnote=0, rc=0;
759 char *tmp;
760 Elf32_Ehdr *ehdr_ptr;
761 Elf32_Phdr phdr;
762 u64 phdr_sz = 0, note_off;
763
764 ehdr_ptr = (Elf32_Ehdr *)elfptr;
765
766 rc = update_note_header_size_elf32(ehdr_ptr);
767 if (rc < 0)
768 return rc;
769
770 rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
771 if (rc < 0)
772 return rc;
773
774 *notes_sz = roundup(phdr_sz, PAGE_SIZE);
775 *notes_buf = alloc_elfnotes_buf(*notes_sz);
776 if (!*notes_buf)
777 return -ENOMEM;
778
779 rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
780 if (rc < 0)
781 return rc;
782
783 /* Prepare merged PT_NOTE program header. */
784 phdr.p_type = PT_NOTE;
785 phdr.p_flags = 0;
786 note_off = sizeof(Elf32_Ehdr) +
787 (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
788 phdr.p_offset = roundup(note_off, PAGE_SIZE);
789 phdr.p_vaddr = phdr.p_paddr = 0;
790 phdr.p_filesz = phdr.p_memsz = phdr_sz;
791 phdr.p_align = 0;
792
793 /* Add merged PT_NOTE program header*/
794 tmp = elfptr + sizeof(Elf32_Ehdr);
795 memcpy(tmp, &phdr, sizeof(phdr));
796 tmp += sizeof(phdr);
797
798 /* Remove unwanted PT_NOTE program headers. */
799 i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
800 *elfsz = *elfsz - i;
801 memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
802 memset(elfptr + *elfsz, 0, i);
803 *elfsz = roundup(*elfsz, PAGE_SIZE);
804
805 /* Modify e_phnum to reflect merged headers. */
806 ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
807
808 return 0;
809}
810
811/* Add memory chunks represented by program headers to vmcore list. Also update
812 * the new offset fields of exported program headers. */
813static int __init process_ptload_program_headers_elf64(char *elfptr,
814 size_t elfsz,
815 size_t elfnotes_sz,
816 struct list_head *vc_list)
817{
818 int i;
819 Elf64_Ehdr *ehdr_ptr;
820 Elf64_Phdr *phdr_ptr;
821 loff_t vmcore_off;
822 struct vmcore *new;
823
824 ehdr_ptr = (Elf64_Ehdr *)elfptr;
825 phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
826
827 /* Skip Elf header, program headers and Elf note segment. */
828 vmcore_off = elfsz + elfnotes_sz;
829
830 for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
831 u64 paddr, start, end, size;
832
833 if (phdr_ptr->p_type != PT_LOAD)
834 continue;
835
		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

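/* 32-bit counterpart of process_ptload_program_headers_elf64(). */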
static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

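		/* See the comment on p_offset in the 64-bit variant above. */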
		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
					   struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

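/* Free the copied ELF headers and the merged ELF note segment buffer. */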
static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

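/*
 * Read the ELF64 core header exported by the crashed kernel,
 * sanity-check it, copy all ELF headers into elfcorebuf, merge the
 * PT_NOTE segments and build the list of memory chunks backing
 * /proc/vmcore.
 */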
static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !vmcore_elf64_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

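/* ELF32 counterpart of parse_crash_elf64_headers(). */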
static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !elf_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) +
				ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

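/*
 * Peek at e_ident of the exported core header to decide whether it is
 * an ELF32 or ELF64 image, hand off to the matching parser and then
 * compute the total size of the vmcore file.
 */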
static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!is_vmcore_usable())
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}

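	/*
	 * The copied headers are self-contained now, so let the
	 * architecture free the original ELF header area and poison
	 * elfcorehdr_addr so it is not used again.
	 */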
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* Clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();
}