v4.6
 
   1/*
   2 *	fs/proc/vmcore.c Interface for accessing the crash
   3 * 				 dump from the system's previous life.
   4 * 	Heavily borrowed from fs/proc/kcore.c
   5 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
   6 *	Copyright (C) IBM Corporation, 2004. All rights reserved
   7 *
   8 */
   9
  10#include <linux/mm.h>
  11#include <linux/kcore.h>
  12#include <linux/user.h>
  13#include <linux/elf.h>
  14#include <linux/elfcore.h>
  15#include <linux/export.h>
  16#include <linux/slab.h>
  17#include <linux/highmem.h>
  18#include <linux/printk.h>
  19#include <linux/bootmem.h>
  20#include <linux/init.h>
  21#include <linux/crash_dump.h>
  22#include <linux/list.h>
  23#include <linux/vmalloc.h>
  24#include <linux/pagemap.h>
  25#include <asm/uaccess.h>
  26#include <asm/io.h>
  27#include "internal.h"
  28
  29/* List representing chunks of contiguous memory areas and their offsets in
  30 * vmcore file.
  31 */
  32static LIST_HEAD(vmcore_list);
  33
  34/* Stores the pointer to the buffer containing kernel elf core headers. */
  35static char *elfcorebuf;
  36static size_t elfcorebuf_sz;
  37static size_t elfcorebuf_sz_orig;
  38
  39static char *elfnotes_buf;
  40static size_t elfnotes_sz;
  41
  42/* Total size of vmcore file. */
  43static u64 vmcore_size;
  44
  45static struct proc_dir_entry *proc_vmcore;
  46
  47/*
  48 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
  49 * The called function has to take care of module refcounting.
  50 */
  51static int (*oldmem_pfn_is_ram)(unsigned long pfn);
  52
  53int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
  54{
  55	if (oldmem_pfn_is_ram)
  56		return -EBUSY;
  57	oldmem_pfn_is_ram = fn;
  58	return 0;
  59}
  60EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);
  61
  62void unregister_oldmem_pfn_is_ram(void)
  63{
  64	oldmem_pfn_is_ram = NULL;
  65	wmb();
  66}
  67EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
  68
  69static int pfn_is_ram(unsigned long pfn)
  70{
  71	int (*fn)(unsigned long pfn);
  72	/* pfn is ram unless fn() checks pagetype */
  73	int ret = 1;
  74
  75	/*
  76	 * Ask hypervisor if the pfn is really ram.
  77	 * A ballooned page contains no data and reading from such a page
  78	 * will cause high load in the hypervisor.
  79	 */
  80	fn = oldmem_pfn_is_ram;
  81	if (fn)
  82		ret = fn(pfn);
  83
  84	return ret;
  85}
  86
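A minimal sketch of how a hypervisor-specific driver could hook this callback; the my_balloon_* names are hypothetical (the in-tree Xen balloon driver registers a comparable check):

	#include <linux/crash_dump.h>

	/* Hypothetical: report ballooned-out pages as non-RAM so that
	 * read_from_oldmem() substitutes zeros instead of touching pages
	 * the hypervisor has reclaimed. */
	static int my_balloon_pfn_is_ram(unsigned long pfn)
	{
		return my_balloon_page_present(pfn) ? 1 : 0;	/* hypothetical lookup */
	}

	static int __init my_balloon_init(void)
	{
		return register_oldmem_pfn_is_ram(&my_balloon_pfn_is_ram);
	}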
  87/* Reads a page from the oldmem device from given offset. */
  88static ssize_t read_from_oldmem(char *buf, size_t count,
  89				u64 *ppos, int userbuf)
  90{
  91	unsigned long pfn, offset;
  92	size_t nr_bytes;
  93	ssize_t read = 0, tmp;
  94
  95	if (!count)
  96		return 0;
  97
  98	offset = (unsigned long)(*ppos % PAGE_SIZE);
  99	pfn = (unsigned long)(*ppos / PAGE_SIZE);
 100
 101	do {
 102		if (count > (PAGE_SIZE - offset))
 103			nr_bytes = PAGE_SIZE - offset;
 104		else
 105			nr_bytes = count;
 106
 107		/* If pfn is not ram, return zeros for sparse dump files */
 108		if (pfn_is_ram(pfn) == 0)
 109			memset(buf, 0, nr_bytes);
 110		else {
 111			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
 112						offset, userbuf);
 113			if (tmp < 0)
 114				return tmp;
 115		}
 116		*ppos += nr_bytes;
 117		count -= nr_bytes;
 118		buf += nr_bytes;
 119		read += nr_bytes;
 120		++pfn;
 121		offset = 0;
 122	} while (count);
 123
 124	return read;
 125}
 126
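A worked instance of the arithmetic above (illustrative, PAGE_SIZE = 4096): for *ppos = 0x1803, offset = 0x1803 % 4096 = 0x803 and pfn = 0x1803 / 4096 = 1, so the first loop iteration copies PAGE_SIZE - 0x803 = 0x7fd bytes from page frame 1, after which offset resets to 0 and subsequent iterations copy whole pages.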
 127/*
 128 * Architectures may override this function to allocate ELF header in 2nd kernel
 129 */
 130int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 131{
 132	return 0;
 133}
 134
 135/*
 136 * Architectures may override this function to free header
 137 */
 138void __weak elfcorehdr_free(unsigned long long addr)
 139{}
 140
 141/*
 142 * Architectures may override this function to read from ELF header
 143 */
 144ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 145{
 146	return read_from_oldmem(buf, count, ppos, 0);
 147}
 148
 149/*
 150 * Architectures may override this function to read from notes sections
 151 */
 152ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
 153{
 154	return read_from_oldmem(buf, count, ppos, 0);
 155}
 156
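A hedged sketch of an architecture overriding one of these weak hooks; s390, for example, overrides elfcorehdr_read() to serve a header rebuilt in the crash kernel. The my_hdr_buf buffer below is hypothetical:

	/* Hypothetical override: the ELF header was prepared at boot in
	 * my_hdr_buf instead of residing in oldmem; bounds checking of
	 * *ppos and count against the buffer size is assumed. */
	ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
	{
		memcpy(buf, my_hdr_buf + *ppos, count);
		*ppos += count;
		return count;
	}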
 157/*
 158 * Architectures may override this function to map oldmem
 159 */
 160int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
 161				  unsigned long from, unsigned long pfn,
 162				  unsigned long size, pgprot_t prot)
 163{
 164	return remap_pfn_range(vma, from, pfn, size, prot);
 165}
 166
 167/*
 168 * Copy to either kernel or user space
 169 */
 170static int copy_to(void *target, void *src, size_t size, int userbuf)
 171{
 172	if (userbuf) {
 173		if (copy_to_user((char __user *) target, src, size))
 174			return -EFAULT;
 175	} else {
 176		memcpy(target, src, size);
 177	}
 178	return 0;
 179}
 180
 181/* Read from the ELF header and then the crash dump. On error, a negative
 182 * value is returned; otherwise, the number of bytes read is returned.
 183 */
 184static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
 185			     int userbuf)
 186{
 187	ssize_t acc = 0, tmp;
 188	size_t tsz;
 189	u64 start;
 190	struct vmcore *m = NULL;
 191
 192	if (buflen == 0 || *fpos >= vmcore_size)
 193		return 0;
 194
 195	/* trim buflen to not go beyond EOF */
 196	if (buflen > vmcore_size - *fpos)
 197		buflen = vmcore_size - *fpos;
 198
 199	/* Read ELF core header */
 200	if (*fpos < elfcorebuf_sz) {
 201		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
 202		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
 203			return -EFAULT;
 204		buflen -= tsz;
 205		*fpos += tsz;
 206		buffer += tsz;
 207		acc += tsz;
 208
 209		/* leave now if filled buffer already */
 210		if (buflen == 0)
 211			return acc;
 212	}
 213
 214	/* Read Elf note segment */
 215	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
 216		void *kaddr;
 217
 218		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
 219		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
 220		if (copy_to(buffer, kaddr, tsz, userbuf))
 221			return -EFAULT;
 222		buflen -= tsz;
 223		*fpos += tsz;
 224		buffer += tsz;
 225		acc += tsz;
 226
 227		/* leave now if filled buffer already */
 228		if (buflen == 0)
 229			return acc;
 230	}
 231
 232	list_for_each_entry(m, &vmcore_list, list) {
 233		if (*fpos < m->offset + m->size) {
 234			tsz = (size_t)min_t(unsigned long long,
 235					    m->offset + m->size - *fpos,
 236					    buflen);
 237			start = m->paddr + *fpos - m->offset;
 238			tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
 239			if (tmp < 0)
 240				return tmp;
 241			buflen -= tsz;
 242			*fpos += tsz;
 243			buffer += tsz;
 244			acc += tsz;
 245
 246			/* leave now if filled buffer already */
 247			if (buflen == 0)
 248				return acc;
 249		}
 250	}
 251
 252	return acc;
 253}
 254
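For context, a small user-space sketch (not part of this file) that exercises __read_vmcore() through the /proc interface by reading the merged ELF header served at offset 0:

	#include <elf.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		Elf64_Ehdr ehdr;
		int fd = open("/proc/vmcore", O_RDONLY);

		if (fd < 0 || pread(fd, &ehdr, sizeof(ehdr), 0) != (ssize_t)sizeof(ehdr))
			return 1;
		printf("e_phnum=%u e_phoff=%llu\n", ehdr.e_phnum,
		       (unsigned long long)ehdr.e_phoff);
		close(fd);
		return 0;
	}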
 255static ssize_t read_vmcore(struct file *file, char __user *buffer,
 256			   size_t buflen, loff_t *fpos)
 257{
 258	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
 259}
 260
 261/*
 262 * The vmcore fault handler uses the page cache and fills data using the
 263 * standard __vmcore_read() function.
 264 *
 265 * On s390 the fault handler is used for memory regions that can't be mapped
 266 * directly with remap_pfn_range().
 267 */
 268static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 269{
 270#ifdef CONFIG_S390
 271	struct address_space *mapping = vma->vm_file->f_mapping;
 272	pgoff_t index = vmf->pgoff;
 273	struct page *page;
 274	loff_t offset;
 275	char *buf;
 276	int rc;
 277
 278	page = find_or_create_page(mapping, index, GFP_KERNEL);
 279	if (!page)
 280		return VM_FAULT_OOM;
 281	if (!PageUptodate(page)) {
 282		offset = (loff_t) index << PAGE_SHIFT;
 283		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
 284		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
 285		if (rc < 0) {
 286			unlock_page(page);
 287			put_page(page);
 288			return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
 289		}
 290		SetPageUptodate(page);
 291	}
 292	unlock_page(page);
 293	vmf->page = page;
 294	return 0;
 295#else
 296	return VM_FAULT_SIGBUS;
 297#endif
 298}
 299
 300static const struct vm_operations_struct vmcore_mmap_ops = {
 301	.fault = mmap_vmcore_fault,
 302};
 303
 304/**
 305 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
 306 *                      vmalloc memory
 307 *
 308 * @notes_sz: size of buffer
 309 *
 310 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 311 * the buffer to user-space by means of remap_vmalloc_range().
 312 *
 313 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 314 * disabled and there's no need to allow users to mmap the buffer.
 315 */
 316static inline char *alloc_elfnotes_buf(size_t notes_sz)
 317{
 318#ifdef CONFIG_MMU
 319	return vmalloc_user(notes_sz);
 320#else
 321	return vzalloc(notes_sz);
 322#endif
 323}
 324
 325/*
 326 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 327 * essential for mmap_vmcore() in order to map physically
 328 * non-contiguous objects (ELF header, ELF note segment and memory
 329 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 330 * virtually contiguous user-space in ELF layout.
 331 */
 332#ifdef CONFIG_MMU
 333/*
 334 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 335 * reported as not being ram with the zero page.
 336 *
 337 * @vma: vm_area_struct describing requested mapping
 338 * @from: start remapping from
 339 * @pfn: page frame number to start remapping to
 340 * @size: remapping size
 341 * @prot: protection bits
 342 *
 343 * Returns zero on success, -EAGAIN on failure.
 344 */
 345static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
 346				    unsigned long from, unsigned long pfn,
 347				    unsigned long size, pgprot_t prot)
 348{
 349	unsigned long map_size;
 350	unsigned long pos_start, pos_end, pos;
 351	unsigned long zeropage_pfn = my_zero_pfn(0);
 352	size_t len = 0;
 353
 354	pos_start = pfn;
 355	pos_end = pfn + (size >> PAGE_SHIFT);
 356
 357	for (pos = pos_start; pos < pos_end; ++pos) {
 358		if (!pfn_is_ram(pos)) {
 359			/*
 360			 * We hit a page which is not ram. Remap the continuous
 361			 * region between pos_start and pos-1 and replace
 362			 * the non-ram page at pos with the zero page.
 363			 */
 364			if (pos > pos_start) {
 365				/* Remap continuous region */
 366				map_size = (pos - pos_start) << PAGE_SHIFT;
 367				if (remap_oldmem_pfn_range(vma, from + len,
 368							   pos_start, map_size,
 369							   prot))
 370					goto fail;
 371				len += map_size;
 372			}
 373			/* Remap the zero page */
 374			if (remap_oldmem_pfn_range(vma, from + len,
 375						   zeropage_pfn,
 376						   PAGE_SIZE, prot))
 377				goto fail;
 378			len += PAGE_SIZE;
 379			pos_start = pos + 1;
 380		}
 381	}
 382	if (pos > pos_start) {
 383		/* Remap the rest */
 384		map_size = (pos - pos_start) << PAGE_SHIFT;
 385		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
 386					   map_size, prot))
 387			goto fail;
 388	}
 389	return 0;
 390fail:
 391	do_munmap(vma->vm_mm, from, len);
 392	return -EAGAIN;
 393}
 394
 395static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
 396			    unsigned long from, unsigned long pfn,
 397			    unsigned long size, pgprot_t prot)
 398{
 399	/*
 400	 * Check if oldmem_pfn_is_ram was registered to avoid
 401	 * looping over all pages without a reason.
 402	 */
 403	if (oldmem_pfn_is_ram)
 404		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
 405	else
 406		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
 407}
 408
 409static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 410{
 411	size_t size = vma->vm_end - vma->vm_start;
 412	u64 start, end, len, tsz;
 413	struct vmcore *m;
 414
 415	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
 416	end = start + size;
 417
 418	if (size > vmcore_size || end > vmcore_size)
 419		return -EINVAL;
 420
 421	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
 422		return -EPERM;
 423
 424	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
 425	vma->vm_flags |= VM_MIXEDMAP;
 426	vma->vm_ops = &vmcore_mmap_ops;
 427
 428	len = 0;
 429
 430	if (start < elfcorebuf_sz) {
 431		u64 pfn;
 432
 433		tsz = min(elfcorebuf_sz - (size_t)start, size);
 434		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
 435		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
 436				    vma->vm_page_prot))
 437			return -EAGAIN;
 438		size -= tsz;
 439		start += tsz;
 440		len += tsz;
 441
 442		if (size == 0)
 443			return 0;
 444	}
 445
 446	if (start < elfcorebuf_sz + elfnotes_sz) {
 447		void *kaddr;
 448
 449		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
 450		kaddr = elfnotes_buf + start - elfcorebuf_sz;
 451		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
 452						kaddr, tsz))
 453			goto fail;
 454		size -= tsz;
 455		start += tsz;
 456		len += tsz;
 457
 458		if (size == 0)
 459			return 0;
 460	}
 461
 462	list_for_each_entry(m, &vmcore_list, list) {
 463		if (start < m->offset + m->size) {
 464			u64 paddr = 0;
 465
 466			tsz = (size_t)min_t(unsigned long long,
 467					    m->offset + m->size - start, size);
 468			paddr = m->paddr + start - m->offset;
 469			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
 470						    paddr >> PAGE_SHIFT, tsz,
 471						    vma->vm_page_prot))
 472				goto fail;
 473			size -= tsz;
 474			start += tsz;
 475			len += tsz;
 476
 477			if (size == 0)
 478				return 0;
 479		}
 480	}
 481
 482	return 0;
 483fail:
 484	do_munmap(vma->vm_mm, vma->vm_start, len);
 485	return -EAGAIN;
 486}
 487#else
 488static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 489{
 490	return -ENOSYS;
 491}
 492#endif
 493
 494static const struct file_operations proc_vmcore_operations = {
 495	.read		= read_vmcore,
 496	.llseek		= default_llseek,
 497	.mmap		= mmap_vmcore,
 498};
 499
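A user-space sketch of the mmap path (assumes a running kdump capture kernel); the mapping must be read-only, since mmap_vmcore() rejects VM_WRITE and VM_EXEC with -EPERM:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/proc/vmcore", O_RDONLY);
		void *p;

		if (fd < 0)
			return 1;
		p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
		close(fd);
		if (p == MAP_FAILED)
			return 1;
		return memcmp(p, "\177ELF", 4) ? 1 : 0;	/* ELF magic at offset 0 */
	}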
 500static struct vmcore* __init get_new_element(void)
 501{
 502	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
 503}
 504
 505static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
 506				  struct list_head *vc_list)
 507{
 508	u64 size;
 509	struct vmcore *m;
 510
 511	size = elfsz + elfnotesegsz;
 512	list_for_each_entry(m, vc_list, list) {
 513		size += m->size;
 514	}
 515	return size;
 516}
 517
 518/**
 519 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 520 *
 521 * @ehdr_ptr: ELF header
 522 *
 523 * This function updates p_memsz member of each PT_NOTE entry in the
 524 * program header table pointed to by @ehdr_ptr to real size of ELF
 525 * note segment.
 526 */
 527static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
 528{
 529	int i, rc=0;
 530	Elf64_Phdr *phdr_ptr;
 531	Elf64_Nhdr *nhdr_ptr;
 532
 533	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
 534	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 535		void *notes_section;
 536		u64 offset, max_sz, sz, real_sz = 0;
 537		if (phdr_ptr->p_type != PT_NOTE)
 538			continue;
 539		max_sz = phdr_ptr->p_memsz;
 540		offset = phdr_ptr->p_offset;
 541		notes_section = kmalloc(max_sz, GFP_KERNEL);
 542		if (!notes_section)
 543			return -ENOMEM;
 544		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 545		if (rc < 0) {
 546			kfree(notes_section);
 547			return rc;
 548		}
 549		nhdr_ptr = notes_section;
 550		while (nhdr_ptr->n_namesz != 0) {
 551			sz = sizeof(Elf64_Nhdr) +
 552				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
 553				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
 554			if ((real_sz + sz) > max_sz) {
 555				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
 556					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
 557				break;
 558			}
 559			real_sz += sz;
 560			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
 561		}
 562		kfree(notes_section);
 563		phdr_ptr->p_memsz = real_sz;
 564		if (real_sz == 0) {
 565			pr_warn("Warning: Zero PT_NOTE entries found\n");
 566		}
 567	}
 568
 569	return 0;
 570}
 571
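A worked instance of the note-size formula above (illustrative): an x86_64 NT_PRSTATUS note named "CORE" has n_namesz = 5 and n_descsz = 336, so sz = sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(336, 4) = 12 + 8 + 336 = 356 bytes, the name being padded to a 4-byte boundary.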
 572/**
 573 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 574 * headers and sum of real size of their ELF note segment headers and
 575 * data.
 576 *
 577 * @ehdr_ptr: ELF header
 578 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 579 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 580 *
 581 * This function is used to merge multiple PT_NOTE program headers
 582 * into a unique single one. The resulting unique entry will have
 583 * @sz_ptnote in its phdr->p_memsz.
 584 *
 585 * It is assumed that program headers with PT_NOTE type pointed to by
 586 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 587 * and each of the PT_NOTE program headers has the actual ELF note segment
 588 * size in its p_memsz member.
 589 */
 590static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
 591						 int *nr_ptnote, u64 *sz_ptnote)
 592{
 593	int i;
 594	Elf64_Phdr *phdr_ptr;
 595
 596	*nr_ptnote = *sz_ptnote = 0;
 597
 598	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
 599	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 600		if (phdr_ptr->p_type != PT_NOTE)
 601			continue;
 602		*nr_ptnote += 1;
 603		*sz_ptnote += phdr_ptr->p_memsz;
 604	}
 605
 606	return 0;
 607}
 608
 609/**
 610 * copy_notes_elf64 - copy ELF note segments in a given buffer
 611 *
 612 * @ehdr_ptr: ELF header
 613 * @notes_buf: buffer into which ELF note segments are copied
 614 *
 615 * This function is used to copy ELF note segment in the 1st kernel
 616 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 617 * size of the buffer @notes_buf is equal to or larger than sum of the
 618 * real ELF note segment headers and data.
 619 *
 620 * It is assumed that program headers with PT_NOTE type pointed to by
 621 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 622 * and each of the PT_NOTE program headers has the actual ELF note segment
 623 * size in its p_memsz member.
 624 */
 625static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
 626{
 627	int i, rc=0;
 628	Elf64_Phdr *phdr_ptr;
 629
 630	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);
 631
 632	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 633		u64 offset;
 634		if (phdr_ptr->p_type != PT_NOTE)
 635			continue;
 636		offset = phdr_ptr->p_offset;
 637		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
 638					   &offset);
 639		if (rc < 0)
 640			return rc;
 641		notes_buf += phdr_ptr->p_memsz;
 642	}
 643
 644	return 0;
 645}
 646
 647/* Merges all the PT_NOTE headers into one. */
 648static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
 649					   char **notes_buf, size_t *notes_sz)
 650{
 651	int i, nr_ptnote=0, rc=0;
 652	char *tmp;
 653	Elf64_Ehdr *ehdr_ptr;
 654	Elf64_Phdr phdr;
 655	u64 phdr_sz = 0, note_off;
 656
 657	ehdr_ptr = (Elf64_Ehdr *)elfptr;
 658
 659	rc = update_note_header_size_elf64(ehdr_ptr);
 660	if (rc < 0)
 661		return rc;
 662
 663	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
 664	if (rc < 0)
 665		return rc;
 666
 667	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
 668	*notes_buf = alloc_elfnotes_buf(*notes_sz);
 669	if (!*notes_buf)
 670		return -ENOMEM;
 671
 672	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
 673	if (rc < 0)
 674		return rc;
 675
 676	/* Prepare merged PT_NOTE program header. */
 677	phdr.p_type    = PT_NOTE;
 678	phdr.p_flags   = 0;
 679	note_off = sizeof(Elf64_Ehdr) +
 680			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
 681	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
 682	phdr.p_vaddr   = phdr.p_paddr = 0;
 683	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
 684	phdr.p_align   = 0;
 685
 686	/* Add merged PT_NOTE program header*/
 687	tmp = elfptr + sizeof(Elf64_Ehdr);
 688	memcpy(tmp, &phdr, sizeof(phdr));
 689	tmp += sizeof(phdr);
 690
 691	/* Remove unwanted PT_NOTE program headers. */
 692	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
 693	*elfsz = *elfsz - i;
 694	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
 695	memset(elfptr + *elfsz, 0, i);
 696	*elfsz = roundup(*elfsz, PAGE_SIZE);
 697
 698	/* Modify e_phnum to reflect merged headers. */
 699	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
 700
 701	return 0;
 702}
 703
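A worked instance of the note_off computation above (illustrative numbers): with e_phnum = 4 and nr_ptnote = 3, the merged table keeps 4 - 3 + 1 = 2 program headers, so note_off = sizeof(Elf64_Ehdr) + 2 * sizeof(Elf64_Phdr) = 64 + 2 * 56 = 176, and p_offset = roundup(176, PAGE_SIZE) places the merged note segment at the first page boundary.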
 704/**
 705 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 706 *
 707 * @ehdr_ptr: ELF header
 708 *
 709 * This function updates p_memsz member of each PT_NOTE entry in the
 710 * program header table pointed to by @ehdr_ptr to real size of ELF
 711 * note segment.
 712 */
 713static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
 714{
 715	int i, rc=0;
 716	Elf32_Phdr *phdr_ptr;
 717	Elf32_Nhdr *nhdr_ptr;
 718
 719	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
 720	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 721		void *notes_section;
 722		u64 offset, max_sz, sz, real_sz = 0;
 723		if (phdr_ptr->p_type != PT_NOTE)
 724			continue;
 725		max_sz = phdr_ptr->p_memsz;
 726		offset = phdr_ptr->p_offset;
 727		notes_section = kmalloc(max_sz, GFP_KERNEL);
 728		if (!notes_section)
 729			return -ENOMEM;
 730		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 731		if (rc < 0) {
 732			kfree(notes_section);
 733			return rc;
 734		}
 735		nhdr_ptr = notes_section;
 736		while (nhdr_ptr->n_namesz != 0) {
 737			sz = sizeof(Elf32_Nhdr) +
 738				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
 739				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
 740			if ((real_sz + sz) > max_sz) {
 741				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
 742					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
 743				break;
 744			}
 745			real_sz += sz;
 746			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
 747		}
 748		kfree(notes_section);
 749		phdr_ptr->p_memsz = real_sz;
 750		if (real_sz == 0) {
 751			pr_warn("Warning: Zero PT_NOTE entries found\n");
 752		}
 753	}
 754
 755	return 0;
 756}
 757
 758/**
 759 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 760 * headers and sum of real size of their ELF note segment headers and
 761 * data.
 762 *
 763 * @ehdr_ptr: ELF header
 764 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 765 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 766 *
 767 * This function is used to merge multiple PT_NOTE program headers
 768 * into a unique single one. The resulting unique entry will have
 769 * @sz_ptnote in its phdr->p_memsz.
 770 *
 771 * It is assumed that program headers with PT_NOTE type pointed to by
 772 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 773 * and each of the PT_NOTE program headers has the actual ELF note segment
 774 * size in its p_memsz member.
 775 */
 776static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
 777						 int *nr_ptnote, u64 *sz_ptnote)
 778{
 779	int i;
 780	Elf32_Phdr *phdr_ptr;
 781
 782	*nr_ptnote = *sz_ptnote = 0;
 783
 784	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
 785	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 786		if (phdr_ptr->p_type != PT_NOTE)
 787			continue;
 788		*nr_ptnote += 1;
 789		*sz_ptnote += phdr_ptr->p_memsz;
 790	}
 791
 792	return 0;
 793}
 794
 795/**
 796 * copy_notes_elf32 - copy ELF note segments in a given buffer
 797 *
 798 * @ehdr_ptr: ELF header
 799 * @notes_buf: buffer into which ELF note segments are copied
 800 *
 801 * This function is used to copy ELF note segment in the 1st kernel
 802 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 803 * size of the buffer @notes_buf is equal to or larger than sum of the
 804 * real ELF note segment headers and data.
 805 *
 806 * It is assumed that program headers with PT_NOTE type pointed to by
 807 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 808 * and each of the PT_NOTE program headers has the actual ELF note segment
 809 * size in its p_memsz member.
 810 */
 811static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
 812{
 813	int i, rc=0;
 814	Elf32_Phdr *phdr_ptr;
 815
 816	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);
 817
 818	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 819		u64 offset;
 820		if (phdr_ptr->p_type != PT_NOTE)
 821			continue;
 822		offset = phdr_ptr->p_offset;
 823		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
 824					   &offset);
 825		if (rc < 0)
 826			return rc;
 827		notes_buf += phdr_ptr->p_memsz;
 828	}
 829
 830	return 0;
 831}
 832
 833/* Merges all the PT_NOTE headers into one. */
 834static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
 835					   char **notes_buf, size_t *notes_sz)
 836{
 837	int i, nr_ptnote=0, rc=0;
 838	char *tmp;
 839	Elf32_Ehdr *ehdr_ptr;
 840	Elf32_Phdr phdr;
 841	u64 phdr_sz = 0, note_off;
 842
 843	ehdr_ptr = (Elf32_Ehdr *)elfptr;
 844
 845	rc = update_note_header_size_elf32(ehdr_ptr);
 846	if (rc < 0)
 847		return rc;
 848
 849	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
 850	if (rc < 0)
 851		return rc;
 852
 853	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
 854	*notes_buf = alloc_elfnotes_buf(*notes_sz);
 855	if (!*notes_buf)
 856		return -ENOMEM;
 857
 858	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
 859	if (rc < 0)
 860		return rc;
 861
 862	/* Prepare merged PT_NOTE program header. */
 863	phdr.p_type    = PT_NOTE;
 864	phdr.p_flags   = 0;
 865	note_off = sizeof(Elf32_Ehdr) +
 866			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
 867	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
 868	phdr.p_vaddr   = phdr.p_paddr = 0;
 869	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
 870	phdr.p_align   = 0;
 871
 872	/* Add merged PT_NOTE program header*/
 873	tmp = elfptr + sizeof(Elf32_Ehdr);
 874	memcpy(tmp, &phdr, sizeof(phdr));
 875	tmp += sizeof(phdr);
 876
 877	/* Remove unwanted PT_NOTE program headers. */
 878	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
 879	*elfsz = *elfsz - i;
 880	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
 881	memset(elfptr + *elfsz, 0, i);
 882	*elfsz = roundup(*elfsz, PAGE_SIZE);
 883
 884	/* Modify e_phnum to reflect merged headers. */
 885	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
 886
 887	return 0;
 888}
 889
 890/* Add memory chunks represented by program headers to vmcore list. Also update
 891 * the new offset fields of exported program headers. */
 892static int __init process_ptload_program_headers_elf64(char *elfptr,
 893						size_t elfsz,
 894						size_t elfnotes_sz,
 895						struct list_head *vc_list)
 896{
 897	int i;
 898	Elf64_Ehdr *ehdr_ptr;
 899	Elf64_Phdr *phdr_ptr;
 900	loff_t vmcore_off;
 901	struct vmcore *new;
 902
 903	ehdr_ptr = (Elf64_Ehdr *)elfptr;
 904	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
 905
 906	/* Skip Elf header, program headers and Elf note segment. */
 907	vmcore_off = elfsz + elfnotes_sz;
 908
 909	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 910		u64 paddr, start, end, size;
 911
 912		if (phdr_ptr->p_type != PT_LOAD)
 913			continue;
 914
 915		paddr = phdr_ptr->p_offset;
 916		start = rounddown(paddr, PAGE_SIZE);
 917		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
 918		size = end - start;
 919
 920		/* Add this contiguous chunk of memory to vmcore list.*/
 921		new = get_new_element();
 922		if (!new)
 923			return -ENOMEM;
 924		new->paddr = start;
 925		new->size = size;
 926		list_add_tail(&new->list, vc_list);
 927
 928		/* Update the program header offset. */
 929		phdr_ptr->p_offset = vmcore_off + (paddr - start);
 930		vmcore_off = vmcore_off + size;
 931	}
 932	return 0;
 933}
 934
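A worked instance of the PT_LOAD bookkeeping above (illustrative, PAGE_SIZE = 4096): a header whose p_offset (the old physical address) is 0x100234 with p_memsz = 0x1000 yields start = 0x100000, end = 0x102000 and size = 0x2000; the rewritten p_offset = vmcore_off + 0x234 keeps the data at the same offset within its page in the exported file.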
 935static int __init process_ptload_program_headers_elf32(char *elfptr,
 936						size_t elfsz,
 937						size_t elfnotes_sz,
 938						struct list_head *vc_list)
 939{
 940	int i;
 941	Elf32_Ehdr *ehdr_ptr;
 942	Elf32_Phdr *phdr_ptr;
 943	loff_t vmcore_off;
 944	struct vmcore *new;
 945
 946	ehdr_ptr = (Elf32_Ehdr *)elfptr;
 947	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
 948
 949	/* Skip Elf header, program headers and Elf note segment. */
 950	vmcore_off = elfsz + elfnotes_sz;
 951
 952	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 953		u64 paddr, start, end, size;
 954
 955		if (phdr_ptr->p_type != PT_LOAD)
 956			continue;
 957
 958		paddr = phdr_ptr->p_offset;
 959		start = rounddown(paddr, PAGE_SIZE);
 960		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
 961		size = end - start;
 962
 963		/* Add this contiguous chunk of memory to vmcore list.*/
 964		new = get_new_element();
 965		if (!new)
 966			return -ENOMEM;
 967		new->paddr = start;
 968		new->size = size;
 969		list_add_tail(&new->list, vc_list);
 970
 971		/* Update the program header offset */
 972		phdr_ptr->p_offset = vmcore_off + (paddr - start);
 973		vmcore_off = vmcore_off + size;
 974	}
 975	return 0;
 976}
 977
 978/* Sets offset fields of vmcore elements. */
 979static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
 980					   struct list_head *vc_list)
 981{
 982	loff_t vmcore_off;
 983	struct vmcore *m;
 984
 985	/* Skip Elf header, program headers and Elf note segment. */
 986	vmcore_off = elfsz + elfnotes_sz;
 987
 988	list_for_each_entry(m, vc_list, list) {
 989		m->offset = vmcore_off;
 990		vmcore_off += m->size;
 991	}
 992}
 993
 994static void free_elfcorebuf(void)
 995{
 996	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
 997	elfcorebuf = NULL;
 998	vfree(elfnotes_buf);
 999	elfnotes_buf = NULL;
1000}
1001
1002static int __init parse_crash_elf64_headers(void)
1003{
1004	int rc=0;
1005	Elf64_Ehdr ehdr;
1006	u64 addr;
1007
1008	addr = elfcorehdr_addr;
1009
1010	/* Read Elf header */
1011	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
1012	if (rc < 0)
1013		return rc;
1014
1015	/* Do some basic verification. */
1016	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1017		(ehdr.e_type != ET_CORE) ||
1018		!vmcore_elf64_check_arch(&ehdr) ||
1019		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
1020		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1021		ehdr.e_version != EV_CURRENT ||
1022		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
1023		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
1024		ehdr.e_phnum == 0) {
1025		pr_warn("Warning: Core image elf header is not sane\n");
1026		return -EINVAL;
1027	}
1028
1029	/* Read in all elf headers. */
1030	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
1031				ehdr.e_phnum * sizeof(Elf64_Phdr);
1032	elfcorebuf_sz = elfcorebuf_sz_orig;
1033	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1034					      get_order(elfcorebuf_sz_orig));
1035	if (!elfcorebuf)
1036		return -ENOMEM;
1037	addr = elfcorehdr_addr;
1038	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1039	if (rc < 0)
1040		goto fail;
1041
1042	/* Merge all PT_NOTE headers into one. */
1043	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
1044				      &elfnotes_buf, &elfnotes_sz);
1045	if (rc)
1046		goto fail;
1047	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
1048						  elfnotes_sz, &vmcore_list);
1049	if (rc)
1050		goto fail;
1051	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1052	return 0;
1053fail:
1054	free_elfcorebuf();
1055	return rc;
1056}
1057
1058static int __init parse_crash_elf32_headers(void)
1059{
1060	int rc=0;
1061	Elf32_Ehdr ehdr;
1062	u64 addr;
1063
1064	addr = elfcorehdr_addr;
1065
1066	/* Read Elf header */
1067	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
1068	if (rc < 0)
1069		return rc;
1070
1071	/* Do some basic verification. */
1072	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1073		(ehdr.e_type != ET_CORE) ||
1074		!elf_check_arch(&ehdr) ||
1075		ehdr.e_ident[EI_CLASS] != ELFCLASS32||
1076		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1077		ehdr.e_version != EV_CURRENT ||
1078		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
1079		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
1080		ehdr.e_phnum == 0) {
1081		pr_warn("Warning: Core image elf header is not sane\n");
1082		return -EINVAL;
1083	}
1084
1085	/* Read in all elf headers. */
1086	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
1087	elfcorebuf_sz = elfcorebuf_sz_orig;
1088	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1089					      get_order(elfcorebuf_sz_orig));
1090	if (!elfcorebuf)
1091		return -ENOMEM;
1092	addr = elfcorehdr_addr;
1093	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1094	if (rc < 0)
1095		goto fail;
1096
1097	/* Merge all PT_NOTE headers into one. */
1098	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
1099				      &elfnotes_buf, &elfnotes_sz);
1100	if (rc)
1101		goto fail;
1102	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
1103						  elfnotes_sz, &vmcore_list);
1104	if (rc)
1105		goto fail;
1106	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1107	return 0;
1108fail:
1109	free_elfcorebuf();
1110	return rc;
1111}
1112
1113static int __init parse_crash_elf_headers(void)
1114{
1115	unsigned char e_ident[EI_NIDENT];
1116	u64 addr;
1117	int rc=0;
1118
1119	addr = elfcorehdr_addr;
1120	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
1121	if (rc < 0)
1122		return rc;
1123	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
1124		pr_warn("Warning: Core image elf header not found\n");
1125		return -EINVAL;
1126	}
1127
1128	if (e_ident[EI_CLASS] == ELFCLASS64) {
1129		rc = parse_crash_elf64_headers();
1130		if (rc)
1131			return rc;
1132	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
1133		rc = parse_crash_elf32_headers();
1134		if (rc)
1135			return rc;
1136	} else {
1137		pr_warn("Warning: Core image elf header is not sane\n");
1138		return -EINVAL;
1139	}
1140
1141	/* Determine vmcore size. */
1142	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1143				      &vmcore_list);
1144
1145	return 0;
1146}
1147
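For reference, the header address consumed here and checked by is_vmcore_usable() in vmcore_init() below normally arrives on the capture kernel's command line, appended by kexec-tools rather than typed by hand; the documented form is elfcorehdr=[size[KMG]@]offset[KMG], for example:

	elfcorehdr=0x6f000000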
1148/* Init function for vmcore module. */
1149static int __init vmcore_init(void)
1150{
1151	int rc = 0;
1152
1153	/* Allow architectures to allocate ELF header in 2nd kernel */
1154	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
1155	if (rc)
1156		return rc;
1157	/*
1158	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
1159	 * then capture the dump.
1160	 */
1161	if (!(is_vmcore_usable()))
1162		return rc;
1163	rc = parse_crash_elf_headers();
1164	if (rc) {
1165		pr_warn("Kdump: vmcore not initialized\n");
1166		return rc;
1167	}
1168	elfcorehdr_free(elfcorehdr_addr);
1169	elfcorehdr_addr = ELFCORE_ADDR_ERR;
1170
1171	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
1172	if (proc_vmcore)
1173		proc_vmcore->size = vmcore_size;
1174	return 0;
1175}
1176fs_initcall(vmcore_init);
1177
1178/* Cleanup function for vmcore module. */
1179void vmcore_cleanup(void)
1180{
1181	struct list_head *pos, *next;
1182
1183	if (proc_vmcore) {
1184		proc_remove(proc_vmcore);
1185		proc_vmcore = NULL;
1186	}
1187
1188	/* clear the vmcore list. */
1189	list_for_each_safe(pos, next, &vmcore_list) {
1190		struct vmcore *m;
1191
1192		m = list_entry(pos, struct vmcore, list);
1193		list_del(&m->list);
1194		kfree(m);
1195	}
1196	free_elfcorebuf();
1197}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *	fs/proc/vmcore.c Interface for accessing the crash
   4 * 				 dump from the system's previous life.
   5 * 	Heavily borrowed from fs/proc/kcore.c
   6 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
   7 *	Copyright (C) IBM Corporation, 2004. All rights reserved
   8 *
   9 */
  10
  11#include <linux/mm.h>
  12#include <linux/kcore.h>
  13#include <linux/user.h>
  14#include <linux/elf.h>
  15#include <linux/elfcore.h>
  16#include <linux/export.h>
  17#include <linux/slab.h>
  18#include <linux/highmem.h>
  19#include <linux/printk.h>
  20#include <linux/memblock.h>
  21#include <linux/init.h>
  22#include <linux/crash_dump.h>
  23#include <linux/list.h>
  24#include <linux/moduleparam.h>
  25#include <linux/mutex.h>
  26#include <linux/vmalloc.h>
  27#include <linux/pagemap.h>
  28#include <linux/uaccess.h>
  29#include <linux/mem_encrypt.h>
  30#include <asm/io.h>
  31#include "internal.h"
  32
  33/* List representing chunks of contiguous memory areas and their offsets in
  34 * vmcore file.
  35 */
  36static LIST_HEAD(vmcore_list);
  37
  38/* Stores the pointer to the buffer containing kernel elf core headers. */
  39static char *elfcorebuf;
  40static size_t elfcorebuf_sz;
  41static size_t elfcorebuf_sz_orig;
  42
  43static char *elfnotes_buf;
  44static size_t elfnotes_sz;
  45/* Size of all notes minus the device dump notes */
  46static size_t elfnotes_orig_sz;
  47
  48/* Total size of vmcore file. */
  49static u64 vmcore_size;
  50
  51static struct proc_dir_entry *proc_vmcore;
  52
  53#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
  54/* Device Dump list and mutex to synchronize access to list */
  55static LIST_HEAD(vmcoredd_list);
  56static DEFINE_MUTEX(vmcoredd_mutex);
  57
  58static bool vmcoredd_disabled;
  59core_param(novmcoredd, vmcoredd_disabled, bool, 0);
  60#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
  61
  62/* Device Dump Size */
  63static size_t vmcoredd_orig_sz;
  64
  65/*
  66 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
  67 * The called function has to take care of module refcounting.
  68 */
  69static int (*oldmem_pfn_is_ram)(unsigned long pfn);
  70
  71int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
  72{
  73	if (oldmem_pfn_is_ram)
  74		return -EBUSY;
  75	oldmem_pfn_is_ram = fn;
  76	return 0;
  77}
  78EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);
  79
  80void unregister_oldmem_pfn_is_ram(void)
  81{
  82	oldmem_pfn_is_ram = NULL;
  83	wmb();
  84}
  85EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
  86
  87static int pfn_is_ram(unsigned long pfn)
  88{
  89	int (*fn)(unsigned long pfn);
  90	/* pfn is ram unless fn() checks pagetype */
  91	int ret = 1;
  92
  93	/*
  94	 * Ask hypervisor if the pfn is really ram.
  95	 * A ballooned page contains no data and reading from such a page
  96	 * will cause high load in the hypervisor.
  97	 */
  98	fn = oldmem_pfn_is_ram;
  99	if (fn)
 100		ret = fn(pfn);
 101
 102	return ret;
 103}
 104
 105/* Reads a page from the oldmem device from given offset. */
 106ssize_t read_from_oldmem(char *buf, size_t count,
 107			 u64 *ppos, int userbuf,
 108			 bool encrypted)
 109{
 110	unsigned long pfn, offset;
 111	size_t nr_bytes;
 112	ssize_t read = 0, tmp;
 113
 114	if (!count)
 115		return 0;
 116
 117	offset = (unsigned long)(*ppos % PAGE_SIZE);
 118	pfn = (unsigned long)(*ppos / PAGE_SIZE);
 119
 120	do {
 121		if (count > (PAGE_SIZE - offset))
 122			nr_bytes = PAGE_SIZE - offset;
 123		else
 124			nr_bytes = count;
 125
 126		/* If pfn is not ram, return zeros for sparse dump files */
 127		if (pfn_is_ram(pfn) == 0)
 128			memset(buf, 0, nr_bytes);
 129		else {
 130			if (encrypted)
 131				tmp = copy_oldmem_page_encrypted(pfn, buf,
 132								 nr_bytes,
 133								 offset,
 134								 userbuf);
 135			else
 136				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
 137						       offset, userbuf);
 138
 139			if (tmp < 0)
 140				return tmp;
 141		}
 142		*ppos += nr_bytes;
 143		count -= nr_bytes;
 144		buf += nr_bytes;
 145		read += nr_bytes;
 146		++pfn;
 147		offset = 0;
 148	} while (count);
 149
 150	return read;
 151}
 152
 153/*
 154 * Architectures may override this function to allocate ELF header in 2nd kernel
 155 */
 156int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 157{
 158	return 0;
 159}
 160
 161/*
 162 * Architectures may override this function to free header
 163 */
 164void __weak elfcorehdr_free(unsigned long long addr)
 165{}
 166
 167/*
 168 * Architectures may override this function to read from ELF header
 169 */
 170ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 171{
 172	return read_from_oldmem(buf, count, ppos, 0, false);
 173}
 174
 175/*
 176 * Architectures may override this function to read from notes sections
 177 */
 178ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
 179{
 180	return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active());
 181}
 182
 183/*
 184 * Architectures may override this function to map oldmem
 185 */
 186int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
 187				  unsigned long from, unsigned long pfn,
 188				  unsigned long size, pgprot_t prot)
 189{
 190	prot = pgprot_encrypted(prot);
 191	return remap_pfn_range(vma, from, pfn, size, prot);
 192}
 193
 194/*
 195 * Architectures which support memory encryption override this.
 196 */
 197ssize_t __weak
 198copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
 199			   unsigned long offset, int userbuf)
 200{
 201	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
 202}
 203
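A hedged sketch of how an architecture with memory encryption might override this hook; x86 does something comparable by mapping oldmem through an encrypted ioremap variant. The my_ioremap_encrypted helper below is hypothetical shorthand, not the real x86 code:

	ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf,
					   size_t csize, unsigned long offset,
					   int userbuf)
	{
		void *vaddr = my_ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);

		if (!vaddr)
			return -ENOMEM;
		if (userbuf) {
			if (copy_to_user((void __user *)buf, vaddr + offset, csize)) {
				iounmap(vaddr);
				return -EFAULT;
			}
		} else {
			memcpy(buf, vaddr + offset, csize);
		}
		iounmap(vaddr);
		return csize;
	}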
 204/*
 205 * Copy to either kernel or user space
 206 */
 207static int copy_to(void *target, void *src, size_t size, int userbuf)
 208{
 209	if (userbuf) {
 210		if (copy_to_user((char __user *) target, src, size))
 211			return -EFAULT;
 212	} else {
 213		memcpy(target, src, size);
 214	}
 215	return 0;
 216}
 217
 218#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 219static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
 220{
 221	struct vmcoredd_node *dump;
 222	u64 offset = 0;
 223	int ret = 0;
 224	size_t tsz;
 225	char *buf;
 226
 227	mutex_lock(&vmcoredd_mutex);
 228	list_for_each_entry(dump, &vmcoredd_list, list) {
 229		if (start < offset + dump->size) {
 230			tsz = min(offset + (u64)dump->size - start, (u64)size);
 231			buf = dump->buf + start - offset;
 232			if (copy_to(dst, buf, tsz, userbuf)) {
 233				ret = -EFAULT;
 234				goto out_unlock;
 235			}
 236
 237			size -= tsz;
 238			start += tsz;
 239			dst += tsz;
 240
 241			/* Leave now if buffer filled already */
 242			if (!size)
 243				goto out_unlock;
 244		}
 245		offset += dump->size;
 246	}
 247
 248out_unlock:
 249	mutex_unlock(&vmcoredd_mutex);
 250	return ret;
 251}
 252
 253#ifdef CONFIG_MMU
 254static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
 255			       u64 start, size_t size)
 256{
 257	struct vmcoredd_node *dump;
 258	u64 offset = 0;
 259	int ret = 0;
 260	size_t tsz;
 261	char *buf;
 262
 263	mutex_lock(&vmcoredd_mutex);
 264	list_for_each_entry(dump, &vmcoredd_list, list) {
 265		if (start < offset + dump->size) {
 266			tsz = min(offset + (u64)dump->size - start, (u64)size);
 267			buf = dump->buf + start - offset;
 268			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
 269							tsz)) {
 270				ret = -EFAULT;
 271				goto out_unlock;
 272			}
 273
 274			size -= tsz;
 275			start += tsz;
 276			dst += tsz;
 277
 278			/* Leave now if buffer filled already */
 279			if (!size)
 280				goto out_unlock;
 281		}
 282		offset += dump->size;
 283	}
 284
 285out_unlock:
 286	mutex_unlock(&vmcoredd_mutex);
 287	return ret;
 288}
 289#endif /* CONFIG_MMU */
 290#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 291
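A hedged sketch of the producer side of this facility: a driver snapshots device state at crash time and hands it to vmcore via vmcore_add_device_dump() (cxgb4 is the in-tree user; the my_* names are hypothetical, and struct vmcoredd_data is assumed as declared in <linux/crash_dump.h>):

	static int my_dev_dump_collect(struct vmcoredd_data *data, void *buf)
	{
		/* fill buf with data->size bytes of device state */
		return my_dev_read_fw_state(buf, data->size);	/* hypothetical */
	}

	static struct vmcoredd_data my_dev_dump = {
		.dump_name = "my_nic_fw",
		.size = 2 * 1024 * 1024,
		.vmcoredd_callback = my_dev_dump_collect,
	};

	/* called from the driver, e.g. from a crash/panic notifier:
	 *	vmcore_add_device_dump(&my_dev_dump);
	 */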
 292/* Read from the ELF header and then the crash dump. On error, a negative
 293 * value is returned; otherwise, the number of bytes read is returned.
 294 */
 295static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
 296			     int userbuf)
 297{
 298	ssize_t acc = 0, tmp;
 299	size_t tsz;
 300	u64 start;
 301	struct vmcore *m = NULL;
 302
 303	if (buflen == 0 || *fpos >= vmcore_size)
 304		return 0;
 305
 306	/* trim buflen to not go beyond EOF */
 307	if (buflen > vmcore_size - *fpos)
 308		buflen = vmcore_size - *fpos;
 309
 310	/* Read ELF core header */
 311	if (*fpos < elfcorebuf_sz) {
 312		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
 313		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
 314			return -EFAULT;
 315		buflen -= tsz;
 316		*fpos += tsz;
 317		buffer += tsz;
 318		acc += tsz;
 319
 320		/* leave now if filled buffer already */
 321		if (buflen == 0)
 322			return acc;
 323	}
 324
 325	/* Read Elf note segment */
 326	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
 327		void *kaddr;
 328
 329		/* We add device dumps before other elf notes because the
 330		 * other elf notes may not fill the elf notes buffer
 331		 * completely and we will end up with zero-filled data
 332		 * between the elf notes and the device dumps. Tools will
 333		 * then try to decode this zero-filled data as valid notes
 334		 * and we don't want that. Hence, adding device dumps before
 335 * the other elf notes ensures that zero-filled data can be
 336		 * avoided.
 337		 */
 338#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 339		/* Read device dumps */
 340		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
 341			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
 342				  (size_t)*fpos, buflen);
 343			start = *fpos - elfcorebuf_sz;
 344			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
 345				return -EFAULT;
 346
 347			buflen -= tsz;
 348			*fpos += tsz;
 349			buffer += tsz;
 350			acc += tsz;
 351
 352			/* leave now if filled buffer already */
 353			if (!buflen)
 354				return acc;
 355		}
 356#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 357
 358		/* Read remaining elf notes */
 359		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
 360		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
 361		if (copy_to(buffer, kaddr, tsz, userbuf))
 362			return -EFAULT;
 363
 364		buflen -= tsz;
 365		*fpos += tsz;
 366		buffer += tsz;
 367		acc += tsz;
 368
 369		/* leave now if filled buffer already */
 370		if (buflen == 0)
 371			return acc;
 372	}
 373
 374	list_for_each_entry(m, &vmcore_list, list) {
 375		if (*fpos < m->offset + m->size) {
 376			tsz = (size_t)min_t(unsigned long long,
 377					    m->offset + m->size - *fpos,
 378					    buflen);
 379			start = m->paddr + *fpos - m->offset;
 380			tmp = read_from_oldmem(buffer, tsz, &start,
 381					       userbuf, mem_encrypt_active());
 382			if (tmp < 0)
 383				return tmp;
 384			buflen -= tsz;
 385			*fpos += tsz;
 386			buffer += tsz;
 387			acc += tsz;
 388
 389			/* leave now if filled buffer already */
 390			if (buflen == 0)
 391				return acc;
 392		}
 393	}
 394
 395	return acc;
 396}
 397
 398static ssize_t read_vmcore(struct file *file, char __user *buffer,
 399			   size_t buflen, loff_t *fpos)
 400{
 401	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
 402}
 403
 404/*
 405 * The vmcore fault handler uses the page cache and fills data using the
 406 * standard __vmcore_read() function.
 407 *
 408 * On s390 the fault handler is used for memory regions that can't be mapped
 409 * directly with remap_pfn_range().
 410 */
 411static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
 412{
 413#ifdef CONFIG_S390
 414	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 415	pgoff_t index = vmf->pgoff;
 416	struct page *page;
 417	loff_t offset;
 418	char *buf;
 419	int rc;
 420
 421	page = find_or_create_page(mapping, index, GFP_KERNEL);
 422	if (!page)
 423		return VM_FAULT_OOM;
 424	if (!PageUptodate(page)) {
 425		offset = (loff_t) index << PAGE_SHIFT;
 426		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
 427		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
 428		if (rc < 0) {
 429			unlock_page(page);
 430			put_page(page);
 431			return vmf_error(rc);
 432		}
 433		SetPageUptodate(page);
 434	}
 435	unlock_page(page);
 436	vmf->page = page;
 437	return 0;
 438#else
 439	return VM_FAULT_SIGBUS;
 440#endif
 441}
 442
 443static const struct vm_operations_struct vmcore_mmap_ops = {
 444	.fault = mmap_vmcore_fault,
 445};
 446
 447/**
 448 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 449 * @size: size of buffer
 450 *
 451 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 452 * the buffer to user-space by means of remap_vmalloc_range().
 453 *
 454 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 455 * disabled and there's no need to allow users to mmap the buffer.
 456 */
 457static inline char *vmcore_alloc_buf(size_t size)
 458{
 459#ifdef CONFIG_MMU
 460	return vmalloc_user(size);
 461#else
 462	return vzalloc(size);
 463#endif
 464}
 465
 466/*
 467 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 468 * essential for mmap_vmcore() in order to map physically
 469 * non-contiguous objects (ELF header, ELF note segment and memory
 470 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 471 * virtually contiguous user-space in ELF layout.
 472 */
 473#ifdef CONFIG_MMU
 474/*
 475 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 476 * reported as not being ram with the zero page.
 477 *
 478 * @vma: vm_area_struct describing requested mapping
 479 * @from: start remapping from
 480 * @pfn: page frame number to start remapping to
 481 * @size: remapping size
 482 * @prot: protection bits
 483 *
 484 * Returns zero on success, -EAGAIN on failure.
 485 */
 486static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
 487				    unsigned long from, unsigned long pfn,
 488				    unsigned long size, pgprot_t prot)
 489{
 490	unsigned long map_size;
 491	unsigned long pos_start, pos_end, pos;
 492	unsigned long zeropage_pfn = my_zero_pfn(0);
 493	size_t len = 0;
 494
 495	pos_start = pfn;
 496	pos_end = pfn + (size >> PAGE_SHIFT);
 497
 498	for (pos = pos_start; pos < pos_end; ++pos) {
 499		if (!pfn_is_ram(pos)) {
 500			/*
 501			 * We hit a page which is not ram. Remap the continuous
 502			 * region between pos_start and pos-1 and replace
 503			 * the non-ram page at pos with the zero page.
 504			 */
 505			if (pos > pos_start) {
 506				/* Remap continuous region */
 507				map_size = (pos - pos_start) << PAGE_SHIFT;
 508				if (remap_oldmem_pfn_range(vma, from + len,
 509							   pos_start, map_size,
 510							   prot))
 511					goto fail;
 512				len += map_size;
 513			}
 514			/* Remap the zero page */
 515			if (remap_oldmem_pfn_range(vma, from + len,
 516						   zeropage_pfn,
 517						   PAGE_SIZE, prot))
 518				goto fail;
 519			len += PAGE_SIZE;
 520			pos_start = pos + 1;
 521		}
 522	}
 523	if (pos > pos_start) {
 524		/* Remap the rest */
 525		map_size = (pos - pos_start) << PAGE_SHIFT;
 526		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
 527					   map_size, prot))
 528			goto fail;
 529	}
 530	return 0;
 531fail:
 532	do_munmap(vma->vm_mm, from, len, NULL);
 533	return -EAGAIN;
 534}
 535
 536static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
 537			    unsigned long from, unsigned long pfn,
 538			    unsigned long size, pgprot_t prot)
 539{
 540	/*
 541	 * Check if oldmem_pfn_is_ram was registered to avoid
 542	 * looping over all pages without a reason.
 543	 */
 544	if (oldmem_pfn_is_ram)
 545		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
 546	else
 547		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
 548}
 549
 550static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 551{
 552	size_t size = vma->vm_end - vma->vm_start;
 553	u64 start, end, len, tsz;
 554	struct vmcore *m;
 555
 556	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
 557	end = start + size;
 558
 559	if (size > vmcore_size || end > vmcore_size)
 560		return -EINVAL;
 561
 562	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
 563		return -EPERM;
 564
 565	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
 566	vma->vm_flags |= VM_MIXEDMAP;
 567	vma->vm_ops = &vmcore_mmap_ops;
 568
 569	len = 0;
 570
 571	if (start < elfcorebuf_sz) {
 572		u64 pfn;
 573
 574		tsz = min(elfcorebuf_sz - (size_t)start, size);
 575		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
 576		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
 577				    vma->vm_page_prot))
 578			return -EAGAIN;
 579		size -= tsz;
 580		start += tsz;
 581		len += tsz;
 582
 583		if (size == 0)
 584			return 0;
 585	}
 586
 587	if (start < elfcorebuf_sz + elfnotes_sz) {
 588		void *kaddr;
 589
 590		/* We add device dumps before other elf notes because the
 591		 * other elf notes may not fill the elf notes buffer
 592		 * completely and we will end up with zero-filled data
 593		 * between the elf notes and the device dumps. Tools will
 594		 * then try to decode this zero-filled data as valid notes
 595		 * and we don't want that. Hence, adding device dumps before
 596 * the other elf notes ensures that zero-filled data can be
 597		 * avoided. This also ensures that the device dumps and
 598		 * other elf notes can be properly mmaped at page aligned
 599		 * address.
 600		 */
 601#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 602		/* Read device dumps */
 603		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
 604			u64 start_off;
 605
 606			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
 607				  (size_t)start, size);
 608			start_off = start - elfcorebuf_sz;
 609			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
 610						start_off, tsz))
 611				goto fail;
 612
 613			size -= tsz;
 614			start += tsz;
 615			len += tsz;
 616
 617			/* leave now if filled buffer already */
 618			if (!size)
 619				return 0;
 620		}
 621#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 622
 623		/* Read remaining elf notes */
 624		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
 625		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
 626		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
 627						kaddr, 0, tsz))
 628			goto fail;
 629
 630		size -= tsz;
 631		start += tsz;
 632		len += tsz;
 633
 634		if (size == 0)
 635			return 0;
 636	}
 637
 638	list_for_each_entry(m, &vmcore_list, list) {
 639		if (start < m->offset + m->size) {
 640			u64 paddr = 0;
 641
 642			tsz = (size_t)min_t(unsigned long long,
 643					    m->offset + m->size - start, size);
 644			paddr = m->paddr + start - m->offset;
 645			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
 646						    paddr >> PAGE_SHIFT, tsz,
 647						    vma->vm_page_prot))
 648				goto fail;
 649			size -= tsz;
 650			start += tsz;
 651			len += tsz;
 652
 653			if (size == 0)
 654				return 0;
 655		}
 656	}
 657
 658	return 0;
 659fail:
 660	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
 661	return -EAGAIN;
 662}
 663#else
 664static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 665{
 666	return -ENOSYS;
 667}
 668#endif
 669
 670static const struct proc_ops vmcore_proc_ops = {
 671	.proc_read	= read_vmcore,
 672	.proc_lseek	= default_llseek,
 673	.proc_mmap	= mmap_vmcore,
 674};
 675
 676static struct vmcore* __init get_new_element(void)
 677{
 678	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
 679}
 680
 681static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
 682			   struct list_head *vc_list)
 683{
 684	u64 size;
 685	struct vmcore *m;
 686
 687	size = elfsz + elfnotesegsz;
 688	list_for_each_entry(m, vc_list, list) {
 689		size += m->size;
 690	}
 691	return size;
 692}
 693
 694/**
 695 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 696 *
 697 * @ehdr_ptr: ELF header
 698 *
 699 * This function updates p_memsz member of each PT_NOTE entry in the
 700 * program header table pointed to by @ehdr_ptr to real size of ELF
 701 * note segment.
 702 */
 703static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
 704{
 705	int i, rc=0;
 706	Elf64_Phdr *phdr_ptr;
 707	Elf64_Nhdr *nhdr_ptr;
 708
 709	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
 710	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 711		void *notes_section;
 712		u64 offset, max_sz, sz, real_sz = 0;
 713		if (phdr_ptr->p_type != PT_NOTE)
 714			continue;
 715		max_sz = phdr_ptr->p_memsz;
 716		offset = phdr_ptr->p_offset;
 717		notes_section = kmalloc(max_sz, GFP_KERNEL);
 718		if (!notes_section)
 719			return -ENOMEM;
 720		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 721		if (rc < 0) {
 722			kfree(notes_section);
 723			return rc;
 724		}
 725		nhdr_ptr = notes_section;
 726		while (nhdr_ptr->n_namesz != 0) {
 727			sz = sizeof(Elf64_Nhdr) +
 728				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
 729				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
 730			if ((real_sz + sz) > max_sz) {
 731				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
 732					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
 733				break;
 734			}
 735			real_sz += sz;
 736			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
 737		}
 738		kfree(notes_section);
 739		phdr_ptr->p_memsz = real_sz;
 740		if (real_sz == 0)
 741			pr_warn("Warning: Zero PT_NOTE entries found\n");
 743	}
 744
 745	return 0;
 746}
 747
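    /*
     * A worked example of the size computation above (illustrative
     * only, not part of the original file).  An ELF note entry is an
     * Elf64_Nhdr followed by the name and descriptor, each padded to a
     * 4-byte boundary; for a note with n_namesz = 5 ("CORE\0") and
     * n_descsz = 336 (sizeof(struct elf_prstatus) on x86_64):
     *
     *	sz = sizeof(Elf64_Nhdr)		// 12 bytes
     *	   + ((5 + 3) & ~3)		// name padded to 8 bytes
     *	   + ((336 + 3) & ~3);		// already aligned, 336 bytes
     *	// sz == 356
     *
     * real_sz accumulates this for every note in the segment, and
     * p_memsz is then trimmed from the upper bound max_sz to real_sz.
     */
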
 748/**
 749 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 750 * headers and sum of real size of their ELF note segment headers and
 751 * data.
 752 *
 753 * @ehdr_ptr: ELF header
 754 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 755 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 756 *
 757 * This function is used to merge multiple PT_NOTE program headers
 758 * into a single unique one. The resulting unique entry will have
 759 * @sz_ptnote in its phdr->p_memsz.
 760 *
 761 * It is assumed that the PT_NOTE program headers pointed to by
 762 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 763 * and that each of them has the actual ELF note segment size in its
 764 * p_memsz member.
 765 */
 766static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
 767						 int *nr_ptnote, u64 *sz_ptnote)
 768{
 769	int i;
 770	Elf64_Phdr *phdr_ptr;
 771
 772	*nr_ptnote = *sz_ptnote = 0;
 773
 774	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
 775	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 776		if (phdr_ptr->p_type != PT_NOTE)
 777			continue;
 778		*nr_ptnote += 1;
 779		*sz_ptnote += phdr_ptr->p_memsz;
 780	}
 781
 782	return 0;
 783}
 784
 785/**
 786 * copy_notes_elf64 - copy ELF note segments in a given buffer
 787 *
 788 * @ehdr_ptr: ELF header
 789 * @notes_buf: buffer into which ELF note segments are copied
 790 *
 791 * This function is used to copy the ELF note segments of the 1st
 792 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
 793 * that the size of the buffer @notes_buf is equal to or larger than
 794 * the sum of the real ELF note segment headers and data.
 795 *
 796 * It is assumed that the PT_NOTE program headers pointed to by
 797 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 798 * and that each of them has the actual ELF note segment size in its
 799 * p_memsz member.
 800 */
 801static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
 802{
 803	int i, rc = 0;
 804	Elf64_Phdr *phdr_ptr;
 805
 806	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
 807
 808	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 809		u64 offset;
 810		if (phdr_ptr->p_type != PT_NOTE)
 811			continue;
 812		offset = phdr_ptr->p_offset;
 813		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
 814					   &offset);
 815		if (rc < 0)
 816			return rc;
 817		notes_buf += phdr_ptr->p_memsz;
 818	}
 819
 820	return 0;
 821}
 822
 823/* Merges all the PT_NOTE headers into one. */
 824static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
 825					   char **notes_buf, size_t *notes_sz)
 826{
 827	int i, nr_ptnote = 0, rc = 0;
 828	char *tmp;
 829	Elf64_Ehdr *ehdr_ptr;
 830	Elf64_Phdr phdr;
 831	u64 phdr_sz = 0, note_off;
 832
 833	ehdr_ptr = (Elf64_Ehdr *)elfptr;
 834
 835	rc = update_note_header_size_elf64(ehdr_ptr);
 836	if (rc < 0)
 837		return rc;
 838
 839	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
 840	if (rc < 0)
 841		return rc;
 842
 843	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
 844	*notes_buf = vmcore_alloc_buf(*notes_sz);
 845	if (!*notes_buf)
 846		return -ENOMEM;
 847
 848	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
 849	if (rc < 0)
 850		return rc;
 851
 852	/* Prepare merged PT_NOTE program header. */
 853	phdr.p_type    = PT_NOTE;
 854	phdr.p_flags   = 0;
 855	note_off = sizeof(Elf64_Ehdr) +
 856			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
 857	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
 858	phdr.p_vaddr   = phdr.p_paddr = 0;
 859	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
 860	phdr.p_align   = 0;
 861
 862	/* Add merged PT_NOTE program header */
 863	tmp = elfptr + sizeof(Elf64_Ehdr);
 864	memcpy(tmp, &phdr, sizeof(phdr));
 865	tmp += sizeof(phdr);
 866
 867	/* Remove unwanted PT_NOTE program headers. */
 868	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
 869	*elfsz = *elfsz - i;
 870	memmove(tmp, tmp + i, *elfsz - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr));
 871	memset(elfptr + *elfsz, 0, i);
 872	*elfsz = roundup(*elfsz, PAGE_SIZE);
 873
 874	/* Modify e_phnum to reflect merged headers. */
 875	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
 876
 877	/* Store the size of all notes.  We need this to update the note
 878	 * header when device dumps are added.
 879	 */
 880	elfnotes_orig_sz = phdr.p_memsz;
 881
 882	return 0;
 883}
 884
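    /*
     * A sketch (not part of the original file) of the in-memory layout
     * after the merge above; the padding is zero-filled because both
     * buffers are allocated zeroed:
     *
     *	elfcorebuf:   [Elf64_Ehdr][merged PT_NOTE phdr][PT_LOAD phdrs...][pad to PAGE_SIZE]
     *	elfnotes_buf: [note 0][note 1]...[pad to PAGE_SIZE]
     *
     * The merged header's p_offset is the page-aligned file offset just
     * past the header area, i.e. exactly where read_vmcore() and
     * mmap_vmcore() serve elfnotes_buf from.
     */
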
 885/**
 886 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 887 *
 888 * @ehdr_ptr: ELF header
 889 *
 890 * This function updates p_memsz member of each PT_NOTE entry in the
 891 * program header table pointed to by @ehdr_ptr to real size of ELF
 892 * note segment.
 893 */
 894static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
 895{
 896	int i, rc = 0;
 897	Elf32_Phdr *phdr_ptr;
 898	Elf32_Nhdr *nhdr_ptr;
 899
 900	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
 901	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 902		void *notes_section;
 903		u64 offset, max_sz, sz, real_sz = 0;
 904		if (phdr_ptr->p_type != PT_NOTE)
 905			continue;
 906		max_sz = phdr_ptr->p_memsz;
 907		offset = phdr_ptr->p_offset;
 908		notes_section = kmalloc(max_sz, GFP_KERNEL);
 909		if (!notes_section)
 910			return -ENOMEM;
 911		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 912		if (rc < 0) {
 913			kfree(notes_section);
 914			return rc;
 915		}
 916		nhdr_ptr = notes_section;
 917		while (nhdr_ptr->n_namesz != 0) {
 918			sz = sizeof(Elf32_Nhdr) +
 919				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
 920				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
 921			if ((real_sz + sz) > max_sz) {
 922				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
 923					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
 924				break;
 925			}
 926			real_sz += sz;
 927			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
 928		}
 929		kfree(notes_section);
 930		phdr_ptr->p_memsz = real_sz;
 931		if (real_sz == 0)
 932			pr_warn("Warning: Zero PT_NOTE entries found\n");
 934	}
 935
 936	return 0;
 937}
 938
 939/**
 940 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 941 * headers and sum of real size of their ELF note segment headers and
 942 * data.
 943 *
 944 * @ehdr_ptr: ELF header
 945 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 946 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 947 *
 948 * This function is used to merge multiple PT_NOTE program headers
 949 * into a single unique one. The resulting unique entry will have
 950 * @sz_ptnote in its phdr->p_memsz.
 951 *
 952 * It is assumed that the PT_NOTE program headers pointed to by
 953 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 954 * and that each of them has the actual ELF note segment size in its
 955 * p_memsz member.
 956 */
 957static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
 958						 int *nr_ptnote, u64 *sz_ptnote)
 959{
 960	int i;
 961	Elf32_Phdr *phdr_ptr;
 962
 963	*nr_ptnote = *sz_ptnote = 0;
 964
 965	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
 966	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 967		if (phdr_ptr->p_type != PT_NOTE)
 968			continue;
 969		*nr_ptnote += 1;
 970		*sz_ptnote += phdr_ptr->p_memsz;
 971	}
 972
 973	return 0;
 974}
 975
 976/**
 977 * copy_notes_elf32 - copy ELF note segments in a given buffer
 978 *
 979 * @ehdr_ptr: ELF header
 980 * @notes_buf: buffer into which ELF note segments are copied
 981 *
 982 * This function is used to copy the ELF note segments of the 1st
 983 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
 984 * that the size of the buffer @notes_buf is equal to or larger than
 985 * the sum of the real ELF note segment headers and data.
 986 *
 987 * It is assumed that the PT_NOTE program headers pointed to by
 988 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 989 * and that each of them has the actual ELF note segment size in its
 990 * p_memsz member.
 991 */
 992static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
 993{
 994	int i, rc = 0;
 995	Elf32_Phdr *phdr_ptr;
 996
 997	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
 998
 999	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1000		u64 offset;
1001		if (phdr_ptr->p_type != PT_NOTE)
1002			continue;
1003		offset = phdr_ptr->p_offset;
1004		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
1005					   &offset);
1006		if (rc < 0)
1007			return rc;
1008		notes_buf += phdr_ptr->p_memsz;
1009	}
1010
1011	return 0;
1012}
1013
1014/* Merges all the PT_NOTE headers into one. */
1015static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
1016					   char **notes_buf, size_t *notes_sz)
1017{
1018	int i, nr_ptnote = 0, rc = 0;
1019	char *tmp;
1020	Elf32_Ehdr *ehdr_ptr;
1021	Elf32_Phdr phdr;
1022	u64 phdr_sz = 0, note_off;
1023
1024	ehdr_ptr = (Elf32_Ehdr *)elfptr;
1025
1026	rc = update_note_header_size_elf32(ehdr_ptr);
1027	if (rc < 0)
1028		return rc;
1029
1030	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
1031	if (rc < 0)
1032		return rc;
1033
1034	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
1035	*notes_buf = vmcore_alloc_buf(*notes_sz);
1036	if (!*notes_buf)
1037		return -ENOMEM;
1038
1039	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
1040	if (rc < 0)
1041		return rc;
1042
1043	/* Prepare merged PT_NOTE program header. */
1044	phdr.p_type    = PT_NOTE;
1045	phdr.p_flags   = 0;
1046	note_off = sizeof(Elf32_Ehdr) +
1047			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
1048	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
1049	phdr.p_vaddr   = phdr.p_paddr = 0;
1050	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
1051	phdr.p_align   = 0;
1052
1053	/* Add merged PT_NOTE program header */
1054	tmp = elfptr + sizeof(Elf32_Ehdr);
1055	memcpy(tmp, &phdr, sizeof(phdr));
1056	tmp += sizeof(phdr);
1057
1058	/* Remove unwanted PT_NOTE program headers. */
1059	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
1060	*elfsz = *elfsz - i;
1061	memmove(tmp, tmp + i, *elfsz - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr));
1062	memset(elfptr + *elfsz, 0, i);
1063	*elfsz = roundup(*elfsz, PAGE_SIZE);
1064
1065	/* Modify e_phnum to reflect merged headers. */
1066	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
1067
1068	/* Store the size of all notes.  We need this to update the note
1069	 * header when device dumps are added.
1070	 */
1071	elfnotes_orig_sz = phdr.p_memsz;
1072
1073	return 0;
1074}
1075
1076/* Add memory chunks represented by program headers to vmcore list. Also update
1077 * the new offset fields of exported program headers. */
1078static int __init process_ptload_program_headers_elf64(char *elfptr,
1079						size_t elfsz,
1080						size_t elfnotes_sz,
1081						struct list_head *vc_list)
1082{
1083	int i;
1084	Elf64_Ehdr *ehdr_ptr;
1085	Elf64_Phdr *phdr_ptr;
1086	loff_t vmcore_off;
1087	struct vmcore *new;
1088
1089	ehdr_ptr = (Elf64_Ehdr *)elfptr;
1090	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
1091
1092	/* Skip Elf header, program headers and Elf note segment. */
1093	vmcore_off = elfsz + elfnotes_sz;
1094
1095	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1096		u64 paddr, start, end, size;
1097
1098		if (phdr_ptr->p_type != PT_LOAD)
1099			continue;
1100
1101		paddr = phdr_ptr->p_offset;
1102		start = rounddown(paddr, PAGE_SIZE);
1103		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1104		size = end - start;
1105
1106		/* Add this contiguous chunk of memory to vmcore list. */
1107		new = get_new_element();
1108		if (!new)
1109			return -ENOMEM;
1110		new->paddr = start;
1111		new->size = size;
1112		list_add_tail(&new->list, vc_list);
1113
1114		/* Update the program header offset. */
1115		phdr_ptr->p_offset = vmcore_off + (paddr - start);
1116		vmcore_off = vmcore_off + size;
1117	}
1118	return 0;
1119}
1120
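    /*
     * Example of the offset fixup above (illustrative values assuming a
     * 4 KB PAGE_SIZE; none of them come from this file).  Suppose the
     * header area plus note segment occupy three pages, so
     * vmcore_off = 0x3000, and a PT_LOAD entry has p_offset = 0x1000100
     * and p_memsz = 0x200:
     *
     *	start = rounddown(0x1000100, PAGE_SIZE);	// 0x1000000
     *	end   = roundup(0x1000100 + 0x200, PAGE_SIZE);	// 0x1001000
     *	size  = end - start;				// one page
     *	phdr_ptr->p_offset = 0x3000 + 0x100;		// 0x3100
     *	vmcore_off += size;				// 0x4000
     *
     * The chunk is exported page-aligned while p_offset still points at
     * the exact byte where the segment's data begins.
     */
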
1121static int __init process_ptload_program_headers_elf32(char *elfptr,
1122						size_t elfsz,
1123						size_t elfnotes_sz,
1124						struct list_head *vc_list)
1125{
1126	int i;
1127	Elf32_Ehdr *ehdr_ptr;
1128	Elf32_Phdr *phdr_ptr;
1129	loff_t vmcore_off;
1130	struct vmcore *new;
1131
1132	ehdr_ptr = (Elf32_Ehdr *)elfptr;
1133	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
1134
1135	/* Skip Elf header, program headers and Elf note segment. */
1136	vmcore_off = elfsz + elfnotes_sz;
1137
1138	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1139		u64 paddr, start, end, size;
1140
1141		if (phdr_ptr->p_type != PT_LOAD)
1142			continue;
1143
1144		paddr = phdr_ptr->p_offset;
1145		start = rounddown(paddr, PAGE_SIZE);
1146		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1147		size = end - start;
1148
1149		/* Add this contiguous chunk of memory to vmcore list. */
1150		new = get_new_element();
1151		if (!new)
1152			return -ENOMEM;
1153		new->paddr = start;
1154		new->size = size;
1155		list_add_tail(&new->list, vc_list);
1156
1157		/* Update the program header offset */
1158		phdr_ptr->p_offset = vmcore_off + (paddr - start);
1159		vmcore_off = vmcore_off + size;
1160	}
1161	return 0;
1162}
1163
1164/* Sets offset fields of vmcore elements. */
1165static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
1166				    struct list_head *vc_list)
1167{
1168	loff_t vmcore_off;
1169	struct vmcore *m;
1170
1171	/* Skip Elf header, program headers and Elf note segment. */
1172	vmcore_off = elfsz + elfnotes_sz;
1173
1174	list_for_each_entry(m, vc_list, list) {
1175		m->offset = vmcore_off;
1176		vmcore_off += m->size;
1177	}
1178}
1179
1180static void free_elfcorebuf(void)
1181{
1182	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
1183	elfcorebuf = NULL;
1184	vfree(elfnotes_buf);
1185	elfnotes_buf = NULL;
1186}
1187
1188static int __init parse_crash_elf64_headers(void)
1189{
1190	int rc = 0;
1191	Elf64_Ehdr ehdr;
1192	u64 addr;
1193
1194	addr = elfcorehdr_addr;
1195
1196	/* Read Elf header */
1197	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
1198	if (rc < 0)
1199		return rc;
1200
1201	/* Do some basic verification. */
1202	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1203		(ehdr.e_type != ET_CORE) ||
1204		!vmcore_elf64_check_arch(&ehdr) ||
1205		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
1206		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1207		ehdr.e_version != EV_CURRENT ||
1208		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
1209		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
1210		ehdr.e_phnum == 0) {
1211		pr_warn("Warning: Core image elf header is not sane\n");
1212		return -EINVAL;
1213	}
1214
1215	/* Read in all elf headers. */
1216	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
1217				ehdr.e_phnum * sizeof(Elf64_Phdr);
1218	elfcorebuf_sz = elfcorebuf_sz_orig;
1219	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1220					      get_order(elfcorebuf_sz_orig));
1221	if (!elfcorebuf)
1222		return -ENOMEM;
1223	addr = elfcorehdr_addr;
1224	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1225	if (rc < 0)
1226		goto fail;
1227
1228	/* Merge all PT_NOTE headers into one. */
1229	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
1230				      &elfnotes_buf, &elfnotes_sz);
1231	if (rc)
1232		goto fail;
1233	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
1234						  elfnotes_sz, &vmcore_list);
1235	if (rc)
1236		goto fail;
1237	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1238	return 0;
1239fail:
1240	free_elfcorebuf();
1241	return rc;
1242}
1243
1244static int __init parse_crash_elf32_headers(void)
1245{
1246	int rc = 0;
1247	Elf32_Ehdr ehdr;
1248	u64 addr;
1249
1250	addr = elfcorehdr_addr;
1251
1252	/* Read Elf header */
1253	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
1254	if (rc < 0)
1255		return rc;
1256
1257	/* Do some basic verification. */
1258	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1259		(ehdr.e_type != ET_CORE) ||
1260		!vmcore_elf32_check_arch(&ehdr) ||
1261		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
1262		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1263		ehdr.e_version != EV_CURRENT ||
1264		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
1265		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
1266		ehdr.e_phnum == 0) {
1267		pr_warn("Warning: Core image elf header is not sane\n");
1268		return -EINVAL;
1269	}
1270
1271	/* Read in all elf headers. */
1272	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
1273	elfcorebuf_sz = elfcorebuf_sz_orig;
1274	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1275					      get_order(elfcorebuf_sz_orig));
1276	if (!elfcorebuf)
1277		return -ENOMEM;
1278	addr = elfcorehdr_addr;
1279	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1280	if (rc < 0)
1281		goto fail;
1282
1283	/* Merge all PT_NOTE headers into one. */
1284	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
1285				      &elfnotes_buf, &elfnotes_sz);
1286	if (rc)
1287		goto fail;
1288	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
1289						  elfnotes_sz, &vmcore_list);
1290	if (rc)
1291		goto fail;
1292	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1293	return 0;
1294fail:
1295	free_elfcorebuf();
1296	return rc;
1297}
1298
1299static int __init parse_crash_elf_headers(void)
1300{
1301	unsigned char e_ident[EI_NIDENT];
1302	u64 addr;
1303	int rc = 0;
1304
1305	addr = elfcorehdr_addr;
1306	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
1307	if (rc < 0)
1308		return rc;
1309	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
1310		pr_warn("Warning: Core image elf header not found\n");
1311		return -EINVAL;
1312	}
1313
1314	if (e_ident[EI_CLASS] == ELFCLASS64) {
1315		rc = parse_crash_elf64_headers();
1316		if (rc)
1317			return rc;
1318	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
1319		rc = parse_crash_elf32_headers();
1320		if (rc)
1321			return rc;
1322	} else {
1323		pr_warn("Warning: Core image elf header is not sane\n");
1324		return -EINVAL;
1325	}
1326
1327	/* Determine vmcore size. */
1328	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1329				      &vmcore_list);
1330
1331	return 0;
1332}
1333
1334#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1335/**
1336 * vmcoredd_write_header - Write vmcore device dump header at the
1337 * beginning of the dump's buffer.
1338 * @buf: Output buffer where the note is written
1339 * @data: Dump info
1340 * @size: Size of the dump
1341 *
1342 * Fills beginning of the dump's buffer with vmcore device dump header.
1343 */
1344static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
1345				  u32 size)
1346{
1347	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;
1348
1349	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
1350	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
1351	vdd_hdr->n_type = NT_VMCOREDD;
1352
1353	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
1354		sizeof(vdd_hdr->name));
1355	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
1356}
1357
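    /*
     * Resulting buffer layout (a sketch; the exact field sizes are those
     * of struct vmcoredd_header):
     *
     *	[n_namesz | n_descsz | n_type = NT_VMCOREDD]	Elf note header
     *	[name = VMCOREDD_NOTE_NAME]
     *	[dump_name from vmcoredd_data]			start of desc
     *	[@size bytes of device dump data]
     *
     * n_descsz covers both dump_name and the dump payload, so a standard
     * ELF note parser sees the whole device dump as one NT_VMCOREDD note.
     */
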
1358/**
1359 * vmcoredd_update_program_headers - Update all Elf program headers
1360 * @elfptr: Pointer to elf header
1361 * @elfnotesz: Size of elf notes aligned to page size
1362 * @vmcoreddsz: Size of device dumps to be added to elf note header
1363 *
1364 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
1365 * Also update the offsets of all the program headers after the elf note header.
1366 */
1367static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
1368					    size_t vmcoreddsz)
1369{
1370	unsigned char *e_ident = (unsigned char *)elfptr;
1371	u64 start, end, size;
1372	loff_t vmcore_off;
1373	u32 i;
1374
1375	vmcore_off = elfcorebuf_sz + elfnotesz;
1376
1377	if (e_ident[EI_CLASS] == ELFCLASS64) {
1378		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
1379		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
1380
1381		/* Update all program headers */
1382		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1383			if (phdr->p_type == PT_NOTE) {
1384				/* Update note size */
1385				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1386				phdr->p_filesz = phdr->p_memsz;
1387				continue;
1388			}
1389
1390			start = rounddown(phdr->p_offset, PAGE_SIZE);
1391			end = roundup(phdr->p_offset + phdr->p_memsz,
1392				      PAGE_SIZE);
1393			size = end - start;
1394			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1395			vmcore_off += size;
1396		}
1397	} else {
1398		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
1399		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
1400
1401		/* Update all program headers */
1402		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1403			if (phdr->p_type == PT_NOTE) {
1404				/* Update note size */
1405				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1406				phdr->p_filesz = phdr->p_memsz;
1407				continue;
1408			}
1409
1410			start = rounddown(phdr->p_offset, PAGE_SIZE);
1411			end = roundup(phdr->p_offset + phdr->p_memsz,
1412				      PAGE_SIZE);
1413			size = end - start;
1414			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1415			vmcore_off += size;
1416		}
1417	}
1418}
1419
1420/**
1421 * vmcoredd_update_size - Update the total size of the device dumps and update
1422 * Elf header
1423 * @dump_size: Size of the current device dump to be added to total size
1424 *
1425 * Update the total size of all the device dumps and update the Elf program
1426 * headers. Calculate the new offsets for the vmcore list and update the
1427 * total vmcore size.
1428 */
1429static void vmcoredd_update_size(size_t dump_size)
1430{
1431	vmcoredd_orig_sz += dump_size;
1432	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
1433	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
1434					vmcoredd_orig_sz);
1435
1436	/* Update vmcore list offsets */
1437	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1438
1439	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1440				      &vmcore_list);
1441	proc_vmcore->size = vmcore_size;
1442}
1443
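    /*
     * Example of the bookkeeping above (illustrative numbers assuming a
     * 4 KB PAGE_SIZE): with elfnotes_orig_sz = 0x1200 and a first device
     * dump of dump_size = 0x3000,
     *
     *	vmcoredd_orig_sz = 0x3000;
     *	elfnotes_sz = roundup(0x1200, PAGE_SIZE) + 0x3000;	// 0x5000
     *
     * so device dumps start at the page boundary right after the
     * original notes, and every PT_LOAD offset is shifted up accordingly
     * by vmcoredd_update_program_headers().
     */
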
1444/**
1445 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
1446 * @data: dump info.
1447 *
1448 * Allocate a buffer and invoke the calling driver's dump collect routine.
1449 * Write Elf note at the beginning of the buffer to indicate vmcore device
1450 * dump and add the dump to the global list.
1451 */
1452int vmcore_add_device_dump(struct vmcoredd_data *data)
1453{
1454	struct vmcoredd_node *dump;
1455	void *buf = NULL;
1456	size_t data_size;
1457	int ret;
1458
1459	if (vmcoredd_disabled) {
1460		pr_err_once("Device dump is disabled\n");
1461		return -EINVAL;
1462	}
1463
1464	if (!data || !strlen(data->dump_name) ||
1465	    !data->vmcoredd_callback || !data->size)
1466		return -EINVAL;
1467
1468	dump = vzalloc(sizeof(*dump));
1469	if (!dump) {
1470		ret = -ENOMEM;
1471		goto out_err;
1472	}
1473
1474	/* Keep the buffer size page-aligned so that it can be mmapped */
1475	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
1476			    PAGE_SIZE);
1477
1478	/* Allocate buffer for drivers to write their dumps into */
1479	buf = vmcore_alloc_buf(data_size);
1480	if (!buf) {
1481		ret = -ENOMEM;
1482		goto out_err;
1483	}
1484
1485	vmcoredd_write_header(buf, data, data_size -
1486			      sizeof(struct vmcoredd_header));
1487
1488	/* Invoke the driver's dump collection routine */
1489	ret = data->vmcoredd_callback(data, buf +
1490				      sizeof(struct vmcoredd_header));
1491	if (ret)
1492		goto out_err;
1493
1494	dump->buf = buf;
1495	dump->size = data_size;
1496
1497	/* Add the dump to the vmcore device dump list */
1498	mutex_lock(&vmcoredd_mutex);
1499	list_add_tail(&dump->list, &vmcoredd_list);
1500	mutex_unlock(&vmcoredd_mutex);
1501
1502	vmcoredd_update_size(data_size);
1503	return 0;
1504
1505out_err:
1506	vfree(buf);
1507	vfree(dump);
1508
1509	return ret;
1510}
1511EXPORT_SYMBOL(vmcore_add_device_dump);
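
    /*
     * Minimal usage sketch for a driver (hypothetical names throughout;
     * foo_collect(), foo_dd and the 1 MB size are made up for
     * illustration):
     *
     *	static int foo_collect(struct vmcoredd_data *data, void *buf)
     *	{
     *		// fill buf with at most data->size bytes of device state
     *		return 0;
     *	}
     *
     *	static struct vmcoredd_data foo_dd = {
     *		.dump_name = "foo",
     *		.vmcoredd_callback = foo_collect,
     *		.size = SZ_1M,
     *	};
     *
     *	err = vmcore_add_device_dump(&foo_dd);
     *
     * On success the dump appears as an NT_VMCOREDD note in the crash
     * kernel's /proc/vmcore; the call fails with -EINVAL if device dumps
     * are disabled or any vmcoredd_data field is missing.
     */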
1512#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1513
1514/* Free all dumps in vmcore device dump list */
1515static void vmcore_free_device_dumps(void)
1516{
1517#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1518	mutex_lock(&vmcoredd_mutex);
1519	while (!list_empty(&vmcoredd_list)) {
1520		struct vmcoredd_node *dump;
1521
1522		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
1523					list);
1524		list_del(&dump->list);
1525		vfree(dump->buf);
1526		vfree(dump);
1527	}
1528	mutex_unlock(&vmcoredd_mutex);
1529#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1530}
1531
1532/* Init function for vmcore module. */
1533static int __init vmcore_init(void)
1534{
1535	int rc = 0;
1536
1537	/* Allow architectures to allocate ELF header in 2nd kernel */
1538	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
1539	if (rc)
1540		return rc;
1541	/*
1542	 * If elfcorehdr= has been passed on the command line, or the header
1543	 * has been created in the 2nd kernel, then capture the dump.
1544	 */
1545	if (!is_vmcore_usable())
1546		return rc;
1547	rc = parse_crash_elf_headers();
1548	if (rc) {
1549		pr_warn("Kdump: vmcore not initialized\n");
1550		return rc;
1551	}
1552	elfcorehdr_free(elfcorehdr_addr);
1553	elfcorehdr_addr = ELFCORE_ADDR_ERR;
1554
1555	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
1556	if (proc_vmcore)
1557		proc_vmcore->size = vmcore_size;
1558	return 0;
1559}
1560fs_initcall(vmcore_init);
1561
1562/* Cleanup function for vmcore module. */
1563void vmcore_cleanup(void)
1564{
1565	if (proc_vmcore) {
1566		proc_remove(proc_vmcore);
1567		proc_vmcore = NULL;
1568	}
1569
1570	/* clear the vmcore list. */
1571	while (!list_empty(&vmcore_list)) {
1572		struct vmcore *m;
1573
1574		m = list_first_entry(&vmcore_list, struct vmcore, list);
1575		list_del(&m->list);
1576		kfree(m);
1577	}
1578	free_elfcorebuf();
1579
1580	/* clear vmcore device dump list */
1581	vmcore_free_device_dumps();
1582}