fs/proc/vmcore.c (Linux v6.2)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *	fs/proc/vmcore.c Interface for accessing the crash
   4 * 				 dump from the system's previous life.
   5 * 	Heavily borrowed from fs/proc/kcore.c
   6 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
   7 *	Copyright (C) IBM Corporation, 2004. All rights reserved
   8 *
   9 */
  10
  11#include <linux/mm.h>
  12#include <linux/kcore.h>
  13#include <linux/user.h>
  14#include <linux/elf.h>
  15#include <linux/elfcore.h>
  16#include <linux/export.h>
  17#include <linux/slab.h>
  18#include <linux/highmem.h>
  19#include <linux/printk.h>
  20#include <linux/memblock.h>
  21#include <linux/init.h>
  22#include <linux/crash_dump.h>
  23#include <linux/list.h>
  24#include <linux/moduleparam.h>
  25#include <linux/mutex.h>
  26#include <linux/vmalloc.h>
  27#include <linux/pagemap.h>
  28#include <linux/uio.h>
  29#include <linux/cc_platform.h>
  30#include <asm/io.h>
  31#include "internal.h"
  32
  33/* List representing chunks of contiguous memory areas and their offsets in
  34 * vmcore file.
  35 */
  36static LIST_HEAD(vmcore_list);
  37
  38/* Stores the pointer to the buffer containing kernel elf core headers. */
  39static char *elfcorebuf;
  40static size_t elfcorebuf_sz;
  41static size_t elfcorebuf_sz_orig;
  42
  43static char *elfnotes_buf;
  44static size_t elfnotes_sz;
  45/* Size of all notes minus the device dump notes */
  46static size_t elfnotes_orig_sz;
  47
  48/* Total size of vmcore file. */
  49static u64 vmcore_size;
  50
  51static struct proc_dir_entry *proc_vmcore;
  52
  53#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
  54/* Device Dump list and mutex to synchronize access to list */
  55static LIST_HEAD(vmcoredd_list);
  56static DEFINE_MUTEX(vmcoredd_mutex);
  57
  58static bool vmcoredd_disabled;
  59core_param(novmcoredd, vmcoredd_disabled, bool, 0);
  60#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
  61
  62/* Device Dump Size */
  63static size_t vmcoredd_orig_sz;
  64
  65static DEFINE_SPINLOCK(vmcore_cb_lock);
  66DEFINE_STATIC_SRCU(vmcore_cb_srcu);
  67/* List of registered vmcore callbacks. */
  68static LIST_HEAD(vmcore_cb_list);
  69/* Whether the vmcore has been opened once. */
  70static bool vmcore_opened;
  71
  72void register_vmcore_cb(struct vmcore_cb *cb)
  73{
  74	INIT_LIST_HEAD(&cb->next);
  75	spin_lock(&vmcore_cb_lock);
  76	list_add_tail(&cb->next, &vmcore_cb_list);
  77	/*
  78	 * Registering a vmcore callback after the vmcore was opened is
  79	 * very unusual (e.g., manual driver loading).
  80	 */
  81	if (vmcore_opened)
  82		pr_warn_once("Unexpected vmcore callback registration\n");
  83	spin_unlock(&vmcore_cb_lock);
  84}
  85EXPORT_SYMBOL_GPL(register_vmcore_cb);
  86
  87void unregister_vmcore_cb(struct vmcore_cb *cb)
  88{
  89	spin_lock(&vmcore_cb_lock);
  90	list_del_rcu(&cb->next);
  91	/*
  92	 * Unregistering a vmcore callback after the vmcore was opened is
  93	 * very unusual (e.g., forced driver removal), but we cannot stop
  94	 * unregistering.
  95	 */
  96	if (vmcore_opened)
  97		pr_warn_once("Unexpected vmcore callback unregistration\n");
  98	spin_unlock(&vmcore_cb_lock);
  99
 100	synchronize_srcu(&vmcore_cb_srcu);
 101}
 102EXPORT_SYMBOL_GPL(unregister_vmcore_cb);
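
/*
 * Editor's sketch, not part of the original source: minimal usage of the
 * callback API above by a driver loaded in the kdump kernel. The API
 * (struct vmcore_cb, register_vmcore_cb(), unregister_vmcore_cb()) is as
 * used in this file; every my_* name is a hypothetical stand-in.
 */
#include <linux/crash_dump.h>

static bool my_page_has_no_data(unsigned long pfn)
{
	return false;	/* hypothetical device-specific check */
}

static bool my_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
	/* Returning false makes reads/mmaps of this pfn yield zeros. */
	return !my_page_has_no_data(pfn);
}

static struct vmcore_cb my_vmcore_cb = {
	.pfn_is_ram = my_pfn_is_ram,
};

static void my_probe(void)
{
	register_vmcore_cb(&my_vmcore_cb);
}

static void my_remove(void)
{
	unregister_vmcore_cb(&my_vmcore_cb);
}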
 103
 104static bool pfn_is_ram(unsigned long pfn)
 105{
 106	struct vmcore_cb *cb;
 107	bool ret = true;
 108
 109	list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
 110				 srcu_read_lock_held(&vmcore_cb_srcu)) {
 111		if (unlikely(!cb->pfn_is_ram))
 112			continue;
 113		ret = cb->pfn_is_ram(cb, pfn);
 114		if (!ret)
 115			break;
 116	}
 117
 118	return ret;
  119}
  120
 121static int open_vmcore(struct inode *inode, struct file *file)
 122{
 123	spin_lock(&vmcore_cb_lock);
 124	vmcore_opened = true;
 125	spin_unlock(&vmcore_cb_lock);
 126
  127	return 0;
  128}
 129
  130/* Reads a page from the oldmem device from a given offset. */
 131ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
 132			 u64 *ppos, bool encrypted)
 133{
 134	unsigned long pfn, offset;
 135	size_t nr_bytes;
 136	ssize_t read = 0, tmp;
 137	int idx;
 138
 139	if (!count)
 140		return 0;
 141
 142	offset = (unsigned long)(*ppos % PAGE_SIZE);
 143	pfn = (unsigned long)(*ppos / PAGE_SIZE);
 144
 145	idx = srcu_read_lock(&vmcore_cb_srcu);
 146	do {
 147		if (count > (PAGE_SIZE - offset))
 148			nr_bytes = PAGE_SIZE - offset;
 149		else
 150			nr_bytes = count;
 151
 152		/* If pfn is not ram, return zeros for sparse dump files */
 153		if (!pfn_is_ram(pfn)) {
 154			tmp = iov_iter_zero(nr_bytes, iter);
 155		} else {
 156			if (encrypted)
 157				tmp = copy_oldmem_page_encrypted(iter, pfn,
 158								 nr_bytes,
 159								 offset);
 160			else
 161				tmp = copy_oldmem_page(iter, pfn, nr_bytes,
 162						       offset);
 163		}
 164		if (tmp < nr_bytes) {
 165			srcu_read_unlock(&vmcore_cb_srcu, idx);
 166			return -EFAULT;
 167		}
 168
 169		*ppos += nr_bytes;
  170		count -= nr_bytes;
  171		read += nr_bytes;
 172		++pfn;
 173		offset = 0;
 174	} while (count);
 175	srcu_read_unlock(&vmcore_cb_srcu, idx);
 176
 177	return read;
 178}
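
/*
 * Editor's note: a worked pass through the loop above, assuming a 4 KiB
 * PAGE_SIZE. For *ppos = 0x1ff8 and count = 5000: offset = 0xff8 and
 * pfn = 1, so the first iteration copies 8 bytes (up to the page
 * boundary), the second copies 4096 bytes from pfn 2, and the third
 * copies the remaining 896 bytes from pfn 3 at offset 0.
 */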
 179
 180/*
 181 * Architectures may override this function to allocate ELF header in 2nd kernel
 182 */
 183int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 184{
 185	return 0;
 186}
 187
 188/*
 189 * Architectures may override this function to free header
 190 */
 191void __weak elfcorehdr_free(unsigned long long addr)
 192{}
 193
 194/*
 195 * Architectures may override this function to read from ELF header
 196 */
 197ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 198{
 199	struct kvec kvec = { .iov_base = buf, .iov_len = count };
 200	struct iov_iter iter;
 201
 202	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
 203
 204	return read_from_oldmem(&iter, count, ppos, false);
 205}
 206
 207/*
 208 * Architectures may override this function to read from notes sections
 209 */
 210ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
 211{
 212	struct kvec kvec = { .iov_base = buf, .iov_len = count };
 213	struct iov_iter iter;
 214
 215	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
 216
 217	return read_from_oldmem(&iter, count, ppos,
 218			cc_platform_has(CC_ATTR_MEM_ENCRYPT));
 219}
 220
 221/*
 222 * Architectures may override this function to map oldmem
 223 */
 224int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
 225				  unsigned long from, unsigned long pfn,
 226				  unsigned long size, pgprot_t prot)
 227{
 228	prot = pgprot_encrypted(prot);
 229	return remap_pfn_range(vma, from, pfn, size, prot);
 230}
 231
 232/*
 233 * Architectures which support memory encryption override this.
 234 */
 235ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
 236		unsigned long pfn, size_t csize, unsigned long offset)
 237{
 238	return copy_oldmem_page(iter, pfn, csize, offset);
 239}
 240
 241#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 242static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
 243{
 244	struct vmcoredd_node *dump;
 245	u64 offset = 0;
 246	int ret = 0;
 247	size_t tsz;
 248	char *buf;
 249
 250	mutex_lock(&vmcoredd_mutex);
 251	list_for_each_entry(dump, &vmcoredd_list, list) {
 252		if (start < offset + dump->size) {
 253			tsz = min(offset + (u64)dump->size - start, (u64)size);
 254			buf = dump->buf + start - offset;
 255			if (copy_to_iter(buf, tsz, iter) < tsz) {
 256				ret = -EFAULT;
 257				goto out_unlock;
 258			}
 259
 260			size -= tsz;
 261			start += tsz;
 262
 263			/* Leave now if buffer filled already */
 264			if (!size)
 265				goto out_unlock;
 266		}
 267		offset += dump->size;
 268	}
 269
 270out_unlock:
 271	mutex_unlock(&vmcoredd_mutex);
 272	return ret;
 273}
 274
 275#ifdef CONFIG_MMU
 276static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
 277			       u64 start, size_t size)
 278{
 279	struct vmcoredd_node *dump;
 280	u64 offset = 0;
 281	int ret = 0;
 282	size_t tsz;
 283	char *buf;
 284
 285	mutex_lock(&vmcoredd_mutex);
 286	list_for_each_entry(dump, &vmcoredd_list, list) {
 287		if (start < offset + dump->size) {
 288			tsz = min(offset + (u64)dump->size - start, (u64)size);
 289			buf = dump->buf + start - offset;
 290			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
 291							tsz)) {
 292				ret = -EFAULT;
 293				goto out_unlock;
 294			}
 295
 296			size -= tsz;
 297			start += tsz;
 298			dst += tsz;
 299
 300			/* Leave now if buffer filled already */
 301			if (!size)
 302				goto out_unlock;
 303		}
 304		offset += dump->size;
 305	}
 306
 307out_unlock:
 308	mutex_unlock(&vmcoredd_mutex);
 309	return ret;
 310}
 311#endif /* CONFIG_MMU */
 312#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 313
  314/* Read from the ELF header and then the crash dump. On error, a negative
  315 * value is returned; otherwise, the number of bytes read is returned.
 316 */
  317static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
  318{
 319	ssize_t acc = 0, tmp;
 320	size_t tsz;
 321	u64 start;
 322	struct vmcore *m = NULL;
 323
 324	if (!iov_iter_count(iter) || *fpos >= vmcore_size)
 325		return 0;
 326
  327	iov_iter_truncate(iter, vmcore_size - *fpos);
  328
 329	/* Read ELF core header */
 330	if (*fpos < elfcorebuf_sz) {
 331		tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
 332		if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
  333			return -EFAULT;
  334		*fpos += tsz;
  335		acc += tsz;
 336
 337		/* leave now if filled buffer already */
 338		if (!iov_iter_count(iter))
 339			return acc;
 340	}
 341
 342	/* Read Elf note segment */
 343	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
 344		void *kaddr;
 345
 346		/* We add device dumps before other elf notes because the
 347		 * other elf notes may not fill the elf notes buffer
 348		 * completely and we will end up with zero-filled data
 349		 * between the elf notes and the device dumps. Tools will
 350		 * then try to decode this zero-filled data as valid notes
 351		 * and we don't want that. Hence, adding device dumps before
  352		 * the other elf notes ensures that zero-filled data can be
 353		 * avoided.
 354		 */
 355#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 356		/* Read device dumps */
 357		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
 358			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
 359				  (size_t)*fpos, iov_iter_count(iter));
 360			start = *fpos - elfcorebuf_sz;
 361			if (vmcoredd_copy_dumps(iter, start, tsz))
 362				return -EFAULT;
 363
 364			*fpos += tsz;
 365			acc += tsz;
 366
 367			/* leave now if filled buffer already */
 368			if (!iov_iter_count(iter))
 369				return acc;
 370		}
 371#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 372
 373		/* Read remaining elf notes */
 374		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
 375			  iov_iter_count(iter));
 376		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
 377		if (copy_to_iter(kaddr, tsz, iter) < tsz)
 378			return -EFAULT;
 379
  380		*fpos += tsz;
  381		acc += tsz;
 382
 383		/* leave now if filled buffer already */
 384		if (!iov_iter_count(iter))
 385			return acc;
 386	}
 387
 388	list_for_each_entry(m, &vmcore_list, list) {
 389		if (*fpos < m->offset + m->size) {
 390			tsz = (size_t)min_t(unsigned long long,
 391					    m->offset + m->size - *fpos,
 392					    iov_iter_count(iter));
 393			start = m->paddr + *fpos - m->offset;
 394			tmp = read_from_oldmem(iter, tsz, &start,
 395					cc_platform_has(CC_ATTR_MEM_ENCRYPT));
 396			if (tmp < 0)
  397				return tmp;
  398			*fpos += tsz;
  399			acc += tsz;
 400
 401			/* leave now if filled buffer already */
 402			if (!iov_iter_count(iter))
 403				return acc;
 404		}
 405	}
 406
 407	return acc;
 408}
 409
  410static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
  411{
 412	return __read_vmcore(iter, &iocb->ki_pos);
 413}
 414
 415/*
 416 * The vmcore fault handler uses the page cache and fills data using the
 417 * standard __read_vmcore() function.
 418 *
 419 * On s390 the fault handler is used for memory regions that can't be mapped
 420 * directly with remap_pfn_range().
 421 */
 422static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
 423{
 424#ifdef CONFIG_S390
 425	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 426	pgoff_t index = vmf->pgoff;
 427	struct iov_iter iter;
 428	struct kvec kvec;
 429	struct page *page;
  430	loff_t offset;
  431	int rc;
 432
 433	page = find_or_create_page(mapping, index, GFP_KERNEL);
 434	if (!page)
 435		return VM_FAULT_OOM;
 436	if (!PageUptodate(page)) {
 437		offset = (loff_t) index << PAGE_SHIFT;
 438		kvec.iov_base = page_address(page);
 439		kvec.iov_len = PAGE_SIZE;
 440		iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE);
 441
 442		rc = __read_vmcore(&iter, &offset);
 443		if (rc < 0) {
 444			unlock_page(page);
 445			put_page(page);
 446			return vmf_error(rc);
 447		}
 448		SetPageUptodate(page);
 449	}
 450	unlock_page(page);
 451	vmf->page = page;
 452	return 0;
 453#else
 454	return VM_FAULT_SIGBUS;
 455#endif
 456}
 457
 458static const struct vm_operations_struct vmcore_mmap_ops = {
 459	.fault = mmap_vmcore_fault,
 460};
 461
 462/**
 463 * vmcore_alloc_buf - allocate buffer in vmalloc memory
  464 * @size: size of buffer
  465 *
 466 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 467 * the buffer to user-space by means of remap_vmalloc_range().
 468 *
 469 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 470 * disabled and there's no need to allow users to mmap the buffer.
 471 */
 472static inline char *vmcore_alloc_buf(size_t size)
 473{
 474#ifdef CONFIG_MMU
 475	return vmalloc_user(size);
 476#else
 477	return vzalloc(size);
 478#endif
 479}
 480
 481/*
 482 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 483 * essential for mmap_vmcore() in order to map physically
 484 * non-contiguous objects (ELF header, ELF note segment and memory
 485 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 486 * virtually contiguous user-space in ELF layout.
 487 */
 488#ifdef CONFIG_MMU
 489/*
 490 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 491 * reported as not being ram with the zero page.
 492 *
 493 * @vma: vm_area_struct describing requested mapping
 494 * @from: start remapping from
 495 * @pfn: page frame number to start remapping to
 496 * @size: remapping size
 497 * @prot: protection bits
 498 *
 499 * Returns zero on success, -EAGAIN on failure.
 500 */
 501static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
 502				    unsigned long from, unsigned long pfn,
 503				    unsigned long size, pgprot_t prot)
 504{
 505	unsigned long map_size;
 506	unsigned long pos_start, pos_end, pos;
 507	unsigned long zeropage_pfn = my_zero_pfn(0);
 508	size_t len = 0;
 509
 510	pos_start = pfn;
 511	pos_end = pfn + (size >> PAGE_SHIFT);
 512
 513	for (pos = pos_start; pos < pos_end; ++pos) {
 514		if (!pfn_is_ram(pos)) {
 515			/*
 516			 * We hit a page which is not ram. Remap the continuous
 517			 * region between pos_start and pos-1 and replace
 518			 * the non-ram page at pos with the zero page.
 519			 */
 520			if (pos > pos_start) {
 521				/* Remap continuous region */
 522				map_size = (pos - pos_start) << PAGE_SHIFT;
 523				if (remap_oldmem_pfn_range(vma, from + len,
 524							   pos_start, map_size,
 525							   prot))
 526					goto fail;
 527				len += map_size;
 528			}
 529			/* Remap the zero page */
 530			if (remap_oldmem_pfn_range(vma, from + len,
 531						   zeropage_pfn,
 532						   PAGE_SIZE, prot))
 533				goto fail;
 534			len += PAGE_SIZE;
 535			pos_start = pos + 1;
 536		}
 537	}
 538	if (pos > pos_start) {
 539		/* Remap the rest */
 540		map_size = (pos - pos_start) << PAGE_SHIFT;
 541		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
 542					   map_size, prot))
 543			goto fail;
 544	}
 545	return 0;
 546fail:
 547	do_munmap(vma->vm_mm, from, len, NULL);
 548	return -EAGAIN;
 549}
 550
 551static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
 552			    unsigned long from, unsigned long pfn,
 553			    unsigned long size, pgprot_t prot)
 554{
 555	int ret, idx;
 556
 557	/*
 558	 * Check if a callback was registered to avoid looping over all
 559	 * pages without a reason.
 560	 */
 561	idx = srcu_read_lock(&vmcore_cb_srcu);
 562	if (!list_empty(&vmcore_cb_list))
 563		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
 564	else
 565		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
 566	srcu_read_unlock(&vmcore_cb_srcu, idx);
 567	return ret;
 568}
 569
 570static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 571{
 572	size_t size = vma->vm_end - vma->vm_start;
 573	u64 start, end, len, tsz;
 574	struct vmcore *m;
 575
 576	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
 577	end = start + size;
 578
 579	if (size > vmcore_size || end > vmcore_size)
 580		return -EINVAL;
 581
 582	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
 583		return -EPERM;
 584
 585	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
 586	vma->vm_flags |= VM_MIXEDMAP;
 587	vma->vm_ops = &vmcore_mmap_ops;
 588
 589	len = 0;
 590
 591	if (start < elfcorebuf_sz) {
 592		u64 pfn;
 593
 594		tsz = min(elfcorebuf_sz - (size_t)start, size);
 595		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
 596		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
 597				    vma->vm_page_prot))
 598			return -EAGAIN;
 599		size -= tsz;
 600		start += tsz;
 601		len += tsz;
 602
 603		if (size == 0)
 604			return 0;
 605	}
 606
 607	if (start < elfcorebuf_sz + elfnotes_sz) {
 608		void *kaddr;
 609
 610		/* We add device dumps before other elf notes because the
 611		 * other elf notes may not fill the elf notes buffer
 612		 * completely and we will end up with zero-filled data
 613		 * between the elf notes and the device dumps. Tools will
 614		 * then try to decode this zero-filled data as valid notes
 615		 * and we don't want that. Hence, adding device dumps before
  616		 * the other elf notes ensures that zero-filled data can be
  617		 * avoided. This also ensures that the device dumps and
  618		 * other elf notes can be properly mmapped at a page-aligned
 619		 * address.
 620		 */
 621#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 622		/* Read device dumps */
 623		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
 624			u64 start_off;
 625
 626			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
 627				  (size_t)start, size);
 628			start_off = start - elfcorebuf_sz;
 629			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
 630						start_off, tsz))
 631				goto fail;
 632
 633			size -= tsz;
 634			start += tsz;
 635			len += tsz;
 636
 637			/* leave now if filled buffer already */
 638			if (!size)
 639				return 0;
 640		}
 641#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 642
 643		/* Read remaining elf notes */
 644		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
 645		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
 646		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
 647						kaddr, 0, tsz))
 648			goto fail;
 649
 650		size -= tsz;
 651		start += tsz;
 652		len += tsz;
 653
 654		if (size == 0)
 655			return 0;
 656	}
 657
 658	list_for_each_entry(m, &vmcore_list, list) {
 659		if (start < m->offset + m->size) {
 660			u64 paddr = 0;
 661
 662			tsz = (size_t)min_t(unsigned long long,
 663					    m->offset + m->size - start, size);
 664			paddr = m->paddr + start - m->offset;
 665			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
 666						    paddr >> PAGE_SHIFT, tsz,
 667						    vma->vm_page_prot))
 668				goto fail;
 669			size -= tsz;
 670			start += tsz;
 671			len += tsz;
 672
 673			if (size == 0)
 674				return 0;
 675		}
 676	}
 677
 678	return 0;
 679fail:
 680	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
 681	return -EAGAIN;
 682}
 683#else
 684static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 685{
 686	return -ENOSYS;
 687}
 688#endif
 689
 690static const struct proc_ops vmcore_proc_ops = {
 691	.proc_open	= open_vmcore,
 692	.proc_read_iter	= read_vmcore,
 693	.proc_lseek	= default_llseek,
 694	.proc_mmap	= mmap_vmcore,
 695};
 696
 697static struct vmcore* __init get_new_element(void)
 698{
 699	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
 700}
 701
 702static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
 703			   struct list_head *vc_list)
 704{
 705	u64 size;
 706	struct vmcore *m;
 707
 708	size = elfsz + elfnotesegsz;
 709	list_for_each_entry(m, vc_list, list) {
 710		size += m->size;
 711	}
 712	return size;
 713}
 714
 715/**
 716 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 717 *
 718 * @ehdr_ptr: ELF header
 719 *
  720 * This function updates the p_memsz member of each PT_NOTE entry in the
  721 * program header table pointed to by @ehdr_ptr to the real size of the
  722 * ELF note segment.
 723 */
 724static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
 725{
 726	int i, rc=0;
 727	Elf64_Phdr *phdr_ptr;
 728	Elf64_Nhdr *nhdr_ptr;
 729
 730	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
 731	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 732		void *notes_section;
 733		u64 offset, max_sz, sz, real_sz = 0;
 734		if (phdr_ptr->p_type != PT_NOTE)
 735			continue;
 736		max_sz = phdr_ptr->p_memsz;
 737		offset = phdr_ptr->p_offset;
 738		notes_section = kmalloc(max_sz, GFP_KERNEL);
 739		if (!notes_section)
 740			return -ENOMEM;
 741		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 742		if (rc < 0) {
 743			kfree(notes_section);
 744			return rc;
 745		}
 746		nhdr_ptr = notes_section;
 747		while (nhdr_ptr->n_namesz != 0) {
 748			sz = sizeof(Elf64_Nhdr) +
 749				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
 750				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
 751			if ((real_sz + sz) > max_sz) {
 752				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
 753					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
 754				break;
 755			}
 756			real_sz += sz;
 757			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
 758		}
 759		kfree(notes_section);
 760		phdr_ptr->p_memsz = real_sz;
 761		if (real_sz == 0) {
 762			pr_warn("Warning: Zero PT_NOTE entries found\n");
 763		}
 764	}
 765
 766	return 0;
 767}
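
/*
 * Editor's note: a worked instance of the sizing loop above, with
 * illustrative numbers. A note named "CORE" has n_namesz = 5, padded to
 * 8; with a 336-byte descriptor (already 4-byte aligned) the entry spans
 * sizeof(Elf64_Nhdr) + 8 + 336 = 12 + 8 + 336 = 356 bytes, which is how
 * far nhdr_ptr advances to reach the next note header.
 */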
 768
 769/**
 770 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 771 * headers and sum of real size of their ELF note segment headers and
 772 * data.
 773 *
 774 * @ehdr_ptr: ELF header
 775 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 776 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 777 *
  778 * This function is used to merge multiple PT_NOTE program headers
  779 * into a single one. The resulting merged entry will have
  780 * @sz_ptnote in its phdr->p_memsz.
 781 *
 782 * It is assumed that program headers with PT_NOTE type pointed to by
  783 * @ehdr_ptr have already been updated by update_note_header_size_elf64
  784 * and each of the PT_NOTE program headers has the actual ELF note segment
 785 * size in its p_memsz member.
 786 */
 787static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
 788						 int *nr_ptnote, u64 *sz_ptnote)
 789{
 790	int i;
 791	Elf64_Phdr *phdr_ptr;
 792
 793	*nr_ptnote = *sz_ptnote = 0;
 794
 795	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
 796	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 797		if (phdr_ptr->p_type != PT_NOTE)
 798			continue;
 799		*nr_ptnote += 1;
 800		*sz_ptnote += phdr_ptr->p_memsz;
 801	}
 802
 803	return 0;
 804}
 805
 806/**
 807 * copy_notes_elf64 - copy ELF note segments in a given buffer
 808 *
 809 * @ehdr_ptr: ELF header
 810 * @notes_buf: buffer into which ELF note segments are copied
 811 *
  812 * This function is used to copy the ELF note segment from the 1st kernel
  813 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
  814 * size of the buffer @notes_buf is equal to or larger than the sum of the
  815 * real ELF note segment headers and data.
 816 *
 817 * It is assumed that program headers with PT_NOTE type pointed to by
  818 * @ehdr_ptr have already been updated by update_note_header_size_elf64
  819 * and each of the PT_NOTE program headers has the actual ELF note segment
 820 * size in its p_memsz member.
 821 */
 822static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
 823{
 824	int i, rc=0;
 825	Elf64_Phdr *phdr_ptr;
 826
 827	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);
 828
 829	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 830		u64 offset;
 831		if (phdr_ptr->p_type != PT_NOTE)
 832			continue;
 833		offset = phdr_ptr->p_offset;
 834		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
 835					   &offset);
 836		if (rc < 0)
 837			return rc;
 838		notes_buf += phdr_ptr->p_memsz;
 839	}
 840
 841	return 0;
 842}
 843
 844/* Merges all the PT_NOTE headers into one. */
 845static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
 846					   char **notes_buf, size_t *notes_sz)
 847{
 848	int i, nr_ptnote=0, rc=0;
 849	char *tmp;
 850	Elf64_Ehdr *ehdr_ptr;
 851	Elf64_Phdr phdr;
 852	u64 phdr_sz = 0, note_off;
 853
 854	ehdr_ptr = (Elf64_Ehdr *)elfptr;
 855
 856	rc = update_note_header_size_elf64(ehdr_ptr);
 857	if (rc < 0)
 858		return rc;
 859
 860	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
 861	if (rc < 0)
 862		return rc;
 863
 864	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
 865	*notes_buf = vmcore_alloc_buf(*notes_sz);
 866	if (!*notes_buf)
 867		return -ENOMEM;
 868
 869	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
 870	if (rc < 0)
 871		return rc;
 872
 873	/* Prepare merged PT_NOTE program header. */
 874	phdr.p_type    = PT_NOTE;
 875	phdr.p_flags   = 0;
 876	note_off = sizeof(Elf64_Ehdr) +
 877			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
 878	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
 879	phdr.p_vaddr   = phdr.p_paddr = 0;
 880	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
 881	phdr.p_align   = 0;
 882
  883	/* Add merged PT_NOTE program header */
 884	tmp = elfptr + sizeof(Elf64_Ehdr);
 885	memcpy(tmp, &phdr, sizeof(phdr));
 886	tmp += sizeof(phdr);
 887
 888	/* Remove unwanted PT_NOTE program headers. */
 889	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
 890	*elfsz = *elfsz - i;
 891	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
 892	memset(elfptr + *elfsz, 0, i);
 893	*elfsz = roundup(*elfsz, PAGE_SIZE);
 894
 895	/* Modify e_phnum to reflect merged headers. */
 896	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
 897
  898	/* Store the size of all notes.  We need this to update the note
  899	 * header when device dumps are added.
 900	 */
 901	elfnotes_orig_sz = phdr.p_memsz;
 902
 903	return 0;
 904}
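
/*
 * Editor's note: illustrative arithmetic for the merge above. With
 * e_phnum = 4 and nr_ptnote = 2, the merged table holds 4 - 2 + 1 = 3
 * headers, so note_off = sizeof(Elf64_Ehdr) + 3 * sizeof(Elf64_Phdr)
 * = 64 + 168 = 232, and phdr.p_offset is then rounded up to the next
 * page boundary.
 */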
 905
 906/**
 907 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 908 *
 909 * @ehdr_ptr: ELF header
 910 *
  911 * This function updates the p_memsz member of each PT_NOTE entry in the
  912 * program header table pointed to by @ehdr_ptr to the real size of the
  913 * ELF note segment.
 914 */
 915static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
 916{
 917	int i, rc=0;
 918	Elf32_Phdr *phdr_ptr;
 919	Elf32_Nhdr *nhdr_ptr;
 920
 921	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
 922	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 923		void *notes_section;
 924		u64 offset, max_sz, sz, real_sz = 0;
 925		if (phdr_ptr->p_type != PT_NOTE)
 926			continue;
 927		max_sz = phdr_ptr->p_memsz;
 928		offset = phdr_ptr->p_offset;
 929		notes_section = kmalloc(max_sz, GFP_KERNEL);
 930		if (!notes_section)
 931			return -ENOMEM;
 932		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 933		if (rc < 0) {
 934			kfree(notes_section);
 935			return rc;
 936		}
 937		nhdr_ptr = notes_section;
 938		while (nhdr_ptr->n_namesz != 0) {
 939			sz = sizeof(Elf32_Nhdr) +
 940				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
 941				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
 942			if ((real_sz + sz) > max_sz) {
 943				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
 944					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
 945				break;
 946			}
 947			real_sz += sz;
 948			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
 949		}
 950		kfree(notes_section);
 951		phdr_ptr->p_memsz = real_sz;
 952		if (real_sz == 0) {
 953			pr_warn("Warning: Zero PT_NOTE entries found\n");
 954		}
 955	}
 956
 957	return 0;
 958}
 959
 960/**
 961 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 962 * headers and sum of real size of their ELF note segment headers and
 963 * data.
 964 *
 965 * @ehdr_ptr: ELF header
 966 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 967 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 968 *
  969 * This function is used to merge multiple PT_NOTE program headers
  970 * into a single one. The resulting merged entry will have
  971 * @sz_ptnote in its phdr->p_memsz.
 972 *
 973 * It is assumed that program headers with PT_NOTE type pointed to by
  974 * @ehdr_ptr have already been updated by update_note_header_size_elf32
  975 * and each of the PT_NOTE program headers has the actual ELF note segment
 976 * size in its p_memsz member.
 977 */
 978static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
 979						 int *nr_ptnote, u64 *sz_ptnote)
 980{
 981	int i;
 982	Elf32_Phdr *phdr_ptr;
 983
 984	*nr_ptnote = *sz_ptnote = 0;
 985
 986	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
 987	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 988		if (phdr_ptr->p_type != PT_NOTE)
 989			continue;
 990		*nr_ptnote += 1;
 991		*sz_ptnote += phdr_ptr->p_memsz;
 992	}
 993
 994	return 0;
 995}
 996
 997/**
 998 * copy_notes_elf32 - copy ELF note segments in a given buffer
 999 *
1000 * @ehdr_ptr: ELF header
1001 * @notes_buf: buffer into which ELF note segments are copied
1002 *
 1003 * This function is used to copy the ELF note segment from the 1st kernel
 1004 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 1005 * size of the buffer @notes_buf is equal to or larger than the sum of the
 1006 * real ELF note segment headers and data.
1007 *
1008 * It is assumed that program headers with PT_NOTE type pointed to by
 1009 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 1010 * and each of the PT_NOTE program headers has the actual ELF note segment
1011 * size in its p_memsz member.
1012 */
1013static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
1014{
1015	int i, rc=0;
1016	Elf32_Phdr *phdr_ptr;
1017
1018	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);
1019
1020	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1021		u64 offset;
1022		if (phdr_ptr->p_type != PT_NOTE)
1023			continue;
1024		offset = phdr_ptr->p_offset;
1025		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
1026					   &offset);
1027		if (rc < 0)
1028			return rc;
1029		notes_buf += phdr_ptr->p_memsz;
1030	}
1031
1032	return 0;
1033}
1034
1035/* Merges all the PT_NOTE headers into one. */
1036static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
1037					   char **notes_buf, size_t *notes_sz)
1038{
1039	int i, nr_ptnote=0, rc=0;
1040	char *tmp;
1041	Elf32_Ehdr *ehdr_ptr;
1042	Elf32_Phdr phdr;
1043	u64 phdr_sz = 0, note_off;
1044
1045	ehdr_ptr = (Elf32_Ehdr *)elfptr;
1046
1047	rc = update_note_header_size_elf32(ehdr_ptr);
1048	if (rc < 0)
1049		return rc;
1050
1051	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
1052	if (rc < 0)
1053		return rc;
1054
1055	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
1056	*notes_buf = vmcore_alloc_buf(*notes_sz);
1057	if (!*notes_buf)
1058		return -ENOMEM;
1059
1060	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
1061	if (rc < 0)
1062		return rc;
1063
1064	/* Prepare merged PT_NOTE program header. */
1065	phdr.p_type    = PT_NOTE;
1066	phdr.p_flags   = 0;
1067	note_off = sizeof(Elf32_Ehdr) +
1068			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
1069	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
1070	phdr.p_vaddr   = phdr.p_paddr = 0;
1071	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
1072	phdr.p_align   = 0;
1073
 1074	/* Add merged PT_NOTE program header */
1075	tmp = elfptr + sizeof(Elf32_Ehdr);
1076	memcpy(tmp, &phdr, sizeof(phdr));
1077	tmp += sizeof(phdr);
1078
1079	/* Remove unwanted PT_NOTE program headers. */
1080	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
1081	*elfsz = *elfsz - i;
1082	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
1083	memset(elfptr + *elfsz, 0, i);
1084	*elfsz = roundup(*elfsz, PAGE_SIZE);
1085
1086	/* Modify e_phnum to reflect merged headers. */
1087	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
1088
 1089	/* Store the size of all notes.  We need this to update the note
 1090	 * header when device dumps are added.
1091	 */
1092	elfnotes_orig_sz = phdr.p_memsz;
1093
1094	return 0;
1095}
1096
1097/* Add memory chunks represented by program headers to vmcore list. Also update
1098 * the new offset fields of exported program headers. */
1099static int __init process_ptload_program_headers_elf64(char *elfptr,
1100						size_t elfsz,
1101						size_t elfnotes_sz,
1102						struct list_head *vc_list)
1103{
1104	int i;
1105	Elf64_Ehdr *ehdr_ptr;
1106	Elf64_Phdr *phdr_ptr;
1107	loff_t vmcore_off;
1108	struct vmcore *new;
1109
1110	ehdr_ptr = (Elf64_Ehdr *)elfptr;
1111	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
1112
1113	/* Skip Elf header, program headers and Elf note segment. */
1114	vmcore_off = elfsz + elfnotes_sz;
1115
1116	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1117		u64 paddr, start, end, size;
1118
1119		if (phdr_ptr->p_type != PT_LOAD)
1120			continue;
1121
1122		paddr = phdr_ptr->p_offset;
1123		start = rounddown(paddr, PAGE_SIZE);
1124		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1125		size = end - start;
1126
 1127		/* Add this contiguous chunk of memory to vmcore list. */
1128		new = get_new_element();
1129		if (!new)
1130			return -ENOMEM;
1131		new->paddr = start;
1132		new->size = size;
1133		list_add_tail(&new->list, vc_list);
1134
1135		/* Update the program header offset. */
1136		phdr_ptr->p_offset = vmcore_off + (paddr - start);
1137		vmcore_off = vmcore_off + size;
1138	}
1139	return 0;
1140}
1141
1142static int __init process_ptload_program_headers_elf32(char *elfptr,
1143						size_t elfsz,
1144						size_t elfnotes_sz,
1145						struct list_head *vc_list)
1146{
1147	int i;
1148	Elf32_Ehdr *ehdr_ptr;
1149	Elf32_Phdr *phdr_ptr;
1150	loff_t vmcore_off;
1151	struct vmcore *new;
1152
1153	ehdr_ptr = (Elf32_Ehdr *)elfptr;
1154	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
1155
1156	/* Skip Elf header, program headers and Elf note segment. */
1157	vmcore_off = elfsz + elfnotes_sz;
1158
1159	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1160		u64 paddr, start, end, size;
1161
1162		if (phdr_ptr->p_type != PT_LOAD)
1163			continue;
1164
1165		paddr = phdr_ptr->p_offset;
1166		start = rounddown(paddr, PAGE_SIZE);
1167		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1168		size = end - start;
1169
 1170		/* Add this contiguous chunk of memory to vmcore list. */
1171		new = get_new_element();
1172		if (!new)
1173			return -ENOMEM;
1174		new->paddr = start;
1175		new->size = size;
1176		list_add_tail(&new->list, vc_list);
1177
1178		/* Update the program header offset */
1179		phdr_ptr->p_offset = vmcore_off + (paddr - start);
1180		vmcore_off = vmcore_off + size;
1181	}
1182	return 0;
1183}
1184
1185/* Sets offset fields of vmcore elements. */
1186static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
1187				    struct list_head *vc_list)
1188{
1189	loff_t vmcore_off;
1190	struct vmcore *m;
1191
1192	/* Skip Elf header, program headers and Elf note segment. */
1193	vmcore_off = elfsz + elfnotes_sz;
1194
1195	list_for_each_entry(m, vc_list, list) {
1196		m->offset = vmcore_off;
1197		vmcore_off += m->size;
1198	}
1199}
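
/*
 * Editor's note: after the parsing and merging steps above, the exported
 * /proc/vmcore is laid out as follows (offsets computed by the functions
 * above):
 *
 *	[ ELF header + program headers ]  elfcorebuf_sz, page aligned
 *	[ merged ELF note segment      ]  elfnotes_sz, page aligned
 *	                                  (device dumps first, if enabled)
 *	[ old-memory chunks            ]  one struct vmcore per PT_LOAD,
 *	                                  offsets assigned just above
 */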
1200
1201static void free_elfcorebuf(void)
1202{
1203	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
1204	elfcorebuf = NULL;
1205	vfree(elfnotes_buf);
1206	elfnotes_buf = NULL;
1207}
1208
1209static int __init parse_crash_elf64_headers(void)
1210{
1211	int rc=0;
1212	Elf64_Ehdr ehdr;
1213	u64 addr;
1214
1215	addr = elfcorehdr_addr;
1216
1217	/* Read Elf header */
1218	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
1219	if (rc < 0)
1220		return rc;
1221
 1222	/* Do some basic verification. */
1223	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1224		(ehdr.e_type != ET_CORE) ||
1225		!vmcore_elf64_check_arch(&ehdr) ||
1226		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
1227		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1228		ehdr.e_version != EV_CURRENT ||
1229		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
1230		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
1231		ehdr.e_phnum == 0) {
1232		pr_warn("Warning: Core image elf header is not sane\n");
1233		return -EINVAL;
1234	}
1235
1236	/* Read in all elf headers. */
1237	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
1238				ehdr.e_phnum * sizeof(Elf64_Phdr);
1239	elfcorebuf_sz = elfcorebuf_sz_orig;
1240	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1241					      get_order(elfcorebuf_sz_orig));
1242	if (!elfcorebuf)
1243		return -ENOMEM;
1244	addr = elfcorehdr_addr;
1245	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1246	if (rc < 0)
1247		goto fail;
1248
1249	/* Merge all PT_NOTE headers into one. */
1250	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
1251				      &elfnotes_buf, &elfnotes_sz);
1252	if (rc)
1253		goto fail;
1254	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
1255						  elfnotes_sz, &vmcore_list);
1256	if (rc)
1257		goto fail;
1258	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1259	return 0;
1260fail:
1261	free_elfcorebuf();
1262	return rc;
1263}
1264
1265static int __init parse_crash_elf32_headers(void)
1266{
1267	int rc=0;
1268	Elf32_Ehdr ehdr;
1269	u64 addr;
1270
1271	addr = elfcorehdr_addr;
1272
1273	/* Read Elf header */
1274	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
1275	if (rc < 0)
1276		return rc;
1277
 1278	/* Do some basic verification. */
1279	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1280		(ehdr.e_type != ET_CORE) ||
1281		!vmcore_elf32_check_arch(&ehdr) ||
 1282		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
1283		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1284		ehdr.e_version != EV_CURRENT ||
1285		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
1286		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
1287		ehdr.e_phnum == 0) {
1288		pr_warn("Warning: Core image elf header is not sane\n");
1289		return -EINVAL;
1290	}
1291
1292	/* Read in all elf headers. */
1293	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
1294	elfcorebuf_sz = elfcorebuf_sz_orig;
1295	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1296					      get_order(elfcorebuf_sz_orig));
1297	if (!elfcorebuf)
1298		return -ENOMEM;
1299	addr = elfcorehdr_addr;
1300	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1301	if (rc < 0)
1302		goto fail;
1303
1304	/* Merge all PT_NOTE headers into one. */
1305	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
1306				      &elfnotes_buf, &elfnotes_sz);
1307	if (rc)
1308		goto fail;
1309	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
1310						  elfnotes_sz, &vmcore_list);
1311	if (rc)
1312		goto fail;
1313	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1314	return 0;
1315fail:
1316	free_elfcorebuf();
1317	return rc;
1318}
1319
1320static int __init parse_crash_elf_headers(void)
1321{
1322	unsigned char e_ident[EI_NIDENT];
1323	u64 addr;
1324	int rc=0;
1325
1326	addr = elfcorehdr_addr;
1327	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
1328	if (rc < 0)
1329		return rc;
1330	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
1331		pr_warn("Warning: Core image elf header not found\n");
1332		return -EINVAL;
1333	}
1334
1335	if (e_ident[EI_CLASS] == ELFCLASS64) {
1336		rc = parse_crash_elf64_headers();
1337		if (rc)
1338			return rc;
1339	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
1340		rc = parse_crash_elf32_headers();
1341		if (rc)
1342			return rc;
1343	} else {
1344		pr_warn("Warning: Core image elf header is not sane\n");
1345		return -EINVAL;
1346	}
1347
1348	/* Determine vmcore size. */
1349	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1350				      &vmcore_list);
1351
1352	return 0;
1353}
1354
1355#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1356/**
1357 * vmcoredd_write_header - Write vmcore device dump header at the
1358 * beginning of the dump's buffer.
1359 * @buf: Output buffer where the note is written
1360 * @data: Dump info
1361 * @size: Size of the dump
1362 *
 1363 * Fills the beginning of the dump's buffer with the vmcore device dump header.
1364 */
1365static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
1366				  u32 size)
1367{
1368	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;
1369
1370	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
1371	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
1372	vdd_hdr->n_type = NT_VMCOREDD;
1373
1374	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
1375		sizeof(vdd_hdr->name));
1376	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
1377}
1378
1379/**
1380 * vmcoredd_update_program_headers - Update all Elf program headers
1381 * @elfptr: Pointer to elf header
1382 * @elfnotesz: Size of elf notes aligned to page size
1383 * @vmcoreddsz: Size of device dumps to be added to elf note header
1384 *
1385 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
1386 * Also update the offsets of all the program headers after the elf note header.
1387 */
1388static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
1389					    size_t vmcoreddsz)
1390{
1391	unsigned char *e_ident = (unsigned char *)elfptr;
1392	u64 start, end, size;
1393	loff_t vmcore_off;
1394	u32 i;
1395
1396	vmcore_off = elfcorebuf_sz + elfnotesz;
1397
1398	if (e_ident[EI_CLASS] == ELFCLASS64) {
1399		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
1400		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
1401
1402		/* Update all program headers */
1403		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1404			if (phdr->p_type == PT_NOTE) {
1405				/* Update note size */
1406				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1407				phdr->p_filesz = phdr->p_memsz;
1408				continue;
1409			}
1410
1411			start = rounddown(phdr->p_offset, PAGE_SIZE);
1412			end = roundup(phdr->p_offset + phdr->p_memsz,
1413				      PAGE_SIZE);
1414			size = end - start;
1415			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1416			vmcore_off += size;
1417		}
1418	} else {
1419		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
1420		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
1421
1422		/* Update all program headers */
1423		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1424			if (phdr->p_type == PT_NOTE) {
1425				/* Update note size */
1426				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1427				phdr->p_filesz = phdr->p_memsz;
1428				continue;
1429			}
1430
1431			start = rounddown(phdr->p_offset, PAGE_SIZE);
1432			end = roundup(phdr->p_offset + phdr->p_memsz,
1433				      PAGE_SIZE);
1434			size = end - start;
1435			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1436			vmcore_off += size;
1437		}
1438	}
1439}
1440
1441/**
1442 * vmcoredd_update_size - Update the total size of the device dumps and update
1443 * Elf header
1444 * @dump_size: Size of the current device dump to be added to total size
1445 *
1446 * Update the total size of all the device dumps and update the Elf program
1447 * headers. Calculate the new offsets for the vmcore list and update the
1448 * total vmcore size.
1449 */
1450static void vmcoredd_update_size(size_t dump_size)
1451{
1452	vmcoredd_orig_sz += dump_size;
1453	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
1454	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
1455					vmcoredd_orig_sz);
1456
1457	/* Update vmcore list offsets */
1458	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1459
1460	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1461				      &vmcore_list);
1462	proc_vmcore->size = vmcore_size;
1463}
1464
1465/**
1466 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
1467 * @data: dump info.
1468 *
1469 * Allocate a buffer and invoke the calling driver's dump collect routine.
1470 * Write Elf note at the beginning of the buffer to indicate vmcore device
1471 * dump and add the dump to global list.
1472 */
1473int vmcore_add_device_dump(struct vmcoredd_data *data)
1474{
1475	struct vmcoredd_node *dump;
1476	void *buf = NULL;
1477	size_t data_size;
1478	int ret;
1479
1480	if (vmcoredd_disabled) {
1481		pr_err_once("Device dump is disabled\n");
1482		return -EINVAL;
1483	}
1484
1485	if (!data || !strlen(data->dump_name) ||
1486	    !data->vmcoredd_callback || !data->size)
1487		return -EINVAL;
1488
1489	dump = vzalloc(sizeof(*dump));
1490	if (!dump) {
1491		ret = -ENOMEM;
1492		goto out_err;
1493	}
1494
1495	/* Keep size of the buffer page aligned so that it can be mmaped */
1496	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
1497			    PAGE_SIZE);
1498
 1499	/* Allocate a buffer for drivers to write their dumps */
1500	buf = vmcore_alloc_buf(data_size);
1501	if (!buf) {
1502		ret = -ENOMEM;
1503		goto out_err;
1504	}
1505
1506	vmcoredd_write_header(buf, data, data_size -
1507			      sizeof(struct vmcoredd_header));
1508
 1509	/* Invoke the driver's dump collection routine */
1510	ret = data->vmcoredd_callback(data, buf +
1511				      sizeof(struct vmcoredd_header));
1512	if (ret)
1513		goto out_err;
1514
1515	dump->buf = buf;
1516	dump->size = data_size;
1517
1518	/* Add the dump to driver sysfs list */
1519	mutex_lock(&vmcoredd_mutex);
1520	list_add_tail(&dump->list, &vmcoredd_list);
1521	mutex_unlock(&vmcoredd_mutex);
1522
1523	vmcoredd_update_size(data_size);
1524	return 0;
1525
1526out_err:
1527	vfree(buf);
1528	vfree(dump);
1529
1530	return ret;
1531}
1532EXPORT_SYMBOL(vmcore_add_device_dump);
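
/*
 * Editor's sketch, not part of the original source: how a driver might
 * feed a device dump into /proc/vmcore through vmcore_add_device_dump().
 * The vmcoredd_data fields follow their use in this file (dump_name,
 * size, vmcoredd_callback); MY_DUMP_SIZE and the my_* names are
 * hypothetical.
 */
#include <linux/crash_dump.h>
#include <linux/string.h>

#define MY_DUMP_SIZE	4096	/* hypothetical payload size */

static int my_dump_collect(struct vmcoredd_data *data, void *buf)
{
	/* Fill buf with up to data->size bytes of device state. */
	memset(buf, 0, data->size);	/* stand-in for real collection */
	return 0;
}

static struct vmcoredd_data my_dump_data = {
	.vmcoredd_callback = my_dump_collect,
	.size = MY_DUMP_SIZE,
};

static int my_add_dump(void)
{
	strscpy(my_dump_data.dump_name, "my_device",
		sizeof(my_dump_data.dump_name));
	return vmcore_add_device_dump(&my_dump_data);
}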
1533#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1534
1535/* Free all dumps in vmcore device dump list */
1536static void vmcore_free_device_dumps(void)
1537{
1538#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1539	mutex_lock(&vmcoredd_mutex);
1540	while (!list_empty(&vmcoredd_list)) {
1541		struct vmcoredd_node *dump;
1542
1543		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
1544					list);
1545		list_del(&dump->list);
1546		vfree(dump->buf);
1547		vfree(dump);
1548	}
1549	mutex_unlock(&vmcoredd_mutex);
1550#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1551}
1552
1553/* Init function for vmcore module. */
1554static int __init vmcore_init(void)
1555{
1556	int rc = 0;
1557
1558	/* Allow architectures to allocate ELF header in 2nd kernel */
1559	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
1560	if (rc)
1561		return rc;
1562	/*
1563	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
1564	 * then capture the dump.
1565	 */
 1566	if (!is_vmcore_usable())
1567		return rc;
1568	rc = parse_crash_elf_headers();
1569	if (rc) {
1570		elfcorehdr_free(elfcorehdr_addr);
1571		pr_warn("Kdump: vmcore not initialized\n");
1572		return rc;
1573	}
1574	elfcorehdr_free(elfcorehdr_addr);
1575	elfcorehdr_addr = ELFCORE_ADDR_ERR;
1576
1577	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
1578	if (proc_vmcore)
1579		proc_vmcore->size = vmcore_size;
1580	return 0;
1581}
1582fs_initcall(vmcore_init);
1583
1584/* Cleanup function for vmcore module. */
1585void vmcore_cleanup(void)
1586{
1587	if (proc_vmcore) {
1588		proc_remove(proc_vmcore);
1589		proc_vmcore = NULL;
1590	}
1591
1592	/* clear the vmcore list. */
1593	while (!list_empty(&vmcore_list)) {
1594		struct vmcore *m;
1595
1596		m = list_first_entry(&vmcore_list, struct vmcore, list);
1597		list_del(&m->list);
1598		kfree(m);
1599	}
1600	free_elfcorebuf();
1601
1602	/* clear vmcore device dump list */
1603	vmcore_free_device_dumps();
1604}
v4.17
 
   1/*
   2 *	fs/proc/vmcore.c Interface for accessing the crash
   3 * 				 dump from the system's previous life.
   4 * 	Heavily borrowed from fs/proc/kcore.c
   5 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
   6 *	Copyright (C) IBM Corporation, 2004. All rights reserved
   7 *
   8 */
   9
  10#include <linux/mm.h>
  11#include <linux/kcore.h>
  12#include <linux/user.h>
  13#include <linux/elf.h>
  14#include <linux/elfcore.h>
  15#include <linux/export.h>
  16#include <linux/slab.h>
  17#include <linux/highmem.h>
  18#include <linux/printk.h>
  19#include <linux/bootmem.h>
  20#include <linux/init.h>
  21#include <linux/crash_dump.h>
  22#include <linux/list.h>
 
 
  23#include <linux/vmalloc.h>
  24#include <linux/pagemap.h>
  25#include <linux/uaccess.h>
 
  26#include <asm/io.h>
  27#include "internal.h"
  28
  29/* List representing chunks of contiguous memory areas and their offsets in
  30 * vmcore file.
  31 */
  32static LIST_HEAD(vmcore_list);
  33
  34/* Stores the pointer to the buffer containing kernel elf core headers. */
  35static char *elfcorebuf;
  36static size_t elfcorebuf_sz;
  37static size_t elfcorebuf_sz_orig;
  38
  39static char *elfnotes_buf;
  40static size_t elfnotes_sz;
 
 
  41
  42/* Total size of vmcore file. */
  43static u64 vmcore_size;
  44
  45static struct proc_dir_entry *proc_vmcore;
  46
  47/*
  48 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
  49 * The called function has to take care of module refcounting.
  50 */
  51static int (*oldmem_pfn_is_ram)(unsigned long pfn);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  52
  53int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
  54{
  55	if (oldmem_pfn_is_ram)
  56		return -EBUSY;
  57	oldmem_pfn_is_ram = fn;
  58	return 0;
 
 
 
 
 
 
 
 
  59}
  60EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);
  61
  62void unregister_oldmem_pfn_is_ram(void)
  63{
  64	oldmem_pfn_is_ram = NULL;
  65	wmb();
 
 
 
 
 
 
 
 
 
 
 
  66}
  67EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
  68
  69static int pfn_is_ram(unsigned long pfn)
  70{
  71	int (*fn)(unsigned long pfn);
  72	/* pfn is ram unless fn() checks pagetype */
  73	int ret = 1;
  74
  75	/*
  76	 * Ask hypervisor if the pfn is really ram.
  77	 * A ballooned page contains no data and reading from such a page
  78	 * will cause high load in the hypervisor.
  79	 */
  80	fn = oldmem_pfn_is_ram;
  81	if (fn)
  82		ret = fn(pfn);
  83
  84	return ret;
  85}
  86
  87/* Reads a page from the oldmem device from given offset. */
  88static ssize_t read_from_oldmem(char *buf, size_t count,
  89				u64 *ppos, int userbuf)
  90{
  91	unsigned long pfn, offset;
  92	size_t nr_bytes;
  93	ssize_t read = 0, tmp;
 
  94
  95	if (!count)
  96		return 0;
  97
  98	offset = (unsigned long)(*ppos % PAGE_SIZE);
  99	pfn = (unsigned long)(*ppos / PAGE_SIZE);
 100
 
 101	do {
 102		if (count > (PAGE_SIZE - offset))
 103			nr_bytes = PAGE_SIZE - offset;
 104		else
 105			nr_bytes = count;
 106
 107		/* If pfn is not ram, return zeros for sparse dump files */
 108		if (pfn_is_ram(pfn) == 0)
 109			memset(buf, 0, nr_bytes);
 110		else {
 111			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
 112						offset, userbuf);
 113			if (tmp < 0)
 114				return tmp;
 
 
 
 
 
 
 
 115		}
 
 116		*ppos += nr_bytes;
 117		count -= nr_bytes;
 118		buf += nr_bytes;
 119		read += nr_bytes;
 120		++pfn;
 121		offset = 0;
 122	} while (count);
 
 123
 124	return read;
 125}
 126
 127/*
 128 * Architectures may override this function to allocate ELF header in 2nd kernel
 129 */
 130int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 131{
 132	return 0;
 133}
 134
 135/*
 136 * Architectures may override this function to free header
 137 */
 138void __weak elfcorehdr_free(unsigned long long addr)
 139{}
 140
 141/*
 142 * Architectures may override this function to read from ELF header
 143 */
 144ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 145{
 146	return read_from_oldmem(buf, count, ppos, 0);
 
 
 
 
 
 147}
 148
 149/*
 150 * Architectures may override this function to read from notes sections
 151 */
 152ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
 153{
 154	return read_from_oldmem(buf, count, ppos, 0);
 
 
 
 
 
 
 155}
 156
 157/*
 158 * Architectures may override this function to map oldmem
 159 */
 160int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
 161				  unsigned long from, unsigned long pfn,
 162				  unsigned long size, pgprot_t prot)
 163{
 
 164	return remap_pfn_range(vma, from, pfn, size, prot);
 165}
 166
 167/*
 168 * Copy to either kernel or user space
 169 */
 170static int copy_to(void *target, void *src, size_t size, int userbuf)
 
 
 
 
 
 
 
 171{
 172	if (userbuf) {
 173		if (copy_to_user((char __user *) target, src, size))
 174			return -EFAULT;
 175	} else {
 176		memcpy(target, src, size);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 177	}
 178	return 0;
 
 
 
 179}
 
 
 180
 181/* Read from the ELF header and then the crash dump. On error, negative value is
 182 * returned otherwise number of bytes read are returned.
 183 */
 184static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
 185			     int userbuf)
 186{
 187	ssize_t acc = 0, tmp;
 188	size_t tsz;
 189	u64 start;
 190	struct vmcore *m = NULL;
 191
 192	if (buflen == 0 || *fpos >= vmcore_size)
 193		return 0;
 194
 195	/* trim buflen to not go beyond EOF */
 196	if (buflen > vmcore_size - *fpos)
 197		buflen = vmcore_size - *fpos;
 198
 199	/* Read ELF core header */
 200	if (*fpos < elfcorebuf_sz) {
 201		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
 202		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
 203			return -EFAULT;
 204		buflen -= tsz;
 205		*fpos += tsz;
 206		buffer += tsz;
 207		acc += tsz;
 208
 209		/* leave now if filled buffer already */
 210		if (buflen == 0)
 211			return acc;
 212	}
 213
 214	/* Read Elf note segment */
 215	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
 216		void *kaddr;
 217
 218		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
 219		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
 220		if (copy_to(buffer, kaddr, tsz, userbuf))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 221			return -EFAULT;
 222		buflen -= tsz;
 223		*fpos += tsz;
 224		buffer += tsz;
 225		acc += tsz;
 226
 227		/* leave now if filled buffer already */
 228		if (buflen == 0)
 229			return acc;
 230	}
 231
 232	list_for_each_entry(m, &vmcore_list, list) {
 233		if (*fpos < m->offset + m->size) {
 234			tsz = (size_t)min_t(unsigned long long,
 235					    m->offset + m->size - *fpos,
 236					    buflen);
 237			start = m->paddr + *fpos - m->offset;
 238			tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
 
 239			if (tmp < 0)
 240				return tmp;
 241			buflen -= tsz;
 242			*fpos += tsz;
 243			buffer += tsz;
 244			acc += tsz;
 245
 246			/* leave now if filled buffer already */
 247			if (buflen == 0)
 248				return acc;
 249		}
 250	}
 251
 252	return acc;
 253}
 254
 255static ssize_t read_vmcore(struct file *file, char __user *buffer,
 256			   size_t buflen, loff_t *fpos)
 257{
 258	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
 259}
 260
 261/*
 262 * The vmcore fault handler uses the page cache and fills data using the
 263 * standard __vmcore_read() function.
 264 *
 265 * On s390 the fault handler is used for memory regions that can't be mapped
 266 * directly with remap_pfn_range().
 267 */
 268static int mmap_vmcore_fault(struct vm_fault *vmf)
 269{
 270#ifdef CONFIG_S390
 271	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 272	pgoff_t index = vmf->pgoff;
 
 
 273	struct page *page;
 274	loff_t offset;
 275	char *buf;
 276	int rc;
 277
 278	page = find_or_create_page(mapping, index, GFP_KERNEL);
 279	if (!page)
 280		return VM_FAULT_OOM;
 281	if (!PageUptodate(page)) {
 282		offset = (loff_t) index << PAGE_SHIFT;
 283		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
 284		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
 
 
 
 285		if (rc < 0) {
 286			unlock_page(page);
 287			put_page(page);
 288			return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
 289		}
 290		SetPageUptodate(page);
 291	}
 292	unlock_page(page);
 293	vmf->page = page;
 294	return 0;
 295#else
 296	return VM_FAULT_SIGBUS;
 297#endif
 298}
 299
 300static const struct vm_operations_struct vmcore_mmap_ops = {
 301	.fault = mmap_vmcore_fault,
 302};
 303
 304/**
 305 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
 306 *                      vmalloc memory
 307 *
 308 * @notes_sz: size of buffer
 309 *
 310 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 311 * the buffer to user-space by means of remap_vmalloc_range().
 312 *
 313 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 314 * disabled and there's no need to allow users to mmap the buffer.
 315 */
 316static inline char *alloc_elfnotes_buf(size_t notes_sz)
 317{
 318#ifdef CONFIG_MMU
 319	return vmalloc_user(notes_sz);
 320#else
 321	return vzalloc(notes_sz);
 322#endif
 323}
 324
 325/*
 326 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 327 * essential for mmap_vmcore() in order to map physically
 328 * non-contiguous objects (ELF header, ELF note segment and memory
 329 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 330 * virtually contiguous user-space in ELF layout.
 331 */
 332#ifdef CONFIG_MMU
 333/*
334 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range(), replacing all
335 * pages reported as not being RAM with the zero page.
 336 *
 337 * @vma: vm_area_struct describing requested mapping
 338 * @from: start remapping from
 339 * @pfn: page frame number to start remapping to
 340 * @size: remapping size
 341 * @prot: protection bits
 342 *
 343 * Returns zero on success, -EAGAIN on failure.
 344 */
 345static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
 346				    unsigned long from, unsigned long pfn,
 347				    unsigned long size, pgprot_t prot)
 348{
 349	unsigned long map_size;
 350	unsigned long pos_start, pos_end, pos;
 351	unsigned long zeropage_pfn = my_zero_pfn(0);
 352	size_t len = 0;
 353
 354	pos_start = pfn;
 355	pos_end = pfn + (size >> PAGE_SHIFT);
 356
 357	for (pos = pos_start; pos < pos_end; ++pos) {
 358		if (!pfn_is_ram(pos)) {
 359			/*
360			 * We hit a page which is not RAM. Remap the contiguous
361			 * region between pos_start and pos - 1 and replace
362			 * the non-RAM page at pos with the zero page.
 363			 */
 364			if (pos > pos_start) {
365				/* Remap the contiguous region */
 366				map_size = (pos - pos_start) << PAGE_SHIFT;
 367				if (remap_oldmem_pfn_range(vma, from + len,
 368							   pos_start, map_size,
 369							   prot))
 370					goto fail;
 371				len += map_size;
 372			}
 373			/* Remap the zero page */
 374			if (remap_oldmem_pfn_range(vma, from + len,
 375						   zeropage_pfn,
 376						   PAGE_SIZE, prot))
 377				goto fail;
 378			len += PAGE_SIZE;
 379			pos_start = pos + 1;
 380		}
 381	}
 382	if (pos > pos_start) {
 383		/* Remap the rest */
 384		map_size = (pos - pos_start) << PAGE_SHIFT;
 385		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
 386					   map_size, prot))
 387			goto fail;
 388	}
 389	return 0;
 390fail:
 391	do_munmap(vma->vm_mm, from, len, NULL);
 392	return -EAGAIN;
 393}
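/*
 * Illustrative sketch (not part of this file): the loop above splits
 * [pfn, pfn + size / PAGE_SIZE) into maximal runs of RAM pages, remaps
 * each run with one call and substitutes the zero page for every
 * non-RAM pfn. A stand-alone user-space simulation with a stubbed
 * predicate shows the same run-splitting:
 */
#include <stdio.h>

/* Hypothetical stand-in for pfn_is_ram(): pfns 3 and 4 are "not RAM". */
static int stub_pfn_is_ram(unsigned long pfn)
{
	return pfn != 3 && pfn != 4;
}

static void simulate_remap(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long pos, pos_start = pfn, pos_end = pfn + nr_pages;

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (stub_pfn_is_ram(pos))
			continue;
		if (pos > pos_start)
			printf("map run  [%lu, %lu]\n", pos_start, pos - 1);
		printf("map zero page at %lu\n", pos);
		pos_start = pos + 1;
	}
	if (pos > pos_start)
		printf("map tail [%lu, %lu]\n", pos_start, pos - 1);
}

int main(void)
{
	simulate_remap(0, 8);	/* run [0,2], zero 3, zero 4, tail [5,7] */
	return 0;
}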
 394
 395static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
 396			    unsigned long from, unsigned long pfn,
 397			    unsigned long size, pgprot_t prot)
 398{
399	/*
400	 * Check if a vmcore callback was registered to avoid
401	 * looping over all pages without a reason.
402	 */
403	if (!list_empty(&vmcore_cb_list))
 404		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
 405	else
 406		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
 407}
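/*
 * Illustrative sketch (not part of this file): how a driver can hook
 * pfn_is_ram() via the callback list checked above. The names and the
 * filter policy are hypothetical; virtio-mem registers a callback in a
 * similar way so that its unplugged ranges are not touched.
 */
static bool example_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
	/* hypothetical policy: only the first 256 pages (1 MiB) are RAM */
	return pfn < 0x100;
}

static struct vmcore_cb example_vmcore_cb = {
	.pfn_is_ram = example_pfn_is_ram,
};

static void example_register(void)
{
	register_vmcore_cb(&example_vmcore_cb);
}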
 408
 409static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 410{
 411	size_t size = vma->vm_end - vma->vm_start;
 412	u64 start, end, len, tsz;
 413	struct vmcore *m;
 414
 415	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
 416	end = start + size;
 417
 418	if (size > vmcore_size || end > vmcore_size)
 419		return -EINVAL;
 420
 421	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
 422		return -EPERM;
 423
 424	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
 425	vma->vm_flags |= VM_MIXEDMAP;
 426	vma->vm_ops = &vmcore_mmap_ops;
 427
 428	len = 0;
 429
 430	if (start < elfcorebuf_sz) {
 431		u64 pfn;
 432
 433		tsz = min(elfcorebuf_sz - (size_t)start, size);
 434		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
 435		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
 436				    vma->vm_page_prot))
 437			return -EAGAIN;
 438		size -= tsz;
 439		start += tsz;
 440		len += tsz;
 441
 442		if (size == 0)
 443			return 0;
 444	}
 445
 446	if (start < elfcorebuf_sz + elfnotes_sz) {
 447		void *kaddr;
 448
 449		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
 450		kaddr = elfnotes_buf + start - elfcorebuf_sz;
 451		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
 452						kaddr, tsz))
 453			goto fail;
 454		size -= tsz;
 455		start += tsz;
 456		len += tsz;
 457
 458		if (size == 0)
 459			return 0;
 460	}
 461
 462	list_for_each_entry(m, &vmcore_list, list) {
 463		if (start < m->offset + m->size) {
 464			u64 paddr = 0;
 465
 466			tsz = (size_t)min_t(unsigned long long,
 467					    m->offset + m->size - start, size);
 468			paddr = m->paddr + start - m->offset;
 469			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
 470						    paddr >> PAGE_SHIFT, tsz,
 471						    vma->vm_page_prot))
 472				goto fail;
 473			size -= tsz;
 474			start += tsz;
 475			len += tsz;
 476
 477			if (size == 0)
 478				return 0;
 479		}
 480	}
 481
 482	return 0;
 483fail:
 484	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
 485	return -EAGAIN;
 486}
 487#else
 488static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 489{
 490	return -ENOSYS;
 491}
 492#endif
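/*
 * A minimal user-space sketch (not part of this file): mmap_vmcore()
 * accepts only read-only, non-executable mappings whose range stays
 * within vmcore_size, so a consumer maps the dump along these lines
 * (the function name is hypothetical; makedumpfile uses MAP_PRIVATE
 * similarly):
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int map_vmcore_prefix(size_t length)
{
	int fd = open("/proc/vmcore", O_RDONLY);
	unsigned char *p;

	if (fd < 0)
		return -1;
	p = mmap(NULL, length, PROT_READ, MAP_PRIVATE, fd, 0);
	close(fd);			/* the mapping keeps the file pinned */
	if (p == MAP_FAILED)
		return -1;
	printf("first byte: %#x\n", p[0]);	/* 0x7f, start of ELFMAG */
	munmap(p, length);
	return 0;
}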
 493
 494static const struct file_operations proc_vmcore_operations = {
 495	.read		= read_vmcore,
 496	.llseek		= default_llseek,
 497	.mmap		= mmap_vmcore,
 498};
 499
 500static struct vmcore* __init get_new_element(void)
 501{
 502	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
 503}
 504
 505static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
 506				  struct list_head *vc_list)
 507{
 508	u64 size;
 509	struct vmcore *m;
 510
 511	size = elfsz + elfnotesegsz;
 512	list_for_each_entry(m, vc_list, list) {
 513		size += m->size;
 514	}
 515	return size;
 516}
 517
 518/**
 519 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 520 *
 521 * @ehdr_ptr: ELF header
 522 *
523 * This function updates the p_memsz member of each PT_NOTE entry in
524 * the program header table pointed to by @ehdr_ptr to the real size of
525 * the ELF note segment.
 526 */
 527static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
 528{
 529	int i, rc=0;
 530	Elf64_Phdr *phdr_ptr;
 531	Elf64_Nhdr *nhdr_ptr;
 532
 533	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
 534	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 535		void *notes_section;
 536		u64 offset, max_sz, sz, real_sz = 0;
 537		if (phdr_ptr->p_type != PT_NOTE)
 538			continue;
 539		max_sz = phdr_ptr->p_memsz;
 540		offset = phdr_ptr->p_offset;
 541		notes_section = kmalloc(max_sz, GFP_KERNEL);
 542		if (!notes_section)
 543			return -ENOMEM;
 544		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 545		if (rc < 0) {
 546			kfree(notes_section);
 547			return rc;
 548		}
 549		nhdr_ptr = notes_section;
 550		while (nhdr_ptr->n_namesz != 0) {
 551			sz = sizeof(Elf64_Nhdr) +
 552				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
 553				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
 554			if ((real_sz + sz) > max_sz) {
 555				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
 556					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
 557				break;
 558			}
 559			real_sz += sz;
 560			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
 561		}
 562		kfree(notes_section);
 563		phdr_ptr->p_memsz = real_sz;
 564		if (real_sz == 0) {
 565			pr_warn("Warning: Zero PT_NOTE entries found\n");
 566		}
 567	}
 568
 569	return 0;
 570}
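/*
 * Worked example (illustrative, hypothetical note sizes): each note
 * entry occupies the fixed header plus its name and descriptor, both
 * padded to a 4-byte boundary, which is what the "(x + 3) & ~3"
 * rounding above computes.
 */
#include <elf.h>
#include <stdio.h>

static unsigned long long note_entry_size(unsigned int namesz,
					  unsigned int descsz)
{
	return sizeof(Elf64_Nhdr) +
	       (((unsigned long long)namesz + 3) & ~3ULL) +
	       (((unsigned long long)descsz + 3) & ~3ULL);
}

int main(void)
{
	/* e.g. n_namesz = 5 ("CORE\0"), n_descsz = 13: 12 + 8 + 16 = 36 */
	printf("%llu\n", note_entry_size(5, 13));
	return 0;
}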
 571
 572/**
 573 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 574 * headers and sum of real size of their ELF note segment headers and
 575 * data.
 576 *
 577 * @ehdr_ptr: ELF header
 578 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 579 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 580 *
 581 * This function is used to merge multiple PT_NOTE program headers
582 * into a single one. The resulting merged entry will have
583 * @sz_ptnote in its phdr->p_memsz.
584 *
585 * It is assumed that the PT_NOTE program headers pointed to by
586 * @ehdr_ptr have already been updated by update_note_header_size_elf64
587 * and that each PT_NOTE program header has the actual ELF note segment
588 * size in its p_memsz member.
 589 */
 590static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
 591						 int *nr_ptnote, u64 *sz_ptnote)
 592{
 593	int i;
 594	Elf64_Phdr *phdr_ptr;
 595
 596	*nr_ptnote = *sz_ptnote = 0;
 597
 598	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
 599	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 600		if (phdr_ptr->p_type != PT_NOTE)
 601			continue;
 602		*nr_ptnote += 1;
 603		*sz_ptnote += phdr_ptr->p_memsz;
 604	}
 605
 606	return 0;
 607}
 608
 609/**
 610 * copy_notes_elf64 - copy ELF note segments in a given buffer
 611 *
 612 * @ehdr_ptr: ELF header
 613 * @notes_buf: buffer into which ELF note segments are copied
 614 *
615 * This function is used to copy the ELF note segments from the 1st
616 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
617 * that the size of the buffer @notes_buf is equal to or larger than the
618 * sum of the real ELF note segment headers and data.
619 *
620 * It is assumed that the PT_NOTE program headers pointed to by
621 * @ehdr_ptr have already been updated by update_note_header_size_elf64
622 * and that each PT_NOTE program header has the actual ELF note segment
623 * size in its p_memsz member.
 624 */
 625static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
 626{
 627	int i, rc=0;
 628	Elf64_Phdr *phdr_ptr;
 629
 630	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);
 631
 632	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 633		u64 offset;
 634		if (phdr_ptr->p_type != PT_NOTE)
 635			continue;
 636		offset = phdr_ptr->p_offset;
 637		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
 638					   &offset);
 639		if (rc < 0)
 640			return rc;
 641		notes_buf += phdr_ptr->p_memsz;
 642	}
 643
 644	return 0;
 645}
 646
 647/* Merges all the PT_NOTE headers into one. */
 648static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
 649					   char **notes_buf, size_t *notes_sz)
 650{
 651	int i, nr_ptnote=0, rc=0;
 652	char *tmp;
 653	Elf64_Ehdr *ehdr_ptr;
 654	Elf64_Phdr phdr;
 655	u64 phdr_sz = 0, note_off;
 656
 657	ehdr_ptr = (Elf64_Ehdr *)elfptr;
 658
 659	rc = update_note_header_size_elf64(ehdr_ptr);
 660	if (rc < 0)
 661		return rc;
 662
 663	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
 664	if (rc < 0)
 665		return rc;
 666
 667	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
 668	*notes_buf = alloc_elfnotes_buf(*notes_sz);
 669	if (!*notes_buf)
 670		return -ENOMEM;
 671
 672	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
 673	if (rc < 0)
 674		return rc;
 675
 676	/* Prepare merged PT_NOTE program header. */
 677	phdr.p_type    = PT_NOTE;
 678	phdr.p_flags   = 0;
 679	note_off = sizeof(Elf64_Ehdr) +
 680			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
 681	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
 682	phdr.p_vaddr   = phdr.p_paddr = 0;
 683	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
 684	phdr.p_align   = 0;
 685
686	/* Add merged PT_NOTE program header */
 687	tmp = elfptr + sizeof(Elf64_Ehdr);
 688	memcpy(tmp, &phdr, sizeof(phdr));
 689	tmp += sizeof(phdr);
 690
 691	/* Remove unwanted PT_NOTE program headers. */
 692	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
 693	*elfsz = *elfsz - i;
 694	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
 695	memset(elfptr + *elfsz, 0, i);
 696	*elfsz = roundup(*elfsz, PAGE_SIZE);
 697
 698	/* Modify e_phnum to reflect merged headers. */
 699	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
 700
 701	return 0;
 702}
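/*
 * Worked example (illustrative, hypothetical numbers): with e_phnum = 5
 * of which nr_ptnote = 3 are PT_NOTE, the merged table keeps
 * 5 - 3 + 1 = 3 program headers, so the note data begins at
 * note_off = sizeof(Elf64_Ehdr) + 3 * sizeof(Elf64_Phdr)
 *          = 64 + 3 * 56 = 232, rounded up to one page (4096).
 */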
 703
 704/**
 705 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 706 *
 707 * @ehdr_ptr: ELF header
 708 *
709 * This function updates the p_memsz member of each PT_NOTE entry in
710 * the program header table pointed to by @ehdr_ptr to the real size of
711 * the ELF note segment.
 712 */
 713static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
 714{
 715	int i, rc=0;
 716	Elf32_Phdr *phdr_ptr;
 717	Elf32_Nhdr *nhdr_ptr;
 718
 719	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
 720	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 721		void *notes_section;
 722		u64 offset, max_sz, sz, real_sz = 0;
 723		if (phdr_ptr->p_type != PT_NOTE)
 724			continue;
 725		max_sz = phdr_ptr->p_memsz;
 726		offset = phdr_ptr->p_offset;
 727		notes_section = kmalloc(max_sz, GFP_KERNEL);
 728		if (!notes_section)
 729			return -ENOMEM;
 730		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 731		if (rc < 0) {
 732			kfree(notes_section);
 733			return rc;
 734		}
 735		nhdr_ptr = notes_section;
 736		while (nhdr_ptr->n_namesz != 0) {
 737			sz = sizeof(Elf32_Nhdr) +
 738				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
 739				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
 740			if ((real_sz + sz) > max_sz) {
 741				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
 742					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
 743				break;
 744			}
 745			real_sz += sz;
 746			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
 747		}
 748		kfree(notes_section);
 749		phdr_ptr->p_memsz = real_sz;
 750		if (real_sz == 0) {
 751			pr_warn("Warning: Zero PT_NOTE entries found\n");
 752		}
 753	}
 754
 755	return 0;
 756}
 757
 758/**
 759 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 760 * headers and sum of real size of their ELF note segment headers and
 761 * data.
 762 *
 763 * @ehdr_ptr: ELF header
 764 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 765 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 766 *
 767 * This function is used to merge multiple PT_NOTE program headers
768 * into a single one. The resulting merged entry will have
769 * @sz_ptnote in its phdr->p_memsz.
770 *
771 * It is assumed that the PT_NOTE program headers pointed to by
772 * @ehdr_ptr have already been updated by update_note_header_size_elf32
773 * and that each PT_NOTE program header has the actual ELF note segment
774 * size in its p_memsz member.
 775 */
 776static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
 777						 int *nr_ptnote, u64 *sz_ptnote)
 778{
 779	int i;
 780	Elf32_Phdr *phdr_ptr;
 781
 782	*nr_ptnote = *sz_ptnote = 0;
 783
 784	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
 785	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 786		if (phdr_ptr->p_type != PT_NOTE)
 787			continue;
 788		*nr_ptnote += 1;
 789		*sz_ptnote += phdr_ptr->p_memsz;
 790	}
 791
 792	return 0;
 793}
 794
 795/**
 796 * copy_notes_elf32 - copy ELF note segments in a given buffer
 797 *
 798 * @ehdr_ptr: ELF header
 799 * @notes_buf: buffer into which ELF note segments are copied
 800 *
801 * This function is used to copy the ELF note segments from the 1st
802 * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
803 * that the size of the buffer @notes_buf is equal to or larger than the
804 * sum of the real ELF note segment headers and data.
805 *
806 * It is assumed that the PT_NOTE program headers pointed to by
807 * @ehdr_ptr have already been updated by update_note_header_size_elf32
808 * and that each PT_NOTE program header has the actual ELF note segment
809 * size in its p_memsz member.
 810 */
 811static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
 812{
 813	int i, rc=0;
 814	Elf32_Phdr *phdr_ptr;
 815
 816	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);
 817
 818	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 819		u64 offset;
 820		if (phdr_ptr->p_type != PT_NOTE)
 821			continue;
 822		offset = phdr_ptr->p_offset;
 823		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
 824					   &offset);
 825		if (rc < 0)
 826			return rc;
 827		notes_buf += phdr_ptr->p_memsz;
 828	}
 829
 830	return 0;
 831}
 832
 833/* Merges all the PT_NOTE headers into one. */
 834static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
 835					   char **notes_buf, size_t *notes_sz)
 836{
 837	int i, nr_ptnote=0, rc=0;
 838	char *tmp;
 839	Elf32_Ehdr *ehdr_ptr;
 840	Elf32_Phdr phdr;
 841	u64 phdr_sz = 0, note_off;
 842
 843	ehdr_ptr = (Elf32_Ehdr *)elfptr;
 844
 845	rc = update_note_header_size_elf32(ehdr_ptr);
 846	if (rc < 0)
 847		return rc;
 848
 849	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
 850	if (rc < 0)
 851		return rc;
 852
 853	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
 854	*notes_buf = alloc_elfnotes_buf(*notes_sz);
 855	if (!*notes_buf)
 856		return -ENOMEM;
 857
 858	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
 859	if (rc < 0)
 860		return rc;
 861
 862	/* Prepare merged PT_NOTE program header. */
 863	phdr.p_type    = PT_NOTE;
 864	phdr.p_flags   = 0;
 865	note_off = sizeof(Elf32_Ehdr) +
 866			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
 867	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
 868	phdr.p_vaddr   = phdr.p_paddr = 0;
 869	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
 870	phdr.p_align   = 0;
 871
872	/* Add merged PT_NOTE program header */
 873	tmp = elfptr + sizeof(Elf32_Ehdr);
 874	memcpy(tmp, &phdr, sizeof(phdr));
 875	tmp += sizeof(phdr);
 876
 877	/* Remove unwanted PT_NOTE program headers. */
 878	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
 879	*elfsz = *elfsz - i;
 880	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
 881	memset(elfptr + *elfsz, 0, i);
 882	*elfsz = roundup(*elfsz, PAGE_SIZE);
 883
 884	/* Modify e_phnum to reflect merged headers. */
 885	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
 886
 887	return 0;
 888}
 889
890/* Add memory chunks represented by program headers to the vmcore list. Also
891 * update the offset fields of the exported program headers. */
 892static int __init process_ptload_program_headers_elf64(char *elfptr,
 893						size_t elfsz,
 894						size_t elfnotes_sz,
 895						struct list_head *vc_list)
 896{
 897	int i;
 898	Elf64_Ehdr *ehdr_ptr;
 899	Elf64_Phdr *phdr_ptr;
 900	loff_t vmcore_off;
 901	struct vmcore *new;
 902
 903	ehdr_ptr = (Elf64_Ehdr *)elfptr;
 904	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
 905
 906	/* Skip Elf header, program headers and Elf note segment. */
 907	vmcore_off = elfsz + elfnotes_sz;
 908
 909	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 910		u64 paddr, start, end, size;
 911
 912		if (phdr_ptr->p_type != PT_LOAD)
 913			continue;
 914
 915		paddr = phdr_ptr->p_offset;
 916		start = rounddown(paddr, PAGE_SIZE);
 917		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
 918		size = end - start;
 919
920		/* Add this contiguous chunk of memory to the vmcore list. */
 921		new = get_new_element();
 922		if (!new)
 923			return -ENOMEM;
 924		new->paddr = start;
 925		new->size = size;
 926		list_add_tail(&new->list, vc_list);
 927
 928		/* Update the program header offset. */
 929		phdr_ptr->p_offset = vmcore_off + (paddr - start);
 930		vmcore_off = vmcore_off + size;
 931	}
 932	return 0;
 933}
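/*
 * Worked example (illustrative, hypothetical values): a PT_LOAD entry
 * whose p_offset (holding the physical address at this point) is
 * 0x10000200 with p_memsz = 0x1000 becomes the page-aligned chunk
 * [0x10000000, 0x10002000), and the rewritten file offset keeps the
 * sub-page displacement: p_offset = vmcore_off + 0x200.
 */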
 934
 935static int __init process_ptload_program_headers_elf32(char *elfptr,
 936						size_t elfsz,
 937						size_t elfnotes_sz,
 938						struct list_head *vc_list)
 939{
 940	int i;
 941	Elf32_Ehdr *ehdr_ptr;
 942	Elf32_Phdr *phdr_ptr;
 943	loff_t vmcore_off;
 944	struct vmcore *new;
 945
 946	ehdr_ptr = (Elf32_Ehdr *)elfptr;
 947	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
 948
 949	/* Skip Elf header, program headers and Elf note segment. */
 950	vmcore_off = elfsz + elfnotes_sz;
 951
 952	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 953		u64 paddr, start, end, size;
 954
 955		if (phdr_ptr->p_type != PT_LOAD)
 956			continue;
 957
 958		paddr = phdr_ptr->p_offset;
 959		start = rounddown(paddr, PAGE_SIZE);
 960		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
 961		size = end - start;
 962
963		/* Add this contiguous chunk of memory to the vmcore list. */
 964		new = get_new_element();
 965		if (!new)
 966			return -ENOMEM;
 967		new->paddr = start;
 968		new->size = size;
 969		list_add_tail(&new->list, vc_list);
 970
971		/* Update the program header offset. */
 972		phdr_ptr->p_offset = vmcore_off + (paddr - start);
 973		vmcore_off = vmcore_off + size;
 974	}
 975	return 0;
 976}
 977
 978/* Sets offset fields of vmcore elements. */
 979static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
 980					   struct list_head *vc_list)
 981{
 982	loff_t vmcore_off;
 983	struct vmcore *m;
 984
 985	/* Skip Elf header, program headers and Elf note segment. */
 986	vmcore_off = elfsz + elfnotes_sz;
 987
 988	list_for_each_entry(m, vc_list, list) {
 989		m->offset = vmcore_off;
 990		vmcore_off += m->size;
 991	}
 992}
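/*
 * Worked example (illustrative): with elfsz = 0x2000 and
 * elfnotes_sz = 0x3000, two chunks of 0x10000 and 0x8000 bytes get
 * m->offset = 0x5000 and 0x15000 respectively, so the file layout is
 * headers | notes | chunk 0 | chunk 1 with no holes.
 */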
 993
 994static void free_elfcorebuf(void)
 995{
 996	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
 997	elfcorebuf = NULL;
 998	vfree(elfnotes_buf);
 999	elfnotes_buf = NULL;
1000}
1001
1002static int __init parse_crash_elf64_headers(void)
1003{
1004	int rc=0;
1005	Elf64_Ehdr ehdr;
1006	u64 addr;
1007
1008	addr = elfcorehdr_addr;
1009
1010	/* Read Elf header */
1011	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
1012	if (rc < 0)
1013		return rc;
1014
1015	/* Do some basic verification. */
1016	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1017		(ehdr.e_type != ET_CORE) ||
1018		!vmcore_elf64_check_arch(&ehdr) ||
1019		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
1020		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1021		ehdr.e_version != EV_CURRENT ||
1022		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
1023		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
1024		ehdr.e_phnum == 0) {
1025		pr_warn("Warning: Core image elf header is not sane\n");
1026		return -EINVAL;
1027	}
1028
1029	/* Read in all elf headers. */
1030	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
1031				ehdr.e_phnum * sizeof(Elf64_Phdr);
1032	elfcorebuf_sz = elfcorebuf_sz_orig;
1033	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1034					      get_order(elfcorebuf_sz_orig));
1035	if (!elfcorebuf)
1036		return -ENOMEM;
1037	addr = elfcorehdr_addr;
1038	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1039	if (rc < 0)
1040		goto fail;
1041
1042	/* Merge all PT_NOTE headers into one. */
1043	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
1044				      &elfnotes_buf, &elfnotes_sz);
1045	if (rc)
1046		goto fail;
1047	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
1048						  elfnotes_sz, &vmcore_list);
1049	if (rc)
1050		goto fail;
1051	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1052	return 0;
1053fail:
1054	free_elfcorebuf();
1055	return rc;
1056}
1057
1058static int __init parse_crash_elf32_headers(void)
1059{
1060	int rc=0;
1061	Elf32_Ehdr ehdr;
1062	u64 addr;
1063
1064	addr = elfcorehdr_addr;
1065
1066	/* Read Elf header */
1067	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
1068	if (rc < 0)
1069		return rc;
1070
1071	/* Do some basic verification. */
1072	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1073		(ehdr.e_type != ET_CORE) ||
1074		!vmcore_elf32_check_arch(&ehdr) ||
1075		ehdr.e_ident[EI_CLASS] != ELFCLASS32||
1076		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1077		ehdr.e_version != EV_CURRENT ||
1078		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
1079		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
1080		ehdr.e_phnum == 0) {
1081		pr_warn("Warning: Core image elf header is not sane\n");
1082		return -EINVAL;
1083	}
1084
1085	/* Read in all elf headers. */
1086	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
1087	elfcorebuf_sz = elfcorebuf_sz_orig;
1088	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1089					      get_order(elfcorebuf_sz_orig));
1090	if (!elfcorebuf)
1091		return -ENOMEM;
1092	addr = elfcorehdr_addr;
1093	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1094	if (rc < 0)
1095		goto fail;
1096
1097	/* Merge all PT_NOTE headers into one. */
1098	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
1099				      &elfnotes_buf, &elfnotes_sz);
1100	if (rc)
1101		goto fail;
1102	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
1103						  elfnotes_sz, &vmcore_list);
1104	if (rc)
1105		goto fail;
1106	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1107	return 0;
1108fail:
1109	free_elfcorebuf();
1110	return rc;
1111}
1112
1113static int __init parse_crash_elf_headers(void)
1114{
1115	unsigned char e_ident[EI_NIDENT];
1116	u64 addr;
1117	int rc=0;
1118
1119	addr = elfcorehdr_addr;
1120	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
1121	if (rc < 0)
1122		return rc;
1123	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
1124		pr_warn("Warning: Core image elf header not found\n");
1125		return -EINVAL;
1126	}
1127
1128	if (e_ident[EI_CLASS] == ELFCLASS64) {
1129		rc = parse_crash_elf64_headers();
1130		if (rc)
1131			return rc;
1132	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
1133		rc = parse_crash_elf32_headers();
1134		if (rc)
1135			return rc;
1136	} else {
1137		pr_warn("Warning: Core image elf header is not sane\n");
1138		return -EINVAL;
1139	}
1140
1141	/* Determine vmcore size. */
1142	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1143				      &vmcore_list);
1144
1145	return 0;
1146}
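/*
 * A minimal user-space analogue (not part of this file) of the dispatch
 * above: read e_ident, check the magic, then branch on the ELF class
 * byte just as parse_crash_elf_headers() does. The function name is
 * hypothetical.
 */
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int identify_core_class(const char *path)
{
	unsigned char e_ident[EI_NIDENT];
	ssize_t n;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	n = read(fd, e_ident, EI_NIDENT);
	close(fd);
	if (n != EI_NIDENT || memcmp(e_ident, ELFMAG, SELFMAG) != 0)
		return -1;
	if (e_ident[EI_CLASS] == ELFCLASS64)
		puts("64-bit core");
	else if (e_ident[EI_CLASS] == ELFCLASS32)
		puts("32-bit core");
	else
		return -1;
	return 0;
}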
1147
1148/* Init function for vmcore module. */
1149static int __init vmcore_init(void)
1150{
1151	int rc = 0;
1152
1153	/* Allow architectures to allocate ELF header in 2nd kernel */
1154	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
1155	if (rc)
1156		return rc;
1157	/*
1158	 * If elfcorehdr= has been passed on the kernel command line or the ELF
1159	 * core header has been created by the 2nd kernel, then capture the dump.
1160	 */
1161	if (!(is_vmcore_usable()))
1162		return rc;
1163	rc = parse_crash_elf_headers();
1164	if (rc) {
1165		pr_warn("Kdump: vmcore not initialized\n");
1166		return rc;
1167	}
1168	elfcorehdr_free(elfcorehdr_addr);
1169	elfcorehdr_addr = ELFCORE_ADDR_ERR;
1170
1171	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
1172	if (proc_vmcore)
1173		proc_vmcore->size = vmcore_size;
1174	return 0;
1175}
1176fs_initcall(vmcore_init);
1177
1178/* Cleanup function for vmcore module. */
1179void vmcore_cleanup(void)
1180{
1181	if (proc_vmcore) {
1182		proc_remove(proc_vmcore);
1183		proc_vmcore = NULL;
1184	}
1185
1186	/* clear the vmcore list. */
1187	while (!list_empty(&vmcore_list)) {
1188		struct vmcore *m;
1189
1190		m = list_first_entry(&vmcore_list, struct vmcore, list);
1191		list_del(&m->list);
1192		kfree(m);
1193	}
1194	free_elfcorebuf();
1195}