v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *	fs/proc/vmcore.c Interface for accessing the crash
   4 * 				 dump from the system's previous life.
   5 * 	Heavily borrowed from fs/proc/kcore.c
   6 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
   7 *	Copyright (C) IBM Corporation, 2004. All rights reserved
   8 *
   9 */
  10
  11#include <linux/mm.h>
  12#include <linux/kcore.h>
  13#include <linux/user.h>
  14#include <linux/elf.h>
  15#include <linux/elfcore.h>
  16#include <linux/export.h>
  17#include <linux/slab.h>
  18#include <linux/highmem.h>
  19#include <linux/printk.h>
  20#include <linux/memblock.h>
  21#include <linux/init.h>
  22#include <linux/crash_dump.h>
  23#include <linux/list.h>
  24#include <linux/moduleparam.h>
  25#include <linux/mutex.h>
  26#include <linux/vmalloc.h>
  27#include <linux/pagemap.h>
  28#include <linux/uio.h>
  29#include <linux/cc_platform.h>
  30#include <asm/io.h>
  31#include "internal.h"
  32
  33/* List representing chunks of contiguous memory areas and their offsets in
  34 * the vmcore file.
  35 */
  36static LIST_HEAD(vmcore_list);
  37
  38/* Stores the pointer to the buffer containing kernel elf core headers. */
  39static char *elfcorebuf;
  40static size_t elfcorebuf_sz;
  41static size_t elfcorebuf_sz_orig;
  42
  43static char *elfnotes_buf;
  44static size_t elfnotes_sz;
  45/* Size of all notes minus the device dump notes */
  46static size_t elfnotes_orig_sz;
  47
  48/* Total size of vmcore file. */
  49static u64 vmcore_size;
  50
  51static struct proc_dir_entry *proc_vmcore;
  52
  53#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
  54/* Device Dump list and mutex to synchronize access to list */
  55static LIST_HEAD(vmcoredd_list);
  56static DEFINE_MUTEX(vmcoredd_mutex);
  57
  58static bool vmcoredd_disabled;
  59core_param(novmcoredd, vmcoredd_disabled, bool, 0);
  60#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
  61
  62/* Total size of all device dumps */
  63static size_t vmcoredd_orig_sz;
  64
  65static DEFINE_SPINLOCK(vmcore_cb_lock);
  66DEFINE_STATIC_SRCU(vmcore_cb_srcu);
  67/* List of registered vmcore callbacks. */
  68static LIST_HEAD(vmcore_cb_list);
  69/* Whether the vmcore has been opened once. */
  70static bool vmcore_opened;
  71
  72void register_vmcore_cb(struct vmcore_cb *cb)
  73{
  74	INIT_LIST_HEAD(&cb->next);
  75	spin_lock(&vmcore_cb_lock);
  76	list_add_tail(&cb->next, &vmcore_cb_list);
  77	/*
  78	 * Registering a vmcore callback after the vmcore was opened is
  79	 * very unusual (e.g., manual driver loading).
  80	 */
  81	if (vmcore_opened)
  82		pr_warn_once("Unexpected vmcore callback registration\n");
  83	spin_unlock(&vmcore_cb_lock);
  84}
  85EXPORT_SYMBOL_GPL(register_vmcore_cb);
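/*
 * Example: hooking pfn_is_ram() from a driver (e.g. a memory ballooning
 * driver). This is a minimal sketch; the my_pfn_is_backed() helper is
 * hypothetical, but the callback signature and the registration match what
 * pfn_is_ram() below expects. Returning false makes the page read back as
 * zeroes instead of being copied from old memory:
 *
 *	static bool my_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
 *	{
 *		return my_pfn_is_backed(pfn);
 *	}
 *
 *	static struct vmcore_cb my_vmcore_cb = {
 *		.pfn_is_ram = my_pfn_is_ram,
 *	};
 *
 *	register_vmcore_cb(&my_vmcore_cb);
 */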
  86
  87void unregister_vmcore_cb(struct vmcore_cb *cb)
  88{
  89	spin_lock(&vmcore_cb_lock);
  90	list_del_rcu(&cb->next);
  91	/*
  92	 * Unregistering a vmcore callback after the vmcore was opened is
  93	 * very unusual (e.g., forced driver removal), but we cannot stop
  94	 * unregistering.
  95	 */
  96	if (vmcore_opened)
  97		pr_warn_once("Unexpected vmcore callback unregistration\n");
  98	spin_unlock(&vmcore_cb_lock);
  99
 100	synchronize_srcu(&vmcore_cb_srcu);
 101}
 102EXPORT_SYMBOL_GPL(unregister_vmcore_cb);
 103
 104static bool pfn_is_ram(unsigned long pfn)
 105{
 106	struct vmcore_cb *cb;
 107	bool ret = true;
 108
 109	list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
 110				 srcu_read_lock_held(&vmcore_cb_srcu)) {
 111		if (unlikely(!cb->pfn_is_ram))
 112			continue;
 113		ret = cb->pfn_is_ram(cb, pfn);
 114		if (!ret)
 115			break;
 116	}
 117
 118	return ret;
 119}
 120
 121static int open_vmcore(struct inode *inode, struct file *file)
 122{
 123	spin_lock(&vmcore_cb_lock);
 124	vmcore_opened = true;
 125	spin_unlock(&vmcore_cb_lock);
 126
 127	return 0;
 128}
 129
 130/* Read from the oldmem device starting at the given offset. */
 131ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
 132			 u64 *ppos, bool encrypted)
 133{
 134	unsigned long pfn, offset;
 135	ssize_t nr_bytes;
 136	ssize_t read = 0, tmp;
 137	int idx;
 138
 139	if (!count)
 140		return 0;
 141
 142	offset = (unsigned long)(*ppos % PAGE_SIZE);
 143	pfn = (unsigned long)(*ppos / PAGE_SIZE);
 144
 145	idx = srcu_read_lock(&vmcore_cb_srcu);
 146	do {
 147		if (count > (PAGE_SIZE - offset))
 148			nr_bytes = PAGE_SIZE - offset;
 149		else
 150			nr_bytes = count;
 151
 152		/* If pfn is not ram, return zeros for sparse dump files */
 153		if (!pfn_is_ram(pfn)) {
 154			tmp = iov_iter_zero(nr_bytes, iter);
 155		} else {
 156			if (encrypted)
 157				tmp = copy_oldmem_page_encrypted(iter, pfn,
 158								 nr_bytes,
 159								 offset);
 160			else
 161				tmp = copy_oldmem_page(iter, pfn, nr_bytes,
 162						       offset);
 163		}
 164		if (tmp < nr_bytes) {
 165			srcu_read_unlock(&vmcore_cb_srcu, idx);
 166			return -EFAULT;
 167		}
 168
 169	*ppos += nr_bytes;
 170	count -= nr_bytes;
 171	read += nr_bytes;
 172		++pfn;
 173		offset = 0;
 174	} while (count);
 175	srcu_read_unlock(&vmcore_cb_srcu, idx);
 176
 177	return read;
 178}
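/*
 * Worked example of the offset/pfn split above, assuming PAGE_SIZE == 4096:
 * a read at *ppos == 0x1234 starts at pfn 1 with offset 0x234, copies
 * PAGE_SIZE - 0x234 bytes from that page, then continues with offset 0 from
 * pfn 2 onward until count is exhausted.
 */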
 179
 180/*
 181 * Architectures may override this function to allocate ELF header in 2nd kernel
 182 */
 183int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 184{
 185	return 0;
 186}
 187
 188/*
 189 * Architectures may override this function to free header
 190 */
 191void __weak elfcorehdr_free(unsigned long long addr)
 192{}
 193
 194/*
 195 * Architectures may override this function to read from ELF header
 196 */
 197ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 198{
 199	struct kvec kvec = { .iov_base = buf, .iov_len = count };
 200	struct iov_iter iter;
 201
 202	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
 203
 204	return read_from_oldmem(&iter, count, ppos, false);
 205}
 206
 207/*
 208 * Architectures may override this function to read from notes sections
 209 */
 210ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
 211{
 212	struct kvec kvec = { .iov_base = buf, .iov_len = count };
 213	struct iov_iter iter;
 214
 215	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
 216
 217	return read_from_oldmem(&iter, count, ppos,
 218			cc_platform_has(CC_ATTR_MEM_ENCRYPT));
 219}
 220
 221/*
 222 * Architectures may override this function to map oldmem
 223 */
 224int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
 225				  unsigned long from, unsigned long pfn,
 226				  unsigned long size, pgprot_t prot)
 227{
 228	prot = pgprot_encrypted(prot);
 229	return remap_pfn_range(vma, from, pfn, size, prot);
 230}
 231
 232/*
 233 * Architectures which support memory encryption override this.
 234 */
 235ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
 236		unsigned long pfn, size_t csize, unsigned long offset)
 237{
 238	return copy_oldmem_page(iter, pfn, csize, offset);
 239}
 240
 241#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 242static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
 243{
 244	struct vmcoredd_node *dump;
 245	u64 offset = 0;
 246	int ret = 0;
 247	size_t tsz;
 248	char *buf;
 249
 250	mutex_lock(&vmcoredd_mutex);
 251	list_for_each_entry(dump, &vmcoredd_list, list) {
 252		if (start < offset + dump->size) {
 253			tsz = min(offset + (u64)dump->size - start, (u64)size);
 254			buf = dump->buf + start - offset;
 255			if (copy_to_iter(buf, tsz, iter) < tsz) {
 256				ret = -EFAULT;
 257				goto out_unlock;
 258			}
 259
 260			size -= tsz;
 261			start += tsz;
 262
 263			/* Leave now if buffer filled already */
 264			if (!size)
 265				goto out_unlock;
 266		}
 267		offset += dump->size;
 268	}
 269
 270out_unlock:
 271	mutex_unlock(&vmcoredd_mutex);
 272	return ret;
 273}
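/*
 * Worked example for the walk above: with two dumps of sizes 0x2000 and
 * 0x3000 on vmcoredd_list, a request with start == 0x2800 and size == 0x1000
 * skips the first dump (offset becomes 0x2000), then copies
 * tsz == min(0x2000 + 0x3000 - 0x2800, 0x1000) == 0x1000 bytes from the
 * second dump's buffer at offset start - offset == 0x800.
 */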
 274
 275#ifdef CONFIG_MMU
 276static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
 277			       u64 start, size_t size)
 278{
 279	struct vmcoredd_node *dump;
 280	u64 offset = 0;
 281	int ret = 0;
 282	size_t tsz;
 283	char *buf;
 284
 285	mutex_lock(&vmcoredd_mutex);
 286	list_for_each_entry(dump, &vmcoredd_list, list) {
 287		if (start < offset + dump->size) {
 288			tsz = min(offset + (u64)dump->size - start, (u64)size);
 289			buf = dump->buf + start - offset;
 290			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
 291							tsz)) {
 292				ret = -EFAULT;
 293				goto out_unlock;
 294			}
 295
 296			size -= tsz;
 297			start += tsz;
 298			dst += tsz;
 299
 300			/* Leave now if buffer filled already */
 301			if (!size)
 302				goto out_unlock;
 303		}
 304		offset += dump->size;
 305	}
 306
 307out_unlock:
 308	mutex_unlock(&vmcoredd_mutex);
 309	return ret;
 310}
 311#endif /* CONFIG_MMU */
 312#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 313
 314/* Read from the ELF header and then the crash dump. On error, a negative
 315 * value is returned; otherwise, the number of bytes read is returned.
 316 */
 317static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
 318{
 319	ssize_t acc = 0, tmp;
 320	size_t tsz;
 321	u64 start;
 322	struct vmcore *m = NULL;
 323
 324	if (!iov_iter_count(iter) || *fpos >= vmcore_size)
 325		return 0;
 326
 327	iov_iter_truncate(iter, vmcore_size - *fpos);
 328
 329	/* Read ELF core header */
 330	if (*fpos < elfcorebuf_sz) {
 331		tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
 332		if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
 333			return -EFAULT;
 334		*fpos += tsz;
 335		acc += tsz;
 336
 337		/* leave now if filled buffer already */
 338		if (!iov_iter_count(iter))
 339			return acc;
 340	}
 341
 342	/* Read ELF note segment */
 343	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
 344		void *kaddr;
 345
 346		/* We add device dumps before other elf notes because the
 347		 * other elf notes may not fill the elf notes buffer
 348		 * completely and we will end up with zero-filled data
 349		 * between the elf notes and the device dumps. Tools will
 350		 * then try to decode this zero-filled data as valid notes
 351		 * and we don't want that. Hence, adding device dumps before
 352		 * the other elf notes ensures that zero-filled data can be
 353		 * avoided.
 354		 */
 355#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 356		/* Read device dumps */
 357		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
 358			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
 359				  (size_t)*fpos, iov_iter_count(iter));
 360			start = *fpos - elfcorebuf_sz;
 361			if (vmcoredd_copy_dumps(iter, start, tsz))
 362				return -EFAULT;
 363
 364			*fpos += tsz;
 365			acc += tsz;
 366
 367			/* leave now if filled buffer already */
 368			if (!iov_iter_count(iter))
 369				return acc;
 370		}
 371#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 372
 373		/* Read remaining elf notes */
 374		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
 375			  iov_iter_count(iter));
 376		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
 377		if (copy_to_iter(kaddr, tsz, iter) < tsz)
 378			return -EFAULT;
 379
 380		*fpos += tsz;
 381		acc += tsz;
 382
 383		/* leave now if filled buffer already */
 384		if (!iov_iter_count(iter))
 385			return acc;
 386
 387		cond_resched();
 388	}
 389
 390	list_for_each_entry(m, &vmcore_list, list) {
 391		if (*fpos < m->offset + m->size) {
 392			tsz = (size_t)min_t(unsigned long long,
 393					    m->offset + m->size - *fpos,
 394					    iov_iter_count(iter));
 395			start = m->paddr + *fpos - m->offset;
 396			tmp = read_from_oldmem(iter, tsz, &start,
 397					cc_platform_has(CC_ATTR_MEM_ENCRYPT));
 398			if (tmp < 0)
 399				return tmp;
 400			*fpos += tsz;
 401			acc += tsz;
 402
 403			/* leave now if filled buffer already */
 404			if (!iov_iter_count(iter))
 405				return acc;
 406		}
 407
 408		cond_resched();
 409	}
 410
 411	return acc;
 412}
 413
 414static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
 415{
 416	return __read_vmcore(iter, &iocb->ki_pos);
 417}
 418
 419/**
 420 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 421 * @size: size of buffer
 422 *
 423 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 424 * the buffer to user-space by means of remap_vmalloc_range().
 425 *
 426 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 427 * disabled and there's no need to allow users to mmap the buffer.
 428 */
 429static inline char *vmcore_alloc_buf(size_t size)
 430{
 431#ifdef CONFIG_MMU
 432	return vmalloc_user(size);
 433#else
 434	return vzalloc(size);
 435#endif
 436}
 437
 438/*
 439 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 440 * essential for mmap_vmcore() in order to map physically
 441 * non-contiguous objects (ELF header, ELF note segment and memory
 442 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 443 * virtually contiguous user-space in ELF layout.
 444 */
 445#ifdef CONFIG_MMU
 446
 447/*
 448 * The vmcore fault handler uses the page cache and fills data using the
 449 * standard __read_vmcore() function.
 450 *
 451 * On s390 the fault handler is used for memory regions that can't be mapped
 452 * directly with remap_pfn_range().
 453 */
 454static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
 455{
 456#ifdef CONFIG_S390
 457	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 458	pgoff_t index = vmf->pgoff;
 459	struct iov_iter iter;
 460	struct kvec kvec;
 461	struct page *page;
 462	loff_t offset;
 463	int rc;
 464
 465	page = find_or_create_page(mapping, index, GFP_KERNEL);
 466	if (!page)
 467		return VM_FAULT_OOM;
 468	if (!PageUptodate(page)) {
 469		offset = (loff_t) index << PAGE_SHIFT;
 470		kvec.iov_base = page_address(page);
 471		kvec.iov_len = PAGE_SIZE;
 472		iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE);
 473
 474		rc = __read_vmcore(&iter, &offset);
 475		if (rc < 0) {
 476			unlock_page(page);
 477			put_page(page);
 478			return vmf_error(rc);
 479		}
 480		SetPageUptodate(page);
 481	}
 482	unlock_page(page);
 483	vmf->page = page;
 484	return 0;
 485#else
 486	return VM_FAULT_SIGBUS;
 487#endif
 488}
 489
 490static const struct vm_operations_struct vmcore_mmap_ops = {
 491	.fault = mmap_vmcore_fault,
 492};
 493
 494/*
 495 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 496 * reported as not being ram with the zero page.
 497 *
 498 * @vma: vm_area_struct describing requested mapping
 499 * @from: start remapping from
 500 * @pfn: page frame number to start remapping to
 501 * @size: remapping size
 502 * @prot: protection bits
 503 *
 504 * Returns zero on success, -EAGAIN on failure.
 505 */
 506static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
 507				    unsigned long from, unsigned long pfn,
 508				    unsigned long size, pgprot_t prot)
 509{
 510	unsigned long map_size;
 511	unsigned long pos_start, pos_end, pos;
 512	unsigned long zeropage_pfn = my_zero_pfn(0);
 513	size_t len = 0;
 514
 515	pos_start = pfn;
 516	pos_end = pfn + (size >> PAGE_SHIFT);
 517
 518	for (pos = pos_start; pos < pos_end; ++pos) {
 519		if (!pfn_is_ram(pos)) {
 520			/*
 521			 * We hit a page which is not ram. Remap the continuous
 522			 * region between pos_start and pos-1 and replace
 523			 * the non-ram page at pos with the zero page.
 524			 */
 525			if (pos > pos_start) {
 526				/* Remap continuous region */
 527				map_size = (pos - pos_start) << PAGE_SHIFT;
 528				if (remap_oldmem_pfn_range(vma, from + len,
 529							   pos_start, map_size,
 530							   prot))
 531					goto fail;
 532				len += map_size;
 533			}
 534			/* Remap the zero page */
 535			if (remap_oldmem_pfn_range(vma, from + len,
 536						   zeropage_pfn,
 537						   PAGE_SIZE, prot))
 538				goto fail;
 539			len += PAGE_SIZE;
 540			pos_start = pos + 1;
 541		}
 542	}
 543	if (pos > pos_start) {
 544		/* Remap the rest */
 545		map_size = (pos - pos_start) << PAGE_SHIFT;
 546		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
 547					   map_size, prot))
 548			goto fail;
 549	}
 550	return 0;
 551fail:
 552	do_munmap(vma->vm_mm, from, len, NULL);
 553	return -EAGAIN;
 554}
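/*
 * Worked example for the loop above: remapping pfns 100..104 when pfn 102
 * is reported as not RAM results in three remap_oldmem_pfn_range() calls:
 * pfns 100-101, the zero page in place of pfn 102, and pfns 103-104 via the
 * trailing "Remap the rest" branch.
 */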
 555
 556static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
 557			    unsigned long from, unsigned long pfn,
 558			    unsigned long size, pgprot_t prot)
 559{
 560	int ret, idx;
 561
 562	/*
 563	 * Check if a callback was registered to avoid looping over all
 564	 * pages without a reason.
 565	 */
 566	idx = srcu_read_lock(&vmcore_cb_srcu);
 567	if (!list_empty(&vmcore_cb_list))
 568		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
 569	else
 570		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
 571	srcu_read_unlock(&vmcore_cb_srcu, idx);
 572	return ret;
 573}
 574
 575static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 576{
 577	size_t size = vma->vm_end - vma->vm_start;
 578	u64 start, end, len, tsz;
 579	struct vmcore *m;
 580
 581	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
 582	end = start + size;
 583
 584	if (size > vmcore_size || end > vmcore_size)
 585		return -EINVAL;
 586
 587	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
 588		return -EPERM;
 589
 590	vm_flags_mod(vma, VM_MIXEDMAP, VM_MAYWRITE | VM_MAYEXEC);
 591	vma->vm_ops = &vmcore_mmap_ops;
 592
 593	len = 0;
 594
 595	if (start < elfcorebuf_sz) {
 596		u64 pfn;
 597
 598		tsz = min(elfcorebuf_sz - (size_t)start, size);
 599		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
 600		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
 601				    vma->vm_page_prot))
 602			return -EAGAIN;
 603		size -= tsz;
 604		start += tsz;
 605		len += tsz;
 606
 607		if (size == 0)
 608			return 0;
 609	}
 610
 611	if (start < elfcorebuf_sz + elfnotes_sz) {
 612		void *kaddr;
 613
 614		/* We add device dumps before other elf notes because the
 615		 * other elf notes may not fill the elf notes buffer
 616		 * completely and we will end up with zero-filled data
 617		 * between the elf notes and the device dumps. Tools will
 618		 * then try to decode this zero-filled data as valid notes
 619		 * and we don't want that. Hence, adding device dumps before
 620		 * the other elf notes ensures that zero-filled data can be
 621		 * avoided. This also ensures that the device dumps and
 622		 * other elf notes can be properly mmapped at a page-aligned
 623		 * address.
 624		 */
 625#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 626		/* Read device dumps */
 627		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
 628			u64 start_off;
 629
 630			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
 631				  (size_t)start, size);
 632			start_off = start - elfcorebuf_sz;
 633			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
 634						start_off, tsz))
 635				goto fail;
 636
 637			size -= tsz;
 638			start += tsz;
 639			len += tsz;
 640
 641			/* leave now if filled buffer already */
 642			if (!size)
 643				return 0;
 644		}
 645#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 646
 647		/* Read remaining elf notes */
 648		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
 649		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
 650		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
 651						kaddr, 0, tsz))
 652			goto fail;
 653
 654		size -= tsz;
 655		start += tsz;
 656		len += tsz;
 657
 658		if (size == 0)
 659			return 0;
 660	}
 661
 662	list_for_each_entry(m, &vmcore_list, list) {
 663		if (start < m->offset + m->size) {
 664			u64 paddr = 0;
 665
 666			tsz = (size_t)min_t(unsigned long long,
 667					    m->offset + m->size - start, size);
 668			paddr = m->paddr + start - m->offset;
 669			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
 670						    paddr >> PAGE_SHIFT, tsz,
 671						    vma->vm_page_prot))
 672				goto fail;
 673			size -= tsz;
 674			start += tsz;
 675			len += tsz;
 676
 677			if (size == 0)
 678				return 0;
 679		}
 680	}
 681
 682	return 0;
 683fail:
 684	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
 685	return -EAGAIN;
 686}
 687#else
 688static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 689{
 690	return -ENOSYS;
 691}
 692#endif
 693
 694static const struct proc_ops vmcore_proc_ops = {
 695	.proc_open	= open_vmcore,
 696	.proc_read_iter	= read_vmcore,
 697	.proc_lseek	= default_llseek,
 698	.proc_mmap	= mmap_vmcore,
 699};
 700
 701static struct vmcore* __init get_new_element(void)
 702{
 703	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
 704}
 705
 706static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
 707			   struct list_head *vc_list)
 708{
 709	u64 size;
 710	struct vmcore *m;
 711
 712	size = elfsz + elfnotesegsz;
 713	list_for_each_entry(m, vc_list, list) {
 714		size += m->size;
 715	}
 716	return size;
 717}
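/*
 * Resulting /proc/vmcore layout, which get_vmcore_size() sums up (all three
 * parts are page aligned by the setup code below):
 *
 *	+--------------------------------------------+
 *	| ELF header + program headers (elfcorebuf)  |
 *	+--------------------------------------------+
 *	| merged ELF note segment (elfnotes_buf,     |
 *	| device dumps first, if any)                |
 *	+--------------------------------------------+
 *	| memory chunks from vmcore_list (old RAM)   |
 *	+--------------------------------------------+
 */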
 718
 719/**
 720 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 721 *
 722 * @ehdr_ptr: ELF header
 723 *
 724 * This function updates the p_memsz member of each PT_NOTE entry in the
 725 * program header table pointed to by @ehdr_ptr to the real size of the
 726 * ELF note segment.
 727 */
 728static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
 729{
 730	int i, rc=0;
 731	Elf64_Phdr *phdr_ptr;
 732	Elf64_Nhdr *nhdr_ptr;
 733
 734	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
 735	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 736		void *notes_section;
 737		u64 offset, max_sz, sz, real_sz = 0;
 738		if (phdr_ptr->p_type != PT_NOTE)
 739			continue;
 740		max_sz = phdr_ptr->p_memsz;
 741		offset = phdr_ptr->p_offset;
 742		notes_section = kmalloc(max_sz, GFP_KERNEL);
 743		if (!notes_section)
 744			return -ENOMEM;
 745		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 746		if (rc < 0) {
 747			kfree(notes_section);
 748			return rc;
 749		}
 750		nhdr_ptr = notes_section;
 751		while (nhdr_ptr->n_namesz != 0) {
 752			sz = sizeof(Elf64_Nhdr) +
 753				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
 754				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
 755			if ((real_sz + sz) > max_sz) {
 756				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
 757					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
 758				break;
 759			}
 760			real_sz += sz;
 761			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
 762		}
 763		kfree(notes_section);
 764		phdr_ptr->p_memsz = real_sz;
 765		if (real_sz == 0) {
 766			pr_warn("Warning: Zero PT_NOTE entries found\n");
 767		}
 768	}
 769
 770	return 0;
 771}
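/*
 * Example of the per-note size computation above: for a "CORE" NT_PRSTATUS
 * note on x86-64 with n_namesz == 5 and n_descsz == 336,
 * sz == sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(336, 4)
 *    == 12 + 8 + 336 == 356 bytes,
 * i.e. name and descriptor are each padded to a 4-byte boundary.
 */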
 772
 773/**
 774 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 775 * headers and sum of real size of their ELF note segment headers and
 776 * data.
 777 *
 778 * @ehdr_ptr: ELF header
 779 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 780 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 781 *
 782 * This function is used to merge multiple PT_NOTE program headers
 783 * into a unique single one. The resulting unique entry will have
 784 * @sz_ptnote in its phdr->p_memsz.
 785 *
 786 * It is assumed that the program headers with PT_NOTE type pointed to by
 787 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 788 * and each PT_NOTE program header has the actual ELF note segment
 789 * size in its p_memsz member.
 790 */
 791static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
 792						 int *nr_ptnote, u64 *sz_ptnote)
 793{
 794	int i;
 795	Elf64_Phdr *phdr_ptr;
 796
 797	*nr_ptnote = *sz_ptnote = 0;
 798
 799	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
 800	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 801		if (phdr_ptr->p_type != PT_NOTE)
 802			continue;
 803		*nr_ptnote += 1;
 804		*sz_ptnote += phdr_ptr->p_memsz;
 805	}
 806
 807	return 0;
 808}
 809
 810/**
 811 * copy_notes_elf64 - copy ELF note segments in a given buffer
 812 *
 813 * @ehdr_ptr: ELF header
 814 * @notes_buf: buffer into which ELF note segments are copied
 815 *
 816 * This function is used to copy ELF note segments from the 1st kernel
 817 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 818 * size of the buffer @notes_buf is equal to or larger than the sum of
 819 * the real ELF note segment headers and data.
 820 *
 821 * It is assumed that the program headers with PT_NOTE type pointed to by
 822 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 823 * and each PT_NOTE program header has the actual ELF note segment
 824 * size in its p_memsz member.
 825 */
 826static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
 827{
 828	int i, rc=0;
 829	Elf64_Phdr *phdr_ptr;
 830
 831	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);
 832
 833	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 834		u64 offset;
 835		if (phdr_ptr->p_type != PT_NOTE)
 836			continue;
 837		offset = phdr_ptr->p_offset;
 838		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
 839					   &offset);
 840		if (rc < 0)
 841			return rc;
 842		notes_buf += phdr_ptr->p_memsz;
 843	}
 844
 845	return 0;
 846}
 847
 848/* Merges all the PT_NOTE headers into one. */
 849static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
 850					   char **notes_buf, size_t *notes_sz)
 851{
 852	int i, nr_ptnote=0, rc=0;
 853	char *tmp;
 854	Elf64_Ehdr *ehdr_ptr;
 855	Elf64_Phdr phdr;
 856	u64 phdr_sz = 0, note_off;
 857
 858	ehdr_ptr = (Elf64_Ehdr *)elfptr;
 859
 860	rc = update_note_header_size_elf64(ehdr_ptr);
 861	if (rc < 0)
 862		return rc;
 863
 864	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
 865	if (rc < 0)
 866		return rc;
 867
 868	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
 869	*notes_buf = vmcore_alloc_buf(*notes_sz);
 870	if (!*notes_buf)
 871		return -ENOMEM;
 872
 873	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
 874	if (rc < 0)
 875		return rc;
 876
 877	/* Prepare merged PT_NOTE program header. */
 878	phdr.p_type    = PT_NOTE;
 879	phdr.p_flags   = 0;
 880	note_off = sizeof(Elf64_Ehdr) +
 881			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
 882	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
 883	phdr.p_vaddr   = phdr.p_paddr = 0;
 884	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
 885	phdr.p_align   = 4;
 886
 887	/* Add merged PT_NOTE program header */
 888	tmp = elfptr + sizeof(Elf64_Ehdr);
 889	memcpy(tmp, &phdr, sizeof(phdr));
 890	tmp += sizeof(phdr);
 891
 892	/* Remove unwanted PT_NOTE program headers. */
 893	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
 894	*elfsz = *elfsz - i;
 895	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
 896	memset(elfptr + *elfsz, 0, i);
 897	*elfsz = roundup(*elfsz, PAGE_SIZE);
 898
 899	/* Modify e_phnum to reflect merged headers. */
 900	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
 901
 902	/* Store the size of all notes.  We need this to update the note
 903	 * header when device dumps are added.
 904	 */
 905	elfnotes_orig_sz = phdr.p_memsz;
 906
 907	return 0;
 908}
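/*
 * Net effect of the merge above: if the crash kernel exported one PT_NOTE
 * per CPU (say 4 of them), the header table shrinks to a single PT_NOTE
 * followed by the unchanged PT_LOAD entries, e_phnum drops by
 * nr_ptnote - 1 == 3, and the note data itself now lives in the separate
 * elfnotes_buf rather than in old memory.
 */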
 909
 910/**
 911 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 912 *
 913 * @ehdr_ptr: ELF header
 914 *
 915 * This function updates the p_memsz member of each PT_NOTE entry in the
 916 * program header table pointed to by @ehdr_ptr to the real size of the
 917 * ELF note segment.
 918 */
 919static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
 920{
 921	int i, rc=0;
 922	Elf32_Phdr *phdr_ptr;
 923	Elf32_Nhdr *nhdr_ptr;
 924
 925	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
 926	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 927		void *notes_section;
 928		u64 offset, max_sz, sz, real_sz = 0;
 929		if (phdr_ptr->p_type != PT_NOTE)
 930			continue;
 931		max_sz = phdr_ptr->p_memsz;
 932		offset = phdr_ptr->p_offset;
 933		notes_section = kmalloc(max_sz, GFP_KERNEL);
 934		if (!notes_section)
 935			return -ENOMEM;
 936		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 937		if (rc < 0) {
 938			kfree(notes_section);
 939			return rc;
 940		}
 941		nhdr_ptr = notes_section;
 942		while (nhdr_ptr->n_namesz != 0) {
 943			sz = sizeof(Elf32_Nhdr) +
 944				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
 945				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
 946			if ((real_sz + sz) > max_sz) {
 947				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
 948					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
 949				break;
 950			}
 951			real_sz += sz;
 952			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
 953		}
 954		kfree(notes_section);
 955		phdr_ptr->p_memsz = real_sz;
 956		if (real_sz == 0) {
 957			pr_warn("Warning: Zero PT_NOTE entries found\n");
 958		}
 959	}
 960
 961	return 0;
 962}
 963
 964/**
 965 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 966 * headers and sum of real size of their ELF note segment headers and
 967 * data.
 968 *
 969 * @ehdr_ptr: ELF header
 970 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 971 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 972 *
 973 * This function is used to merge multiple PT_NOTE program headers
 974 * into a unique single one. The resulting unique entry will have
 975 * @sz_ptnote in its phdr->p_memsz.
 976 *
 977 * It is assumed that the program headers with PT_NOTE type pointed to by
 978 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 979 * and each PT_NOTE program header has the actual ELF note segment
 980 * size in its p_memsz member.
 981 */
 982static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
 983						 int *nr_ptnote, u64 *sz_ptnote)
 984{
 985	int i;
 986	Elf32_Phdr *phdr_ptr;
 987
 988	*nr_ptnote = *sz_ptnote = 0;
 989
 990	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
 991	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 992		if (phdr_ptr->p_type != PT_NOTE)
 993			continue;
 994		*nr_ptnote += 1;
 995		*sz_ptnote += phdr_ptr->p_memsz;
 996	}
 997
 998	return 0;
 999}
1000
1001/**
1002 * copy_notes_elf32 - copy ELF note segments in a given buffer
1003 *
1004 * @ehdr_ptr: ELF header
1005 * @notes_buf: buffer into which ELF note segments are copied
1006 *
1007 * This function is used to copy ELF note segments from the 1st kernel
1008 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
1009 * size of the buffer @notes_buf is equal to or larger than the sum of
1010 * the real ELF note segment headers and data.
1011 *
1012 * It is assumed that the program headers with PT_NOTE type pointed to by
1013 * @ehdr_ptr have already been updated by update_note_header_size_elf32
1014 * and each PT_NOTE program header has the actual ELF note segment
1015 * size in its p_memsz member.
1016 */
1017static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
1018{
1019	int i, rc=0;
1020	Elf32_Phdr *phdr_ptr;
1021
1022	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);
1023
1024	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1025		u64 offset;
1026		if (phdr_ptr->p_type != PT_NOTE)
1027			continue;
1028		offset = phdr_ptr->p_offset;
1029		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
1030					   &offset);
1031		if (rc < 0)
1032			return rc;
1033		notes_buf += phdr_ptr->p_memsz;
1034	}
1035
1036	return 0;
1037}
1038
1039/* Merges all the PT_NOTE headers into one. */
1040static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
1041					   char **notes_buf, size_t *notes_sz)
1042{
1043	int i, nr_ptnote=0, rc=0;
1044	char *tmp;
1045	Elf32_Ehdr *ehdr_ptr;
1046	Elf32_Phdr phdr;
1047	u64 phdr_sz = 0, note_off;
1048
1049	ehdr_ptr = (Elf32_Ehdr *)elfptr;
1050
1051	rc = update_note_header_size_elf32(ehdr_ptr);
1052	if (rc < 0)
1053		return rc;
1054
1055	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
1056	if (rc < 0)
1057		return rc;
1058
1059	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
1060	*notes_buf = vmcore_alloc_buf(*notes_sz);
1061	if (!*notes_buf)
1062		return -ENOMEM;
1063
1064	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
1065	if (rc < 0)
1066		return rc;
1067
1068	/* Prepare merged PT_NOTE program header. */
1069	phdr.p_type    = PT_NOTE;
1070	phdr.p_flags   = 0;
1071	note_off = sizeof(Elf32_Ehdr) +
1072			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
1073	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
1074	phdr.p_vaddr   = phdr.p_paddr = 0;
1075	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
1076	phdr.p_align   = 4;
1077
1078	/* Add merged PT_NOTE program header */
1079	tmp = elfptr + sizeof(Elf32_Ehdr);
1080	memcpy(tmp, &phdr, sizeof(phdr));
1081	tmp += sizeof(phdr);
1082
1083	/* Remove unwanted PT_NOTE program headers. */
1084	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
1085	*elfsz = *elfsz - i;
1086	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
1087	memset(elfptr + *elfsz, 0, i);
1088	*elfsz = roundup(*elfsz, PAGE_SIZE);
1089
1090	/* Modify e_phnum to reflect merged headers. */
1091	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
1092
1093	/* Store the size of all notes.  We need this to update the note
1094	 * header when device dumps are added.
1095	 */
1096	elfnotes_orig_sz = phdr.p_memsz;
1097
1098	return 0;
1099}
1100
1101/* Add memory chunks represented by program headers to the vmcore list. Also
1102 * update the new offset fields of the exported program headers. */
1103static int __init process_ptload_program_headers_elf64(char *elfptr,
1104						size_t elfsz,
1105						size_t elfnotes_sz,
1106						struct list_head *vc_list)
1107{
1108	int i;
1109	Elf64_Ehdr *ehdr_ptr;
1110	Elf64_Phdr *phdr_ptr;
1111	loff_t vmcore_off;
1112	struct vmcore *new;
1113
1114	ehdr_ptr = (Elf64_Ehdr *)elfptr;
1115	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
1116
1117	/* Skip ELF header, program headers and ELF note segment. */
1118	vmcore_off = elfsz + elfnotes_sz;
1119
1120	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1121		u64 paddr, start, end, size;
1122
1123		if (phdr_ptr->p_type != PT_LOAD)
1124			continue;
1125
1126		paddr = phdr_ptr->p_offset;
1127		start = rounddown(paddr, PAGE_SIZE);
1128		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1129		size = end - start;
1130
1131		/* Add this contiguous chunk of memory to the vmcore list. */
1132		new = get_new_element();
1133		if (!new)
1134			return -ENOMEM;
1135		new->paddr = start;
1136		new->size = size;
1137		list_add_tail(&new->list, vc_list);
1138
1139		/* Update the program header offset. */
1140		phdr_ptr->p_offset = vmcore_off + (paddr - start);
1141		vmcore_off = vmcore_off + size;
1142	}
1143	return 0;
1144}
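/*
 * Worked example for the offset rewrite above, with PAGE_SIZE == 4096:
 * a PT_LOAD entry with p_offset (the old physical address) == 0x100200 and
 * p_memsz == 0x400 becomes a one-page chunk [0x100000, 0x101000) on
 * vmcore_list, and its p_offset is rewritten to vmcore_off + 0x200, so file
 * offsets keep the same in-page alignment as the physical data.
 */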
1145
1146static int __init process_ptload_program_headers_elf32(char *elfptr,
1147						size_t elfsz,
1148						size_t elfnotes_sz,
1149						struct list_head *vc_list)
1150{
1151	int i;
1152	Elf32_Ehdr *ehdr_ptr;
1153	Elf32_Phdr *phdr_ptr;
1154	loff_t vmcore_off;
1155	struct vmcore *new;
1156
1157	ehdr_ptr = (Elf32_Ehdr *)elfptr;
1158	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
1159
1160	/* Skip ELF header, program headers and ELF note segment. */
1161	vmcore_off = elfsz + elfnotes_sz;
1162
1163	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1164		u64 paddr, start, end, size;
1165
1166		if (phdr_ptr->p_type != PT_LOAD)
1167			continue;
1168
1169		paddr = phdr_ptr->p_offset;
1170		start = rounddown(paddr, PAGE_SIZE);
1171		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1172		size = end - start;
1173
1174		/* Add this contiguous chunk of memory to the vmcore list. */
1175		new = get_new_element();
1176		if (!new)
1177			return -ENOMEM;
1178		new->paddr = start;
1179		new->size = size;
1180		list_add_tail(&new->list, vc_list);
1181
1182		/* Update the program header offset */
1183		phdr_ptr->p_offset = vmcore_off + (paddr - start);
1184		vmcore_off = vmcore_off + size;
1185	}
1186	return 0;
1187}
1188
1189/* Sets offset fields of vmcore elements. */
1190static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
1191				    struct list_head *vc_list)
1192{
1193	loff_t vmcore_off;
1194	struct vmcore *m;
1195
1196	/* Skip ELF header, program headers and ELF note segment. */
1197	vmcore_off = elfsz + elfnotes_sz;
1198
1199	list_for_each_entry(m, vc_list, list) {
1200		m->offset = vmcore_off;
1201		vmcore_off += m->size;
1202	}
1203}
1204
1205static void free_elfcorebuf(void)
1206{
1207	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
1208	elfcorebuf = NULL;
1209	vfree(elfnotes_buf);
1210	elfnotes_buf = NULL;
1211}
1212
1213static int __init parse_crash_elf64_headers(void)
1214{
1215	int rc=0;
1216	Elf64_Ehdr ehdr;
1217	u64 addr;
1218
1219	addr = elfcorehdr_addr;
1220
1221	/* Read ELF header */
1222	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
1223	if (rc < 0)
1224		return rc;
1225
1226	/* Do some basic verification. */
1227	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1228		(ehdr.e_type != ET_CORE) ||
1229		!vmcore_elf64_check_arch(&ehdr) ||
1230		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
1231		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1232		ehdr.e_version != EV_CURRENT ||
1233		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
1234		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
1235		ehdr.e_phnum == 0) {
1236		pr_warn("Warning: Core image elf header is not sane\n");
1237		return -EINVAL;
1238	}
1239
1240	/* Read in all elf headers. */
1241	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
1242				ehdr.e_phnum * sizeof(Elf64_Phdr);
1243	elfcorebuf_sz = elfcorebuf_sz_orig;
1244	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1245					      get_order(elfcorebuf_sz_orig));
1246	if (!elfcorebuf)
1247		return -ENOMEM;
1248	addr = elfcorehdr_addr;
1249	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1250	if (rc < 0)
1251		goto fail;
1252
1253	/* Merge all PT_NOTE headers into one. */
1254	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
1255				      &elfnotes_buf, &elfnotes_sz);
1256	if (rc)
1257		goto fail;
1258	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
1259						  elfnotes_sz, &vmcore_list);
1260	if (rc)
1261		goto fail;
1262	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1263	return 0;
1264fail:
1265	free_elfcorebuf();
1266	return rc;
1267}
1268
1269static int __init parse_crash_elf32_headers(void)
1270{
1271	int rc=0;
1272	Elf32_Ehdr ehdr;
1273	u64 addr;
1274
1275	addr = elfcorehdr_addr;
1276
1277	/* Read ELF header */
1278	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
1279	if (rc < 0)
1280		return rc;
1281
1282	/* Do some basic verification. */
1283	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1284		(ehdr.e_type != ET_CORE) ||
1285		!vmcore_elf32_check_arch(&ehdr) ||
1286		ehdr.e_ident[EI_CLASS] != ELFCLASS32||
1287		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1288		ehdr.e_version != EV_CURRENT ||
1289		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
1290		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
1291		ehdr.e_phnum == 0) {
1292		pr_warn("Warning: Core image elf header is not sane\n");
1293		return -EINVAL;
1294	}
1295
1296	/* Read in all elf headers. */
1297	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
1298	elfcorebuf_sz = elfcorebuf_sz_orig;
1299	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1300					      get_order(elfcorebuf_sz_orig));
1301	if (!elfcorebuf)
1302		return -ENOMEM;
1303	addr = elfcorehdr_addr;
1304	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1305	if (rc < 0)
1306		goto fail;
1307
1308	/* Merge all PT_NOTE headers into one. */
1309	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
1310				      &elfnotes_buf, &elfnotes_sz);
1311	if (rc)
1312		goto fail;
1313	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
1314						  elfnotes_sz, &vmcore_list);
1315	if (rc)
1316		goto fail;
1317	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1318	return 0;
1319fail:
1320	free_elfcorebuf();
1321	return rc;
1322}
1323
1324static int __init parse_crash_elf_headers(void)
1325{
1326	unsigned char e_ident[EI_NIDENT];
1327	u64 addr;
1328	int rc=0;
1329
1330	addr = elfcorehdr_addr;
1331	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
1332	if (rc < 0)
1333		return rc;
1334	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
1335		pr_warn("Warning: Core image elf header not found\n");
1336		return -EINVAL;
1337	}
1338
1339	if (e_ident[EI_CLASS] == ELFCLASS64) {
1340		rc = parse_crash_elf64_headers();
1341		if (rc)
1342			return rc;
1343	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
1344		rc = parse_crash_elf32_headers();
1345		if (rc)
1346			return rc;
1347	} else {
1348		pr_warn("Warning: Core image elf header is not sane\n");
1349		return -EINVAL;
1350	}
1351
1352	/* Determine vmcore size. */
1353	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1354				      &vmcore_list);
1355
1356	return 0;
1357}
1358
1359#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1360/**
1361 * vmcoredd_write_header - Write vmcore device dump header at the
1362 * beginning of the dump's buffer.
1363 * @buf: Output buffer where the note is written
1364 * @data: Dump info
1365 * @size: Size of the dump
1366 *
1367 * Fills beginning of the dump's buffer with vmcore device dump header.
1368 */
1369static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
1370				  u32 size)
1371{
1372	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;
1373
1374	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
1375	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
1376	vdd_hdr->n_type = NT_VMCOREDD;
1377
1378	strscpy_pad(vdd_hdr->name, VMCOREDD_NOTE_NAME);
1379	strscpy_pad(vdd_hdr->dump_name, data->dump_name);
1380}
1381
1382/**
1383 * vmcoredd_update_program_headers - Update all ELF program headers
1384 * @elfptr: Pointer to elf header
1385 * @elfnotesz: Size of elf notes aligned to page size
1386 * @vmcoreddsz: Size of device dumps to be added to elf note header
1387 *
1388 * Determine type of ELF header (Elf64 or Elf32) and update the elf note size.
1389 * Also update the offsets of all the program headers after the elf note header.
1390 */
1391static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
1392					    size_t vmcoreddsz)
1393{
1394	unsigned char *e_ident = (unsigned char *)elfptr;
1395	u64 start, end, size;
1396	loff_t vmcore_off;
1397	u32 i;
1398
1399	vmcore_off = elfcorebuf_sz + elfnotesz;
1400
1401	if (e_ident[EI_CLASS] == ELFCLASS64) {
1402		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
1403		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
1404
1405		/* Update all program headers */
1406		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1407			if (phdr->p_type == PT_NOTE) {
1408				/* Update note size */
1409				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1410				phdr->p_filesz = phdr->p_memsz;
1411				continue;
1412			}
1413
1414			start = rounddown(phdr->p_offset, PAGE_SIZE);
1415			end = roundup(phdr->p_offset + phdr->p_memsz,
1416				      PAGE_SIZE);
1417			size = end - start;
1418			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1419			vmcore_off += size;
1420		}
1421	} else {
1422		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
1423		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
1424
1425		/* Update all program headers */
1426		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1427			if (phdr->p_type == PT_NOTE) {
1428				/* Update note size */
1429				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1430				phdr->p_filesz = phdr->p_memsz;
1431				continue;
1432			}
1433
1434			start = rounddown(phdr->p_offset, PAGE_SIZE);
1435			end = roundup(phdr->p_offset + phdr->p_memsz,
1436				      PAGE_SIZE);
1437			size = end - start;
1438			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1439			vmcore_off += size;
1440		}
1441	}
1442}
1443
1444/**
1445 * vmcoredd_update_size - Update the total size of the device dumps and update
1446 * ELF header
1447 * @dump_size: Size of the current device dump to be added to total size
1448 *
1449 * Update the total size of all the device dumps and update the ELF program
1450 * headers. Calculate the new offsets for the vmcore list and update the
1451 * total vmcore size.
1452 */
1453static void vmcoredd_update_size(size_t dump_size)
1454{
1455	vmcoredd_orig_sz += dump_size;
1456	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
1457	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
1458					vmcoredd_orig_sz);
1459
1460	/* Update vmcore list offsets */
1461	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1462
1463	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1464				      &vmcore_list);
1465	proc_vmcore->size = vmcore_size;
1466}
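/*
 * Example of the sizing above, with PAGE_SIZE == 4096: if the merged notes
 * were originally 0x1100 bytes and a 0x3000-byte device dump is added,
 * elfnotes_sz becomes roundup(0x1100, 0x1000) + 0x3000 == 0x5000, and every
 * PT_LOAD offset and vmcore_list entry shifts accordingly.
 */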
1467
1468/**
1469 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
1470 * @data: dump info.
1471 *
1472 * Allocate a buffer and invoke the calling driver's dump collect routine.
1473 * Write ELF note at the beginning of the buffer to indicate vmcore device
1474 * dump and add the dump to global list.
1475 */
1476int vmcore_add_device_dump(struct vmcoredd_data *data)
1477{
1478	struct vmcoredd_node *dump;
1479	void *buf = NULL;
1480	size_t data_size;
1481	int ret;
1482
1483	if (vmcoredd_disabled) {
1484		pr_err_once("Device dump is disabled\n");
1485		return -EINVAL;
1486	}
1487
1488	if (!data || !strlen(data->dump_name) ||
1489	    !data->vmcoredd_callback || !data->size)
1490		return -EINVAL;
1491
1492	dump = vzalloc(sizeof(*dump));
1493	if (!dump) {
1494		ret = -ENOMEM;
1495		goto out_err;
1496	}
1497
1498	/* Keep the size of the buffer page aligned so that it can be mmapped */
1499	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
1500			    PAGE_SIZE);
1501
1502	/* Allocate a buffer for drivers to write their dumps into */
1503	buf = vmcore_alloc_buf(data_size);
1504	if (!buf) {
1505		ret = -ENOMEM;
1506		goto out_err;
1507	}
1508
1509	vmcoredd_write_header(buf, data, data_size -
1510			      sizeof(struct vmcoredd_header));
1511
1512	/* Invoke the driver's dump collection routine */
1513	ret = data->vmcoredd_callback(data, buf +
1514				      sizeof(struct vmcoredd_header));
1515	if (ret)
1516		goto out_err;
1517
1518	dump->buf = buf;
1519	dump->size = data_size;
1520
1521	/* Add the dump to the device dump list */
1522	mutex_lock(&vmcoredd_mutex);
1523	list_add_tail(&dump->list, &vmcoredd_list);
1524	mutex_unlock(&vmcoredd_mutex);
1525
1526	vmcoredd_update_size(data_size);
1527	return 0;
1528
1529out_err:
1530	vfree(buf);
1531	vfree(dump);
1532
1533	return ret;
1534}
1535EXPORT_SYMBOL(vmcore_add_device_dump);
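/*
 * Example: adding a device dump from a driver. A minimal sketch;
 * my_collect(), my_device_snapshot() and MY_DUMP_SIZE are hypothetical, but
 * the vmcoredd_data fields are the ones vmcore_add_device_dump() consumes
 * above:
 *
 *	static int my_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		return my_device_snapshot(buf, data->size);
 *	}
 *
 *	static struct vmcoredd_data my_dump = {
 *		.dump_name = "MY_DEVICE_DUMP",
 *		.size = MY_DUMP_SIZE,
 *		.vmcoredd_callback = my_collect,
 *	};
 *
 *	vmcore_add_device_dump(&my_dump);
 */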
1536#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1537
1538/* Free all dumps in vmcore device dump list */
1539static void vmcore_free_device_dumps(void)
1540{
1541#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1542	mutex_lock(&vmcoredd_mutex);
1543	while (!list_empty(&vmcoredd_list)) {
1544		struct vmcoredd_node *dump;
1545
1546		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
1547					list);
1548		list_del(&dump->list);
1549		vfree(dump->buf);
1550		vfree(dump);
1551	}
1552	mutex_unlock(&vmcoredd_mutex);
1553#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1554}
1555
1556/* Init function for vmcore module. */
1557static int __init vmcore_init(void)
1558{
1559	int rc = 0;
1560
1561	/* Allow architectures to allocate ELF header in 2nd kernel */
1562	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
1563	if (rc)
1564		return rc;
1565	/*
1566	 * If elfcorehdr= has been passed on the command line or created in
1567	 * the 2nd kernel, then capture the dump.
1568	 */
1569	if (!(is_vmcore_usable()))
1570		return rc;
1571	rc = parse_crash_elf_headers();
1572	if (rc) {
1573		elfcorehdr_free(elfcorehdr_addr);
1574		pr_warn("Kdump: vmcore not initialized\n");
1575		return rc;
1576	}
1577	elfcorehdr_free(elfcorehdr_addr);
1578	elfcorehdr_addr = ELFCORE_ADDR_ERR;
1579
1580	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
1581	if (proc_vmcore)
1582		proc_vmcore->size = vmcore_size;
1583	return 0;
1584}
1585fs_initcall(vmcore_init);
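/*
 * Once vmcore_init() has run in the kdump kernel, the dump is consumed from
 * userspace like any regular file, e.g.:
 *
 *	cp /proc/vmcore /var/crash/vmcore
 *
 * or via makedumpfile, which reads and mmaps this interface to produce a
 * filtered, compressed dump.
 */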
1586
1587/* Cleanup function for vmcore module. */
1588void vmcore_cleanup(void)
1589{
1590	if (proc_vmcore) {
1591		proc_remove(proc_vmcore);
1592		proc_vmcore = NULL;
1593	}
1594
1595	/* clear the vmcore list. */
1596	while (!list_empty(&vmcore_list)) {
1597		struct vmcore *m;
1598
1599		m = list_first_entry(&vmcore_list, struct vmcore, list);
1600		list_del(&m->list);
1601		kfree(m);
1602	}
1603	free_elfcorebuf();
1604
1605	/* clear vmcore device dump list */
1606	vmcore_free_device_dumps();
1607}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *	fs/proc/vmcore.c Interface for accessing the crash
   4 * 				 dump from the system's previous life.
   5 * 	Heavily borrowed from fs/proc/kcore.c
   6 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
   7 *	Copyright (C) IBM Corporation, 2004. All rights reserved
   8 *
   9 */
  10
  11#include <linux/mm.h>
  12#include <linux/kcore.h>
  13#include <linux/user.h>
  14#include <linux/elf.h>
  15#include <linux/elfcore.h>
  16#include <linux/export.h>
  17#include <linux/slab.h>
  18#include <linux/highmem.h>
  19#include <linux/printk.h>
  20#include <linux/memblock.h>
  21#include <linux/init.h>
  22#include <linux/crash_dump.h>
  23#include <linux/list.h>
  24#include <linux/moduleparam.h>
  25#include <linux/mutex.h>
  26#include <linux/vmalloc.h>
  27#include <linux/pagemap.h>
  28#include <linux/uaccess.h>
  29#include <linux/mem_encrypt.h>
  30#include <asm/io.h>
  31#include "internal.h"
  32
  33/* List representing chunks of contiguous memory areas and their offsets in
  34 * vmcore file.
  35 */
  36static LIST_HEAD(vmcore_list);
  37
  38/* Stores the pointer to the buffer containing kernel elf core headers. */
  39static char *elfcorebuf;
  40static size_t elfcorebuf_sz;
  41static size_t elfcorebuf_sz_orig;
  42
  43static char *elfnotes_buf;
  44static size_t elfnotes_sz;
  45/* Size of all notes minus the device dump notes */
  46static size_t elfnotes_orig_sz;
  47
  48/* Total size of vmcore file. */
  49static u64 vmcore_size;
  50
  51static struct proc_dir_entry *proc_vmcore;
  52
  53#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
  54/* Device Dump list and mutex to synchronize access to list */
  55static LIST_HEAD(vmcoredd_list);
  56static DEFINE_MUTEX(vmcoredd_mutex);
  57
  58static bool vmcoredd_disabled;
  59core_param(novmcoredd, vmcoredd_disabled, bool, 0);
  60#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
  61
  62/* Device Dump Size */
  63static size_t vmcoredd_orig_sz;
  64
  65/*
  66 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
  67 * The called function has to take care of module refcounting.
  68 */
  69static int (*oldmem_pfn_is_ram)(unsigned long pfn);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  70
  71int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
  72{
  73	if (oldmem_pfn_is_ram)
  74		return -EBUSY;
  75	oldmem_pfn_is_ram = fn;
  76	return 0;
 
 
 
 
 
 
 
 
  77}
  78EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);
  79
  80void unregister_oldmem_pfn_is_ram(void)
  81{
  82	oldmem_pfn_is_ram = NULL;
  83	wmb();
 
 
 
 
 
 
 
 
 
 
 
  84}
  85EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
  86
  87static int pfn_is_ram(unsigned long pfn)
  88{
  89	int (*fn)(unsigned long pfn);
  90	/* pfn is ram unless fn() checks pagetype */
  91	int ret = 1;
  92
  93	/*
  94	 * Ask hypervisor if the pfn is really ram.
  95	 * A ballooned page contains no data and reading from such a page
  96	 * will cause high load in the hypervisor.
  97	 */
  98	fn = oldmem_pfn_is_ram;
  99	if (fn)
 100		ret = fn(pfn);
 101
 102	return ret;
 103}
 104
 105/* Reads a page from the oldmem device from given offset. */
 106ssize_t read_from_oldmem(char *buf, size_t count,
 107			 u64 *ppos, int userbuf,
 108			 bool encrypted)
 109{
 110	unsigned long pfn, offset;
 111	size_t nr_bytes;
 112	ssize_t read = 0, tmp;
 
 113
 114	if (!count)
 115		return 0;
 116
 117	offset = (unsigned long)(*ppos % PAGE_SIZE);
 118	pfn = (unsigned long)(*ppos / PAGE_SIZE);
 119
 
 120	do {
 121		if (count > (PAGE_SIZE - offset))
 122			nr_bytes = PAGE_SIZE - offset;
 123		else
 124			nr_bytes = count;
 125
 126		/* If pfn is not ram, return zeros for sparse dump files */
 127		if (pfn_is_ram(pfn) == 0)
 128			memset(buf, 0, nr_bytes);
 129		else {
 130			if (encrypted)
 131				tmp = copy_oldmem_page_encrypted(pfn, buf,
 132								 nr_bytes,
 133								 offset,
 134								 userbuf);
 135			else
 136				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
 137						       offset, userbuf);
 
 
 
 
 
 138
 139			if (tmp < 0)
 140				return tmp;
 141		}
 142		*ppos += nr_bytes;
 143		count -= nr_bytes;
 144		buf += nr_bytes;
 145		read += nr_bytes;
 146		++pfn;
 147		offset = 0;
 148	} while (count);
 
 149
 150	return read;
 151}
 152
 153/*
 154 * Architectures may override this function to allocate ELF header in 2nd kernel
 155 */
 156int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
 157{
 158	return 0;
 159}
 160
 161/*
 162 * Architectures may override this function to free header
 163 */
 164void __weak elfcorehdr_free(unsigned long long addr)
 165{}
 166
 167/*
 168 * Architectures may override this function to read from ELF header
 169 */
 170ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 171{
 172	return read_from_oldmem(buf, count, ppos, 0, false);
 
 
 
 
 
 173}
 174
 175/*
 176 * Architectures may override this function to read from notes sections
 177 */
 178ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
 179{
 180	return read_from_oldmem(buf, count, ppos, 0, mem_encrypt_active());
 
 
 
 
 
 
 181}
 182
 183/*
 184 * Architectures may override this function to map oldmem
 185 */
 186int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
 187				  unsigned long from, unsigned long pfn,
 188				  unsigned long size, pgprot_t prot)
 189{
 190	prot = pgprot_encrypted(prot);
 191	return remap_pfn_range(vma, from, pfn, size, prot);
 192}
 193
 194/*
 195 * Architectures which support memory encryption override this.
 196 */
 197ssize_t __weak
 198copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
 199			   unsigned long offset, int userbuf)
 200{
 201	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
 202}
 203
 204/*
 205 * Copy to either kernel or user space
 206 */
 207static int copy_to(void *target, void *src, size_t size, int userbuf)
 208{
 209	if (userbuf) {
 210		if (copy_to_user((char __user *) target, src, size))
 211			return -EFAULT;
 212	} else {
 213		memcpy(target, src, size);
 214	}
 215	return 0;
 216}
 217
 218#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 219static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
 220{
 221	struct vmcoredd_node *dump;
 222	u64 offset = 0;
 223	int ret = 0;
 224	size_t tsz;
 225	char *buf;
 226
 227	mutex_lock(&vmcoredd_mutex);
 228	list_for_each_entry(dump, &vmcoredd_list, list) {
 229		if (start < offset + dump->size) {
 230			tsz = min(offset + (u64)dump->size - start, (u64)size);
 231			buf = dump->buf + start - offset;
 232			if (copy_to(dst, buf, tsz, userbuf)) {
 233				ret = -EFAULT;
 234				goto out_unlock;
 235			}
 236
 237			size -= tsz;
 238			start += tsz;
 239			dst += tsz;
 240
 241			/* Leave now if buffer filled already */
 242			if (!size)
 243				goto out_unlock;
 244		}
 245		offset += dump->size;
 246	}
 247
 248out_unlock:
 249	mutex_unlock(&vmcoredd_mutex);
 250	return ret;
 251}
 252
 253#ifdef CONFIG_MMU
 254static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
 255			       u64 start, size_t size)
 256{
 257	struct vmcoredd_node *dump;
 258	u64 offset = 0;
 259	int ret = 0;
 260	size_t tsz;
 261	char *buf;
 262
 263	mutex_lock(&vmcoredd_mutex);
 264	list_for_each_entry(dump, &vmcoredd_list, list) {
 265		if (start < offset + dump->size) {
 266			tsz = min(offset + (u64)dump->size - start, (u64)size);
 267			buf = dump->buf + start - offset;
 268			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
 269							tsz)) {
 270				ret = -EFAULT;
 271				goto out_unlock;
 272			}
 273
 274			size -= tsz;
 275			start += tsz;
 276			dst += tsz;
 277
 278			/* Leave now if buffer filled already */
 279			if (!size)
 280				goto out_unlock;
 281		}
 282		offset += dump->size;
 283	}
 284
 285out_unlock:
 286	mutex_unlock(&vmcoredd_mutex);
 287	return ret;
 288}
 289#endif /* CONFIG_MMU */
 290#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 291
 292/* Read from the ELF header and then the crash dump. On error, negative value is
 293 * returned otherwise number of bytes read are returned.
 294 */
 295static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
 296			     int userbuf)
 297{
 298	ssize_t acc = 0, tmp;
 299	size_t tsz;
 300	u64 start;
 301	struct vmcore *m = NULL;
 302
 303	if (buflen == 0 || *fpos >= vmcore_size)
 304		return 0;
 305
 306	/* trim buflen to not go beyond EOF */
 307	if (buflen > vmcore_size - *fpos)
 308		buflen = vmcore_size - *fpos;
 309
 310	/* Read ELF core header */
 311	if (*fpos < elfcorebuf_sz) {
 312		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
 313		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
 314			return -EFAULT;
 315		buflen -= tsz;
 316		*fpos += tsz;
 317		buffer += tsz;
 318		acc += tsz;
 319
  320		/* leave now if the buffer is already filled */
 321		if (buflen == 0)
 322			return acc;
 323	}
 324
 325	/* Read Elf note segment */
 326	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
 327		void *kaddr;
 328
 329		/* We add device dumps before other elf notes because the
 330		 * other elf notes may not fill the elf notes buffer
 331		 * completely and we will end up with zero-filled data
 332		 * between the elf notes and the device dumps. Tools will
 333		 * then try to decode this zero-filled data as valid notes
 334		 * and we don't want that. Hence, adding device dumps before
  335		 * the other elf notes ensures that zero-filled data is
  336		 * avoided.
 337		 */
 338#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 339		/* Read device dumps */
 340		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
 341			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
 342				  (size_t)*fpos, buflen);
 343			start = *fpos - elfcorebuf_sz;
 344			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
 345				return -EFAULT;
 346
 347			buflen -= tsz;
 348			*fpos += tsz;
 349			buffer += tsz;
 350			acc += tsz;
 351
  352			/* leave now if the buffer is already filled */
 353			if (!buflen)
 354				return acc;
 355		}
 356#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 357
 358		/* Read remaining elf notes */
  359		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
  360		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
 361		if (copy_to(buffer, kaddr, tsz, userbuf))
 362			return -EFAULT;
 363
 364		buflen -= tsz;
 365		*fpos += tsz;
 366		buffer += tsz;
 367		acc += tsz;
 368
  369		/* leave now if the buffer is already filled */
 370		if (buflen == 0)
  371			return acc;
  372	}
 373
 374	list_for_each_entry(m, &vmcore_list, list) {
 375		if (*fpos < m->offset + m->size) {
 376			tsz = (size_t)min_t(unsigned long long,
 377					    m->offset + m->size - *fpos,
 378					    buflen);
 379			start = m->paddr + *fpos - m->offset;
  380			tmp = read_from_oldmem(buffer, tsz, &start, userbuf,
  381					       cc_platform_has(CC_ATTR_MEM_ENCRYPT));
 382			if (tmp < 0)
 383				return tmp;
 384			buflen -= tsz;
 385			*fpos += tsz;
 386			buffer += tsz;
 387			acc += tsz;
 388
  389			/* leave now if the buffer is already filled */
 390			if (buflen == 0)
 391				return acc;
  392		}
  393	}
 394
 395	return acc;
 396}
 397
 398static ssize_t read_vmcore(struct file *file, char __user *buffer,
  399			   size_t buflen, loff_t *fpos)
  400{
  401	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
  402}
 403
  404/*
  405 * The vmcore fault handler uses the page cache and fills data using the
  406 * standard __read_vmcore() function.
 407 *
 408 * On s390 the fault handler is used for memory regions that can't be mapped
 409 * directly with remap_pfn_range().
 410 */
 411static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
 412{
 413#ifdef CONFIG_S390
 414	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
  415	pgoff_t index = vmf->pgoff;
  416	struct page *page;
 417	loff_t offset;
 418	char *buf;
 419	int rc;
 420
 421	page = find_or_create_page(mapping, index, GFP_KERNEL);
 422	if (!page)
 423		return VM_FAULT_OOM;
 424	if (!PageUptodate(page)) {
 425		offset = (loff_t) index << PAGE_SHIFT;
 426		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
  427		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
  428		if (rc < 0) {
 429			unlock_page(page);
 430			put_page(page);
 431			return vmf_error(rc);
 432		}
 433		SetPageUptodate(page);
 434	}
 435	unlock_page(page);
 436	vmf->page = page;
 437	return 0;
 438#else
 439	return VM_FAULT_SIGBUS;
 440#endif
 441}
 442
 443static const struct vm_operations_struct vmcore_mmap_ops = {
 444	.fault = mmap_vmcore_fault,
 445};
 446
 447/**
 448 * vmcore_alloc_buf - allocate buffer in vmalloc memory
  449 * @size: size of buffer
 450 *
 451 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 452 * the buffer to user-space by means of remap_vmalloc_range().
 453 *
 454 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 455 * disabled and there's no need to allow users to mmap the buffer.
 456 */
 457static inline char *vmcore_alloc_buf(size_t size)
 458{
 459#ifdef CONFIG_MMU
 460	return vmalloc_user(size);
 461#else
 462	return vzalloc(size);
 463#endif
 464}
 465
 466/*
 467 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 468 * essential for mmap_vmcore() in order to map physically
 469 * non-contiguous objects (ELF header, ELF note segment and memory
 470 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 471 * virtually contiguous user-space in ELF layout.
 472 */
 473#ifdef CONFIG_MMU
 474/*
  475 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range(), replacing all
  476 * pages reported as not being ram with the zero page.
 477 *
 478 * @vma: vm_area_struct describing requested mapping
 479 * @from: start remapping from
 480 * @pfn: page frame number to start remapping to
 481 * @size: remapping size
 482 * @prot: protection bits
 483 *
 484 * Returns zero on success, -EAGAIN on failure.
 485 */
 486static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
 487				    unsigned long from, unsigned long pfn,
 488				    unsigned long size, pgprot_t prot)
 489{
 490	unsigned long map_size;
 491	unsigned long pos_start, pos_end, pos;
 492	unsigned long zeropage_pfn = my_zero_pfn(0);
 493	size_t len = 0;
 494
 495	pos_start = pfn;
 496	pos_end = pfn + (size >> PAGE_SHIFT);
 497
 498	for (pos = pos_start; pos < pos_end; ++pos) {
 499		if (!pfn_is_ram(pos)) {
 500			/*
  501			 * We hit a page which is not ram. Remap the contiguous
  502			 * region between pos_start and pos-1 and replace
  503			 * the non-ram page at pos with the zero page.
 504			 */
 505			if (pos > pos_start) {
  506				/* Remap contiguous region */
 507				map_size = (pos - pos_start) << PAGE_SHIFT;
 508				if (remap_oldmem_pfn_range(vma, from + len,
 509							   pos_start, map_size,
 510							   prot))
 511					goto fail;
 512				len += map_size;
 513			}
 514			/* Remap the zero page */
 515			if (remap_oldmem_pfn_range(vma, from + len,
 516						   zeropage_pfn,
 517						   PAGE_SIZE, prot))
 518				goto fail;
 519			len += PAGE_SIZE;
 520			pos_start = pos + 1;
 521		}
 522	}
 523	if (pos > pos_start) {
 524		/* Remap the rest */
 525		map_size = (pos - pos_start) << PAGE_SHIFT;
 526		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
 527					   map_size, prot))
 528			goto fail;
 529	}
 530	return 0;
 531fail:
 532	do_munmap(vma->vm_mm, from, len, NULL);
 533	return -EAGAIN;
 534}
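
/*
 * Worked example (illustrative): remapping pfns 10..13 where pfn 12 is
 * reported as not being ram by pfn_is_ram() results in three calls to
 * remap_oldmem_pfn_range(): one for pfns 10-11, one mapping the zero page
 * in place of pfn 12, and one for pfn 13 via the trailing "Remap the rest"
 * step.
 */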
 535
 536static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
 537			    unsigned long from, unsigned long pfn,
 538			    unsigned long size, pgprot_t prot)
  539{
  540	/*
  541	 * Check if a vmcore callback was registered to avoid
  542	 * looping over all pages without a reason.
  543	 */
  544	if (!list_empty(&vmcore_cb_list))
  545		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
  546	else
  547		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
  548}
 549
 550static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 551{
 552	size_t size = vma->vm_end - vma->vm_start;
 553	u64 start, end, len, tsz;
 554	struct vmcore *m;
 555
 556	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
 557	end = start + size;
 558
 559	if (size > vmcore_size || end > vmcore_size)
 560		return -EINVAL;
 561
 562	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
 563		return -EPERM;
 564
 565	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
 566	vma->vm_flags |= VM_MIXEDMAP;
 567	vma->vm_ops = &vmcore_mmap_ops;
 568
 569	len = 0;
 570
 571	if (start < elfcorebuf_sz) {
 572		u64 pfn;
 573
 574		tsz = min(elfcorebuf_sz - (size_t)start, size);
 575		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
 576		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
 577				    vma->vm_page_prot))
 578			return -EAGAIN;
 579		size -= tsz;
 580		start += tsz;
 581		len += tsz;
 582
 583		if (size == 0)
 584			return 0;
 585	}
 586
 587	if (start < elfcorebuf_sz + elfnotes_sz) {
 588		void *kaddr;
 589
 590		/* We add device dumps before other elf notes because the
 591		 * other elf notes may not fill the elf notes buffer
 592		 * completely and we will end up with zero-filled data
 593		 * between the elf notes and the device dumps. Tools will
 594		 * then try to decode this zero-filled data as valid notes
 595		 * and we don't want that. Hence, adding device dumps before
  596		 * the other elf notes ensures that zero-filled data is
  597		 * avoided. This also ensures that the device dumps and
  598		 * other elf notes can be properly mmapped at page-aligned
  599		 * addresses.
 600		 */
 601#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
  602		/* Map device dumps */
 603		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
 604			u64 start_off;
 605
 606			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
 607				  (size_t)start, size);
 608			start_off = start - elfcorebuf_sz;
 609			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
 610						start_off, tsz))
 611				goto fail;
 612
 613			size -= tsz;
 614			start += tsz;
 615			len += tsz;
 616
  617			/* leave now if the mapping is already filled */
 618			if (!size)
 619				return 0;
 620		}
 621#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 622
  623		/* Map remaining elf notes */
 624		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
 625		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
 626		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
 627						kaddr, 0, tsz))
 628			goto fail;
 629
 630		size -= tsz;
 631		start += tsz;
 632		len += tsz;
 633
 634		if (size == 0)
 635			return 0;
 636	}
 637
 638	list_for_each_entry(m, &vmcore_list, list) {
 639		if (start < m->offset + m->size) {
 640			u64 paddr = 0;
 641
 642			tsz = (size_t)min_t(unsigned long long,
 643					    m->offset + m->size - start, size);
 644			paddr = m->paddr + start - m->offset;
 645			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
 646						    paddr >> PAGE_SHIFT, tsz,
 647						    vma->vm_page_prot))
 648				goto fail;
 649			size -= tsz;
 650			start += tsz;
 651			len += tsz;
 652
 653			if (size == 0)
 654				return 0;
 655		}
 656	}
 657
 658	return 0;
 659fail:
 660	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
 661	return -EAGAIN;
 662}
 663#else
 664static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
 665{
 666	return -ENOSYS;
 667}
 668#endif
 669
 670static const struct proc_ops vmcore_proc_ops = {
  671	.proc_read	= read_vmcore,
  672	.proc_lseek	= default_llseek,
 673	.proc_mmap	= mmap_vmcore,
 674};
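
/*
 * Userspace consumes /proc/vmcore as a regular ELF core file, e.g.
 * (illustrative): "cp /proc/vmcore /var/crash/vmcore", or via tools such
 * as makedumpfile, which read() or mmap() the image exposed here.
 */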
 675
 676static struct vmcore* __init get_new_element(void)
 677{
 678	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
 679}
 680
 681static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
 682			   struct list_head *vc_list)
 683{
 684	u64 size;
 685	struct vmcore *m;
 686
 687	size = elfsz + elfnotesegsz;
 688	list_for_each_entry(m, vc_list, list) {
 689		size += m->size;
 690	}
 691	return size;
 692}
 693
 694/**
 695 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 696 *
 697 * @ehdr_ptr: ELF header
 698 *
  699 * This function updates the p_memsz member of each PT_NOTE entry in the
  700 * program header table pointed to by @ehdr_ptr to the real size of the
  701 * ELF note segment.
 702 */
 703static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
 704{
 705	int i, rc=0;
 706	Elf64_Phdr *phdr_ptr;
 707	Elf64_Nhdr *nhdr_ptr;
 708
 709	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
 710	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 711		void *notes_section;
 712		u64 offset, max_sz, sz, real_sz = 0;
 713		if (phdr_ptr->p_type != PT_NOTE)
 714			continue;
 715		max_sz = phdr_ptr->p_memsz;
 716		offset = phdr_ptr->p_offset;
 717		notes_section = kmalloc(max_sz, GFP_KERNEL);
 718		if (!notes_section)
 719			return -ENOMEM;
 720		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 721		if (rc < 0) {
 722			kfree(notes_section);
 723			return rc;
 724		}
 725		nhdr_ptr = notes_section;
 726		while (nhdr_ptr->n_namesz != 0) {
 727			sz = sizeof(Elf64_Nhdr) +
 728				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
 729				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
 730			if ((real_sz + sz) > max_sz) {
 731				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
 732					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
 733				break;
 734			}
 735			real_sz += sz;
 736			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
 737		}
 738		kfree(notes_section);
 739		phdr_ptr->p_memsz = real_sz;
 740		if (real_sz == 0) {
 741			pr_warn("Warning: Zero PT_NOTE entries found\n");
 742		}
 743	}
 744
 745	return 0;
 746}
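
/*
 * Note size arithmetic (illustrative): a note with n_namesz=5 and
 * n_descsz=100 occupies sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(100, 4)
 * = 12 + 8 + 100 = 120 bytes, matching the computation of 'sz' above.
 */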
 747
 748/**
  749 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
  750 * headers and the sum of the real sizes of their ELF note segment
  751 * headers and data.
 752 *
 753 * @ehdr_ptr: ELF header
 754 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 755 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 756 *
  757 * This function is used to merge multiple PT_NOTE program headers
  758 * into a single unique one. The resulting unique entry will have
  759 * @sz_ptnote in its phdr->p_memsz.
  760 *
  761 * It is assumed that the PT_NOTE program headers pointed to by
  762 * @ehdr_ptr have already been updated by update_note_header_size_elf64
  763 * and that each PT_NOTE program header has the actual ELF note segment
  764 * size in its p_memsz member.
 765 */
 766static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
 767						 int *nr_ptnote, u64 *sz_ptnote)
 768{
 769	int i;
 770	Elf64_Phdr *phdr_ptr;
 771
 772	*nr_ptnote = *sz_ptnote = 0;
 773
 774	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
 775	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 776		if (phdr_ptr->p_type != PT_NOTE)
 777			continue;
 778		*nr_ptnote += 1;
 779		*sz_ptnote += phdr_ptr->p_memsz;
 780	}
 781
 782	return 0;
 783}
 784
 785/**
 786 * copy_notes_elf64 - copy ELF note segments in a given buffer
 787 *
 788 * @ehdr_ptr: ELF header
 789 * @notes_buf: buffer into which ELF note segments are copied
 790 *
  791 * This function is used to copy the ELF note segments in the 1st kernel
  792 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
  793 * size of the buffer @notes_buf is equal to or larger than the sum of
  794 * the real ELF note segment headers and data.
  795 *
  796 * It is assumed that the PT_NOTE program headers pointed to by
  797 * @ehdr_ptr have already been updated by update_note_header_size_elf64
  798 * and that each PT_NOTE program header has the actual ELF note segment
  799 * size in its p_memsz member.
 800 */
 801static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
 802{
 803	int i, rc=0;
 804	Elf64_Phdr *phdr_ptr;
 805
 806	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);
 807
 808	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 809		u64 offset;
 810		if (phdr_ptr->p_type != PT_NOTE)
 811			continue;
 812		offset = phdr_ptr->p_offset;
 813		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
 814					   &offset);
 815		if (rc < 0)
 816			return rc;
 817		notes_buf += phdr_ptr->p_memsz;
 818	}
 819
 820	return 0;
 821}
 822
 823/* Merges all the PT_NOTE headers into one. */
 824static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
 825					   char **notes_buf, size_t *notes_sz)
 826{
 827	int i, nr_ptnote=0, rc=0;
 828	char *tmp;
 829	Elf64_Ehdr *ehdr_ptr;
 830	Elf64_Phdr phdr;
 831	u64 phdr_sz = 0, note_off;
 832
 833	ehdr_ptr = (Elf64_Ehdr *)elfptr;
 834
 835	rc = update_note_header_size_elf64(ehdr_ptr);
 836	if (rc < 0)
 837		return rc;
 838
 839	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
 840	if (rc < 0)
 841		return rc;
 842
 843	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
 844	*notes_buf = vmcore_alloc_buf(*notes_sz);
 845	if (!*notes_buf)
 846		return -ENOMEM;
 847
 848	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
 849	if (rc < 0)
 850		return rc;
 851
 852	/* Prepare merged PT_NOTE program header. */
 853	phdr.p_type    = PT_NOTE;
 854	phdr.p_flags   = 0;
 855	note_off = sizeof(Elf64_Ehdr) +
 856			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
 857	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
 858	phdr.p_vaddr   = phdr.p_paddr = 0;
 859	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
 860	phdr.p_align   = 0;
 861
 862	/* Add merged PT_NOTE program header*/
 863	tmp = elfptr + sizeof(Elf64_Ehdr);
 864	memcpy(tmp, &phdr, sizeof(phdr));
 865	tmp += sizeof(phdr);
 866
 867	/* Remove unwanted PT_NOTE program headers. */
 868	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
 869	*elfsz = *elfsz - i;
 870	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
 871	memset(elfptr + *elfsz, 0, i);
 872	*elfsz = roundup(*elfsz, PAGE_SIZE);
 873
 874	/* Modify e_phnum to reflect merged headers. */
 875	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
 876
  877	/* Store the size of all notes. We need this to update the note
  878	 * header when device dumps are added.
  879	 */
 880	elfnotes_orig_sz = phdr.p_memsz;
 881
 882	return 0;
 883}
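
/*
 * Resulting vmcore file layout after merging (sketch):
 *
 *	+----------------------------------+ offset 0
 *	| ELF header                       |
 *	| merged PT_NOTE program header    |
 *	| PT_LOAD program headers ...      | (elfcorebuf, page-rounded)
 *	+----------------------------------+ phdr.p_offset
 *	| note data (device dumps, if any, | (elfnotes_buf, page-rounded)
 *	| placed before the other notes)   |
 *	+----------------------------------+
 *	| memory chunks from vmcore_list   |
 *	+----------------------------------+
 */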
 884
 885/**
 886 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 887 *
 888 * @ehdr_ptr: ELF header
 889 *
  890 * This function updates the p_memsz member of each PT_NOTE entry in the
  891 * program header table pointed to by @ehdr_ptr to the real size of the
  892 * ELF note segment.
 893 */
 894static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
 895{
 896	int i, rc=0;
 897	Elf32_Phdr *phdr_ptr;
 898	Elf32_Nhdr *nhdr_ptr;
 899
 900	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
 901	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 902		void *notes_section;
 903		u64 offset, max_sz, sz, real_sz = 0;
 904		if (phdr_ptr->p_type != PT_NOTE)
 905			continue;
 906		max_sz = phdr_ptr->p_memsz;
 907		offset = phdr_ptr->p_offset;
 908		notes_section = kmalloc(max_sz, GFP_KERNEL);
 909		if (!notes_section)
 910			return -ENOMEM;
 911		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
 912		if (rc < 0) {
 913			kfree(notes_section);
 914			return rc;
 915		}
 916		nhdr_ptr = notes_section;
 917		while (nhdr_ptr->n_namesz != 0) {
 918			sz = sizeof(Elf32_Nhdr) +
 919				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
 920				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
 921			if ((real_sz + sz) > max_sz) {
 922				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
 923					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
 924				break;
 925			}
 926			real_sz += sz;
 927			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
 928		}
 929		kfree(notes_section);
 930		phdr_ptr->p_memsz = real_sz;
 931		if (real_sz == 0) {
 932			pr_warn("Warning: Zero PT_NOTE entries found\n");
 933		}
 934	}
 935
 936	return 0;
 937}
 938
 939/**
  940 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
  941 * headers and the sum of the real sizes of their ELF note segment
  942 * headers and data.
 943 *
 944 * @ehdr_ptr: ELF header
 945 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 946 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 947 *
  948 * This function is used to merge multiple PT_NOTE program headers
  949 * into a single unique one. The resulting unique entry will have
  950 * @sz_ptnote in its phdr->p_memsz.
  951 *
  952 * It is assumed that the PT_NOTE program headers pointed to by
  953 * @ehdr_ptr have already been updated by update_note_header_size_elf32
  954 * and that each PT_NOTE program header has the actual ELF note segment
  955 * size in its p_memsz member.
 956 */
 957static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
 958						 int *nr_ptnote, u64 *sz_ptnote)
 959{
 960	int i;
 961	Elf32_Phdr *phdr_ptr;
 962
 963	*nr_ptnote = *sz_ptnote = 0;
 964
 965	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
 966	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
 967		if (phdr_ptr->p_type != PT_NOTE)
 968			continue;
 969		*nr_ptnote += 1;
 970		*sz_ptnote += phdr_ptr->p_memsz;
 971	}
 972
 973	return 0;
 974}
 975
 976/**
 977 * copy_notes_elf32 - copy ELF note segments in a given buffer
 978 *
 979 * @ehdr_ptr: ELF header
 980 * @notes_buf: buffer into which ELF note segments are copied
 981 *
  982 * This function is used to copy the ELF note segments in the 1st kernel
  983 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
  984 * size of the buffer @notes_buf is equal to or larger than the sum of
  985 * the real ELF note segment headers and data.
  986 *
  987 * It is assumed that the PT_NOTE program headers pointed to by
  988 * @ehdr_ptr have already been updated by update_note_header_size_elf32
  989 * and that each PT_NOTE program header has the actual ELF note segment
  990 * size in its p_memsz member.
 991 */
 992static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
 993{
 994	int i, rc=0;
 995	Elf32_Phdr *phdr_ptr;
 996
 997	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);
 998
 999	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1000		u64 offset;
1001		if (phdr_ptr->p_type != PT_NOTE)
1002			continue;
1003		offset = phdr_ptr->p_offset;
1004		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
1005					   &offset);
1006		if (rc < 0)
1007			return rc;
1008		notes_buf += phdr_ptr->p_memsz;
1009	}
1010
1011	return 0;
1012}
1013
1014/* Merges all the PT_NOTE headers into one. */
1015static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
1016					   char **notes_buf, size_t *notes_sz)
1017{
1018	int i, nr_ptnote=0, rc=0;
1019	char *tmp;
1020	Elf32_Ehdr *ehdr_ptr;
1021	Elf32_Phdr phdr;
1022	u64 phdr_sz = 0, note_off;
1023
1024	ehdr_ptr = (Elf32_Ehdr *)elfptr;
1025
1026	rc = update_note_header_size_elf32(ehdr_ptr);
1027	if (rc < 0)
1028		return rc;
1029
1030	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
1031	if (rc < 0)
1032		return rc;
1033
1034	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
1035	*notes_buf = vmcore_alloc_buf(*notes_sz);
1036	if (!*notes_buf)
1037		return -ENOMEM;
1038
1039	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
1040	if (rc < 0)
1041		return rc;
1042
1043	/* Prepare merged PT_NOTE program header. */
1044	phdr.p_type    = PT_NOTE;
1045	phdr.p_flags   = 0;
1046	note_off = sizeof(Elf32_Ehdr) +
1047			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
1048	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
1049	phdr.p_vaddr   = phdr.p_paddr = 0;
1050	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
1051	phdr.p_align   = 0;
1052
1053	/* Add merged PT_NOTE program header*/
1054	tmp = elfptr + sizeof(Elf32_Ehdr);
1055	memcpy(tmp, &phdr, sizeof(phdr));
1056	tmp += sizeof(phdr);
1057
1058	/* Remove unwanted PT_NOTE program headers. */
1059	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
1060	*elfsz = *elfsz - i;
1061	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
1062	memset(elfptr + *elfsz, 0, i);
1063	*elfsz = roundup(*elfsz, PAGE_SIZE);
1064
1065	/* Modify e_phnum to reflect merged headers. */
1066	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
1067
 1068	/* Store the size of all notes. We need this to update the note
 1069	 * header when device dumps are added.
 1070	 */
1071	elfnotes_orig_sz = phdr.p_memsz;
1072
1073	return 0;
1074}
1075
 1076/* Add memory chunks represented by program headers to the vmcore list. Also
 1077 * update the offset fields of the exported program headers. */
1078static int __init process_ptload_program_headers_elf64(char *elfptr,
1079						size_t elfsz,
1080						size_t elfnotes_sz,
1081						struct list_head *vc_list)
1082{
1083	int i;
1084	Elf64_Ehdr *ehdr_ptr;
1085	Elf64_Phdr *phdr_ptr;
1086	loff_t vmcore_off;
1087	struct vmcore *new;
1088
1089	ehdr_ptr = (Elf64_Ehdr *)elfptr;
1090	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
1091
1092	/* Skip Elf header, program headers and Elf note segment. */
1093	vmcore_off = elfsz + elfnotes_sz;
1094
1095	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1096		u64 paddr, start, end, size;
1097
1098		if (phdr_ptr->p_type != PT_LOAD)
1099			continue;
1100
1101		paddr = phdr_ptr->p_offset;
1102		start = rounddown(paddr, PAGE_SIZE);
1103		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1104		size = end - start;
1105
1106		/* Add this contiguous chunk of memory to vmcore list.*/
1107		new = get_new_element();
1108		if (!new)
1109			return -ENOMEM;
1110		new->paddr = start;
1111		new->size = size;
1112		list_add_tail(&new->list, vc_list);
1113
1114		/* Update the program header offset. */
1115		phdr_ptr->p_offset = vmcore_off + (paddr - start);
1116		vmcore_off = vmcore_off + size;
1117	}
1118	return 0;
1119}
1120
1121static int __init process_ptload_program_headers_elf32(char *elfptr,
1122						size_t elfsz,
1123						size_t elfnotes_sz,
1124						struct list_head *vc_list)
1125{
1126	int i;
1127	Elf32_Ehdr *ehdr_ptr;
1128	Elf32_Phdr *phdr_ptr;
1129	loff_t vmcore_off;
1130	struct vmcore *new;
1131
1132	ehdr_ptr = (Elf32_Ehdr *)elfptr;
1133	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
1134
1135	/* Skip Elf header, program headers and Elf note segment. */
1136	vmcore_off = elfsz + elfnotes_sz;
1137
1138	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1139		u64 paddr, start, end, size;
1140
1141		if (phdr_ptr->p_type != PT_LOAD)
1142			continue;
1143
1144		paddr = phdr_ptr->p_offset;
1145		start = rounddown(paddr, PAGE_SIZE);
1146		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1147		size = end - start;
1148
1149		/* Add this contiguous chunk of memory to vmcore list.*/
1150		new = get_new_element();
1151		if (!new)
1152			return -ENOMEM;
1153		new->paddr = start;
1154		new->size = size;
1155		list_add_tail(&new->list, vc_list);
1156
1157		/* Update the program header offset */
1158		phdr_ptr->p_offset = vmcore_off + (paddr - start);
1159		vmcore_off = vmcore_off + size;
1160	}
1161	return 0;
1162}
1163
1164/* Sets offset fields of vmcore elements. */
1165static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
1166				    struct list_head *vc_list)
1167{
1168	loff_t vmcore_off;
1169	struct vmcore *m;
1170
1171	/* Skip Elf header, program headers and Elf note segment. */
1172	vmcore_off = elfsz + elfnotes_sz;
1173
1174	list_for_each_entry(m, vc_list, list) {
1175		m->offset = vmcore_off;
1176		vmcore_off += m->size;
1177	}
1178}
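
/*
 * Worked example (illustrative): with a two-page @elfsz and a one-page
 * @elfnotes_sz, the first chunk in @vc_list is assigned offset
 * 3 * PAGE_SIZE, and each following chunk starts where the previous
 * one ends.
 */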
1179
1180static void free_elfcorebuf(void)
1181{
1182	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
1183	elfcorebuf = NULL;
1184	vfree(elfnotes_buf);
1185	elfnotes_buf = NULL;
1186}
1187
1188static int __init parse_crash_elf64_headers(void)
1189{
1190	int rc=0;
1191	Elf64_Ehdr ehdr;
1192	u64 addr;
1193
1194	addr = elfcorehdr_addr;
1195
1196	/* Read Elf header */
1197	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
1198	if (rc < 0)
1199		return rc;
1200
 1201	/* Do some basic verification. */
1202	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1203		(ehdr.e_type != ET_CORE) ||
1204		!vmcore_elf64_check_arch(&ehdr) ||
1205		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
1206		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1207		ehdr.e_version != EV_CURRENT ||
1208		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
1209		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
1210		ehdr.e_phnum == 0) {
1211		pr_warn("Warning: Core image elf header is not sane\n");
1212		return -EINVAL;
1213	}
1214
1215	/* Read in all elf headers. */
1216	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
1217				ehdr.e_phnum * sizeof(Elf64_Phdr);
1218	elfcorebuf_sz = elfcorebuf_sz_orig;
1219	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1220					      get_order(elfcorebuf_sz_orig));
1221	if (!elfcorebuf)
1222		return -ENOMEM;
1223	addr = elfcorehdr_addr;
1224	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1225	if (rc < 0)
1226		goto fail;
1227
1228	/* Merge all PT_NOTE headers into one. */
1229	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
1230				      &elfnotes_buf, &elfnotes_sz);
1231	if (rc)
1232		goto fail;
1233	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
1234						  elfnotes_sz, &vmcore_list);
1235	if (rc)
1236		goto fail;
1237	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1238	return 0;
1239fail:
1240	free_elfcorebuf();
1241	return rc;
1242}
1243
1244static int __init parse_crash_elf32_headers(void)
1245{
1246	int rc=0;
1247	Elf32_Ehdr ehdr;
1248	u64 addr;
1249
1250	addr = elfcorehdr_addr;
1251
1252	/* Read Elf header */
1253	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
1254	if (rc < 0)
1255		return rc;
1256
 1257	/* Do some basic verification. */
1258	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1259		(ehdr.e_type != ET_CORE) ||
1260		!vmcore_elf32_check_arch(&ehdr) ||
1261		ehdr.e_ident[EI_CLASS] != ELFCLASS32||
1262		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1263		ehdr.e_version != EV_CURRENT ||
1264		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
1265		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
1266		ehdr.e_phnum == 0) {
1267		pr_warn("Warning: Core image elf header is not sane\n");
1268		return -EINVAL;
1269	}
1270
1271	/* Read in all elf headers. */
1272	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
1273	elfcorebuf_sz = elfcorebuf_sz_orig;
1274	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1275					      get_order(elfcorebuf_sz_orig));
1276	if (!elfcorebuf)
1277		return -ENOMEM;
1278	addr = elfcorehdr_addr;
1279	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1280	if (rc < 0)
1281		goto fail;
1282
1283	/* Merge all PT_NOTE headers into one. */
1284	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
1285				      &elfnotes_buf, &elfnotes_sz);
1286	if (rc)
1287		goto fail;
1288	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
1289						  elfnotes_sz, &vmcore_list);
1290	if (rc)
1291		goto fail;
1292	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1293	return 0;
1294fail:
1295	free_elfcorebuf();
1296	return rc;
1297}
1298
1299static int __init parse_crash_elf_headers(void)
1300{
1301	unsigned char e_ident[EI_NIDENT];
1302	u64 addr;
1303	int rc=0;
1304
1305	addr = elfcorehdr_addr;
1306	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
1307	if (rc < 0)
1308		return rc;
1309	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
1310		pr_warn("Warning: Core image elf header not found\n");
1311		return -EINVAL;
1312	}
1313
1314	if (e_ident[EI_CLASS] == ELFCLASS64) {
1315		rc = parse_crash_elf64_headers();
1316		if (rc)
1317			return rc;
1318	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
1319		rc = parse_crash_elf32_headers();
1320		if (rc)
1321			return rc;
1322	} else {
1323		pr_warn("Warning: Core image elf header is not sane\n");
1324		return -EINVAL;
1325	}
1326
1327	/* Determine vmcore size. */
1328	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1329				      &vmcore_list);
1330
1331	return 0;
1332}
1333
1334#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1335/**
1336 * vmcoredd_write_header - Write vmcore device dump header at the
1337 * beginning of the dump's buffer.
1338 * @buf: Output buffer where the note is written
1339 * @data: Dump info
1340 * @size: Size of the dump
1341 *
 1342 * Fills the beginning of the dump's buffer with the vmcore device dump header.
1343 */
1344static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
1345				  u32 size)
1346{
1347	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;
1348
1349	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
1350	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
1351	vdd_hdr->n_type = NT_VMCOREDD;
1352
1353	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
1354		sizeof(vdd_hdr->name));
1355	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
1356}
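
/*
 * Layout of the resulting note (sketch, following the fields set above):
 * the Elf note header (n_namesz/n_descsz/n_type = NT_VMCOREDD), followed
 * by the VMCOREDD_NOTE_NAME name field, the dump_name identifying the
 * dump, and finally the dump data written by the driver callback.
 */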
1357
1358/**
1359 * vmcoredd_update_program_headers - Update all Elf program headers
1360 * @elfptr: Pointer to elf header
1361 * @elfnotesz: Size of elf notes aligned to page size
1362 * @vmcoreddsz: Size of device dumps to be added to elf note header
1363 *
1364 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
1365 * Also update the offsets of all the program headers after the elf note header.
1366 */
1367static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
1368					    size_t vmcoreddsz)
1369{
1370	unsigned char *e_ident = (unsigned char *)elfptr;
1371	u64 start, end, size;
1372	loff_t vmcore_off;
1373	u32 i;
1374
1375	vmcore_off = elfcorebuf_sz + elfnotesz;
1376
1377	if (e_ident[EI_CLASS] == ELFCLASS64) {
1378		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
1379		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
1380
1381		/* Update all program headers */
1382		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1383			if (phdr->p_type == PT_NOTE) {
1384				/* Update note size */
1385				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1386				phdr->p_filesz = phdr->p_memsz;
1387				continue;
1388			}
1389
1390			start = rounddown(phdr->p_offset, PAGE_SIZE);
1391			end = roundup(phdr->p_offset + phdr->p_memsz,
1392				      PAGE_SIZE);
1393			size = end - start;
1394			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1395			vmcore_off += size;
1396		}
1397	} else {
1398		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
1399		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
1400
1401		/* Update all program headers */
1402		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1403			if (phdr->p_type == PT_NOTE) {
1404				/* Update note size */
1405				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1406				phdr->p_filesz = phdr->p_memsz;
1407				continue;
1408			}
1409
1410			start = rounddown(phdr->p_offset, PAGE_SIZE);
1411			end = roundup(phdr->p_offset + phdr->p_memsz,
1412				      PAGE_SIZE);
1413			size = end - start;
1414			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1415			vmcore_off += size;
1416		}
1417	}
1418}
1419
1420/**
1421 * vmcoredd_update_size - Update the total size of the device dumps and update
1422 * Elf header
1423 * @dump_size: Size of the current device dump to be added to total size
1424 *
1425 * Update the total size of all the device dumps and update the Elf program
1426 * headers. Calculate the new offsets for the vmcore list and update the
1427 * total vmcore size.
1428 */
1429static void vmcoredd_update_size(size_t dump_size)
1430{
1431	vmcoredd_orig_sz += dump_size;
1432	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
1433	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
1434					vmcoredd_orig_sz);
1435
1436	/* Update vmcore list offsets */
1437	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1438
1439	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1440				      &vmcore_list);
1441	proc_vmcore->size = vmcore_size;
1442}
1443
1444/**
1445 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
1446 * @data: dump info.
1447 *
 1448 * Allocate a buffer and invoke the calling driver's dump collection routine.
 1449 * Write an Elf note at the beginning of the buffer to indicate a vmcore
 1450 * device dump and add the dump to the global list.
1451 */
1452int vmcore_add_device_dump(struct vmcoredd_data *data)
1453{
1454	struct vmcoredd_node *dump;
1455	void *buf = NULL;
1456	size_t data_size;
1457	int ret;
1458
1459	if (vmcoredd_disabled) {
1460		pr_err_once("Device dump is disabled\n");
1461		return -EINVAL;
1462	}
1463
1464	if (!data || !strlen(data->dump_name) ||
1465	    !data->vmcoredd_callback || !data->size)
1466		return -EINVAL;
1467
1468	dump = vzalloc(sizeof(*dump));
1469	if (!dump) {
1470		ret = -ENOMEM;
1471		goto out_err;
1472	}
1473
 1474	/* Keep the size of the buffer page-aligned so that it can be mmapped */
1475	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
1476			    PAGE_SIZE);
1477
 1478	/* Allocate a buffer for drivers to write their dumps into */
1479	buf = vmcore_alloc_buf(data_size);
1480	if (!buf) {
1481		ret = -ENOMEM;
1482		goto out_err;
1483	}
1484
1485	vmcoredd_write_header(buf, data, data_size -
1486			      sizeof(struct vmcoredd_header));
1487
 1488	/* Invoke the driver's dump collection routine */
1489	ret = data->vmcoredd_callback(data, buf +
1490				      sizeof(struct vmcoredd_header));
1491	if (ret)
1492		goto out_err;
1493
1494	dump->buf = buf;
1495	dump->size = data_size;
1496
 1497	/* Add the dump to the vmcore device dump list */
1498	mutex_lock(&vmcoredd_mutex);
1499	list_add_tail(&dump->list, &vmcoredd_list);
1500	mutex_unlock(&vmcoredd_mutex);
1501
1502	vmcoredd_update_size(data_size);
1503	return 0;
1504
1505out_err:
1506	vfree(buf);
1507	vfree(dump);
1508
1509	return ret;
1510}
1511EXPORT_SYMBOL(vmcore_add_device_dump);
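
/*
 * Illustrative driver-side usage (a minimal sketch; the "mydrv" names and
 * state are hypothetical, only the vmcoredd_data fields checked above are
 * assumed):
 *
 *	static int mydrv_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		memcpy(buf, mydrv_state, data->size);
 *		return 0;
 *	}
 *
 *	struct vmcoredd_data data = {};
 *
 *	strscpy(data.dump_name, "mydrv", sizeof(data.dump_name));
 *	data.size = mydrv_state_size;
 *	data.vmcoredd_callback = mydrv_collect;
 *	vmcore_add_device_dump(&data);
 */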
1512#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1513
1514/* Free all dumps in vmcore device dump list */
1515static void vmcore_free_device_dumps(void)
1516{
1517#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1518	mutex_lock(&vmcoredd_mutex);
1519	while (!list_empty(&vmcoredd_list)) {
1520		struct vmcoredd_node *dump;
1521
1522		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
1523					list);
1524		list_del(&dump->list);
1525		vfree(dump->buf);
1526		vfree(dump);
1527	}
1528	mutex_unlock(&vmcoredd_mutex);
1529#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1530}
1531
1532/* Init function for vmcore module. */
1533static int __init vmcore_init(void)
1534{
1535	int rc = 0;
1536
1537	/* Allow architectures to allocate ELF header in 2nd kernel */
1538	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
1539	if (rc)
1540		return rc;
1541	/*
 1542	 * If elfcorehdr= has been passed on the command line or created in the
 1543	 * 2nd kernel, then capture the dump.
1544	 */
1545	if (!(is_vmcore_usable()))
1546		return rc;
1547	rc = parse_crash_elf_headers();
 1548	if (rc) {
 1549		pr_warn("Kdump: vmcore not initialized\n");
1550		return rc;
1551	}
1552	elfcorehdr_free(elfcorehdr_addr);
1553	elfcorehdr_addr = ELFCORE_ADDR_ERR;
1554
1555	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
1556	if (proc_vmcore)
1557		proc_vmcore->size = vmcore_size;
1558	return 0;
1559}
1560fs_initcall(vmcore_init);
1561
1562/* Cleanup function for vmcore module. */
1563void vmcore_cleanup(void)
1564{
1565	if (proc_vmcore) {
1566		proc_remove(proc_vmcore);
1567		proc_vmcore = NULL;
1568	}
1569
1570	/* clear the vmcore list. */
1571	while (!list_empty(&vmcore_list)) {
1572		struct vmcore *m;
1573
1574		m = list_first_entry(&vmcore_list, struct vmcore, list);
1575		list_del(&m->list);
1576		kfree(m);
1577	}
1578	free_elfcorebuf();
1579
1580	/* clear vmcore device dump list */
1581	vmcore_free_device_dumps();
1582}