Linux v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/frame.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include <crypto/sha.h>
#include "kexec_internal.h"

DEFINE_MUTEX(kexec_mutex);

/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;


/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};

int kexec_should_crash(struct task_struct *p)
{
	/*
	 * If crash_kexec_post_notifiers is enabled, don't run
	 * crash_kexec() here yet, which must be run after panic
	 * notifiers in panic().
	 */
	if (crash_kexec_post_notifiers)
		return 0;
	/*
	 * There are 4 panic() calls in the do_exit() path, each of which
	 * corresponds to one of these 4 conditions.
	 */
	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
		return 1;
	return 0;
}

int kexec_crash_loaded(void)
{
	return !!kexec_crash_image;
}
EXPORT_SYMBOL_GPL(kexec_crash_loaded);

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
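
/*
 * A worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * PAGE_COUNT(0) == 0, PAGE_COUNT(1) == 1, PAGE_COUNT(4096) == 1 and
 * PAGE_COUNT(4097) == 2, so a byte count is always rounded up to
 * whole pages.
 */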

static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;
	unsigned long nr_pages = totalram_pages();

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stomps on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend   = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}
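
	/*
	 * The test above is the standard half-open interval check: two
	 * segments [mstart, mend) and [pstart, pend) intersect iff
	 * mend > pstart && mstart < pend.  For example, [0x1000, 0x3000)
	 * and [0x2000, 0x4000) overlap since 0x3000 > 0x2000 and
	 * 0x1000 < 0x4000.
	 */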

	/* Ensure our buffer sizes are no larger than
	 * our memory sizes.  This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed. If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > nr_pages / 2)
		return -EINVAL;

	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area, otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}

	return 0;
}

struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->head = 0;
	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

	return image;
}

int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if ((end > mstart) && (start < mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	if (fatal_signal_pending(current))
		return NULL;
	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn   = page_to_boot_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = epfn << PAGE_SHIFT;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = (mend + (size - 1)) & ~(size - 1);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end;
			break;
		}
	}
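
	/*
	 * A worked example of the hole arithmetic above (a sketch,
	 * assuming 4 KiB pages and a hypothetical starting
	 * control_page of 0x1000): for order 1, size = 0x2000, so the
	 * first probe uses hole_start = 0x2000 and hole_end = 0x3fff.
	 */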

	/* Ensure that these pages are decrypted if SME is enabled. */
	if (pages)
		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

	return pages;
}


struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
	}

	return pages;
}

int kimage_crash_copy_vmcoreinfo(struct kimage *image)
{
	struct page *vmcoreinfo_page;
	void *safecopy;

	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump, allocate one vmcoreinfo safe copy from the
	 * crash memory.  As we have arch_kexec_protect_crashkres()
	 * after the kexec syscall, we naturally protect it from write
	 * (even read) access under the kernel direct mapping.  But on
	 * the other hand, we still need to operate on it when a crash
	 * happens to generate the vmcoreinfo note, hence we rely on
	 * vmap for this purpose.
	 */
	vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
	if (!vmcoreinfo_page) {
		pr_warn("Could not allocate vmcoreinfo buffer\n");
		return -ENOMEM;
	}
	safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
	if (!safecopy) {
		pr_warn("Could not vmap vmcoreinfo buffer\n");
		return -ENOMEM;
	}

	image->vmcoreinfo_data_copy = safecopy;
	crash_update_vmcoreinfo_safecopy(safecopy);

	return 0;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}
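
/*
 * A sketch of the resulting entry list: image->head holds an
 * IND_INDIRECTION entry pointing at the first page of entries, and
 * the last slot of each full entry page chains to the next one:
 *
 *   head --> [ DESTINATION | SOURCE | SOURCE | ... | INDIRECTION ]
 *                                                        |
 *                                                        v
 *            [ SOURCE | SOURCE | ...              | DONE        ]
 *
 * Each IND_SOURCE page is copied to the next destination page,
 * counting up from the most recent IND_DESTINATION address;
 * kimage_terminate() writes the final IND_DONE entry.
 */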

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{
	int result;

	destination &= PAGE_MASK;
	result = kimage_add_entry(image, destination | IND_DESTINATION);

	return result;
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	int result;

	page &= PAGE_MASK;
	result = kimage_add_entry(image, page | IND_SOURCE);

	return result;
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);

}

void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
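
/*
 * for_each_kimage_entry() walks the list sketched above: on an
 * IND_INDIRECTION entry it follows the pointer to the next entry page
 * instead of advancing, and it stops when it reaches IND_DONE.  The
 * indirection entries themselves are visited along the way, which is
 * what lets kimage_free() below reclaim the indirection pages.
 */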

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/*
	 * Free up any temporary buffers allocated.  This might be hit
	 * if an error occurred much later, after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}
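
/*
 * kimage_dst_used() replays the entry list: each IND_DESTINATION
 * entry resets the running destination address, and every IND_SOURCE
 * entry implicitly claims the next PAGE_SIZE of it.  For example,
 * given the list DEST 0x8000 | SRC a | SRC b, looking up page 0x9000
 * returns the slot holding b.
 */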

static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used, file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page, use it */
		if (!kimage_is_destination_range(image, addr,
						  addr + PAGE_SIZE))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			addr = old_addr;
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap(page);
		/* Start with a clear page */
		clear_page(ptr);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
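
		/*
		 * mchunk is bounded by the bytes left in this page and
		 * uchunk by the bytes the caller actually supplied.
		 * E.g. with 4 KiB pages, maddr = 0x1800, mbytes = 0x1000
		 * and ubytes = 0x200: mchunk = 0x800, uchunk = 0x200,
		 * and the remaining 0x600 bytes of the page stay zeroed.
		 */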

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kunmap(page);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result  = -ENOMEM;
			goto out;
		}
		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
		ptr = kmap(page);
		ptr += maddr & ~PAGE_MASK;
		mchunk = min_t(size_t, mbytes,
				PAGE_SIZE - (maddr & ~PAGE_MASK));
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		/* For file based kexec, source pages are in kernel memory */
		if (image->file_mode)
			memcpy(ptr, kbuf, uchunk);
		else
			result = copy_from_user(ptr, buf, uchunk);
		kexec_flush_icache_page(page);
		kunmap(page);
		arch_kexec_pre_free_pages(page_address(page), 1);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		ubytes -= uchunk;
		maddr  += mchunk;
		if (image->file_mode)
			kbuf += mchunk;
		else
			buf += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, segment);
		break;
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, segment);
		break;
	}

	return result;
}

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
int kexec_load_disabled;

/*
 * No panic_cpu check version of crash_kexec().  This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __noclone __crash_kexec(struct pt_regs *regs)
{
	/* Take the kexec_mutex here to prevent sys_kexec_load
	 * running on one cpu from replacing the crash kernel
	 * we are using after a panic on a different cpu.
	 *
	 * If the crash kernel was not located in a fixed area
	 * of memory the xchg(&kexec_crash_image) would be
	 * sufficient.  But since I reuse the memory...
	 */
	if (mutex_trylock(&kexec_mutex)) {
		if (kexec_crash_image) {
			struct pt_regs fixed_regs;

			crash_setup_regs(&fixed_regs, regs);
			crash_save_vmcoreinfo();
			machine_crash_shutdown(&fixed_regs);
			machine_kexec(kexec_crash_image);
		}
		mutex_unlock(&kexec_mutex);
	}
}
STACK_FRAME_NON_STANDARD(__crash_kexec);

void crash_kexec(struct pt_regs *regs)
{
	int old_cpu, this_cpu;

	/*
	 * Only one CPU is allowed to execute the crash_kexec() code as with
	 * panic().  Otherwise parallel calls of panic() and crash_kexec()
	 * may stop each other.  To exclude them, we use panic_cpu here too.
	 */
	this_cpu = raw_smp_processor_id();
	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
	if (old_cpu == PANIC_CPU_INVALID) {
		/* This is the 1st CPU which comes here, so go ahead. */
		printk_safe_flush_on_panic();
		__crash_kexec(regs);

		/*
		 * Reset panic_cpu to allow another panic()/crash_kexec()
		 * call.
		 */
		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
	}
}
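
/*
 * The atomic_cmpxchg() above makes the panic_cpu handoff a one-shot
 * gate: the first CPU to arrive swaps PANIC_CPU_INVALID for its own id
 * and runs __crash_kexec(); any CPU arriving later sees a valid id and
 * simply returns.
 */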

size_t crash_get_memory_size(void)
{
	size_t size = 0;

	mutex_lock(&kexec_mutex);
	if (crashk_res.end != crashk_res.start)
		size = resource_size(&crashk_res);
	mutex_unlock(&kexec_mutex);
	return size;
}

void __weak crash_free_reserved_phys_range(unsigned long begin,
					   unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		free_reserved_page(boot_pfn_to_page(addr >> PAGE_SHIFT));
}

int crash_shrink_memory(unsigned long new_size)
{
	int ret = 0;
	unsigned long start, end;
	unsigned long old_size;
	struct resource *ram_res;

	mutex_lock(&kexec_mutex);

	if (kexec_crash_image) {
		ret = -ENOENT;
		goto unlock;
	}
	start = crashk_res.start;
	end = crashk_res.end;
	old_size = (end == 0) ? 0 : end - start + 1;
	if (new_size >= old_size) {
		ret = (new_size == old_size) ? 0 : -EINVAL;
		goto unlock;
	}

	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
	if (!ram_res) {
		ret = -ENOMEM;
		goto unlock;
	}

	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
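
	/*
	 * Both bounds are rounded up to KEXEC_CRASH_MEM_ALIGN, so slightly
	 * more than new_size may be kept.  E.g. with a 2 MiB alignment,
	 * start = 0x10000000 and new_size = 0x180000 give
	 * end = roundup(0x10180000, 0x200000) = 0x10200000.
	 */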

	crash_free_reserved_phys_range(end, crashk_res.end);

	if ((start == end) && (crashk_res.parent != NULL))
		release_resource(&crashk_res);

	ram_res->start = end;
	ram_res->end = crashk_res.end;
	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
	ram_res->name = "System RAM";

	crashk_res.end = end - 1;

	insert_resource(&iomem_resource, ram_res);

unlock:
	mutex_unlock(&kexec_mutex);
	return ret;
}

void crash_save_cpu(struct pt_regs *regs, int cpu)
{
	struct elf_prstatus prstatus;
	u32 *buf;

	if ((cpu < 0) || (cpu >= nr_cpu_ids))
		return;

	/* Using ELF notes here is opportunistic.
	 * I need a well defined structure format
	 * for the data I pass, and I need tags
	 * on the data to indicate what information I have
	 * squirrelled away.  ELF notes happen to provide
	 * all of that, so there is no need to invent something new.
	 */
	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
	if (!buf)
		return;
	memset(&prstatus, 0, sizeof(prstatus));
	prstatus.pr_pid = current->pid;
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
			      &prstatus, sizeof(prstatus));
	final_note(buf);
}

static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could be allocated across 2 vmalloc pages when percpu
	 * is vmalloc based.  vmalloc doesn't guarantee that 2 contiguous
	 * vmalloc pages are also on 2 contiguous physical pages.  In this
	 * case the 2nd part of crash_notes in the 2nd page could be lost,
	 * since only the starting address and size of crash_notes are
	 * exported through sysfs.  Here we round up the size of crash_notes
	 * to the nearest power of two and pass it to __alloc_percpu as the
	 * align value.  This makes sure crash_notes is allocated inside one
	 * physical page.
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
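
	/*
	 * For example, if sizeof(note_buf_t) were 424 bytes, align would
	 * become 512; an object of at most 512 bytes aligned to 512 bytes
	 * can never straddle a page boundary.
	 */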

	/*
	 * Break the build if size is bigger than PAGE_SIZE, since
	 * crash_notes would then definitely span 2 pages.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);


/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		lock_system_sleep();
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		suspend_console();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_console;
		/* At this point, dpm_suspend_start() has been called,
		 * but *not* dpm_suspend_end().  We *must* call
		 * dpm_suspend_end() now.  Otherwise, drivers for
		 * some devices (e.g. interrupt controllers) become
		 * desynchronized with the actual state of the
		 * hardware at resume time, and evil weirdness ensues.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = suspend_disable_secondary_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare(NULL);
		migrate_to_reboot_cpu();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_emerg("Starting new kernel\n");
		machine_shutdown();
	}

	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		suspend_enable_secondary_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
 Resume_console:
		resume_console();
		thaw_processes();
 Restore_console:
		pm_restore_console();
		unlock_system_sleep();
	}
#endif

 Unlock:
	mutex_unlock(&kexec_mutex);
	return error;
}

/*
 * Protection mechanism for crashkernel reserved memory after
 * the kdump kernel is loaded.
 *
 * Provide an empty default implementation here -- architecture
 * code may override this.
 */
void __weak arch_kexec_protect_crashkres(void)
{}

void __weak arch_kexec_unprotect_crashkres(void)
{}
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * kexec.c - kexec system call core code.
   4 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
   5 */
   6
   7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   8
   9#include <linux/capability.h>
  10#include <linux/mm.h>
  11#include <linux/file.h>
  12#include <linux/slab.h>
  13#include <linux/fs.h>
  14#include <linux/kexec.h>
  15#include <linux/mutex.h>
  16#include <linux/list.h>
  17#include <linux/highmem.h>
  18#include <linux/syscalls.h>
  19#include <linux/reboot.h>
  20#include <linux/ioport.h>
  21#include <linux/hardirq.h>
  22#include <linux/elf.h>
  23#include <linux/elfcore.h>
  24#include <linux/utsname.h>
  25#include <linux/numa.h>
  26#include <linux/suspend.h>
  27#include <linux/device.h>
  28#include <linux/freezer.h>
  29#include <linux/panic_notifier.h>
  30#include <linux/pm.h>
  31#include <linux/cpu.h>
  32#include <linux/uaccess.h>
  33#include <linux/io.h>
  34#include <linux/console.h>
  35#include <linux/vmalloc.h>
  36#include <linux/swap.h>
  37#include <linux/syscore_ops.h>
  38#include <linux/compiler.h>
  39#include <linux/hugetlb.h>
  40#include <linux/objtool.h>
  41#include <linux/kmsg_dump.h>
  42
  43#include <asm/page.h>
  44#include <asm/sections.h>
  45
  46#include <crypto/hash.h>
 
  47#include "kexec_internal.h"
  48
  49atomic_t __kexec_lock = ATOMIC_INIT(0);
  50
  51/* Per cpu memory for storing cpu states in case of system crash. */
  52note_buf_t __percpu *crash_notes;
  53
  54/* Flag to indicate we are going to kexec a new kernel */
  55bool kexec_in_progress = false;
  56
  57
  58/* Location of the reserved area for the crash kernel */
  59struct resource crashk_res = {
  60	.name  = "Crash kernel",
  61	.start = 0,
  62	.end   = 0,
  63	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  64	.desc  = IORES_DESC_CRASH_KERNEL
  65};
  66struct resource crashk_low_res = {
  67	.name  = "Crash kernel",
  68	.start = 0,
  69	.end   = 0,
  70	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
  71	.desc  = IORES_DESC_CRASH_KERNEL
  72};
  73
  74int kexec_should_crash(struct task_struct *p)
  75{
  76	/*
  77	 * If crash_kexec_post_notifiers is enabled, don't run
  78	 * crash_kexec() here yet, which must be run after panic
  79	 * notifiers in panic().
  80	 */
  81	if (crash_kexec_post_notifiers)
  82		return 0;
  83	/*
  84	 * There are 4 panic() calls in make_task_dead() path, each of which
  85	 * corresponds to each of these 4 conditions.
  86	 */
  87	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
  88		return 1;
  89	return 0;
  90}
  91
  92int kexec_crash_loaded(void)
  93{
  94	return !!kexec_crash_image;
  95}
  96EXPORT_SYMBOL_GPL(kexec_crash_loaded);
  97
  98/*
  99 * When kexec transitions to the new kernel there is a one-to-one
 100 * mapping between physical and virtual addresses.  On processors
 101 * where you can disable the MMU this is trivial, and easy.  For
 102 * others it is still a simple predictable page table to setup.
 103 *
 104 * In that environment kexec copies the new kernel to its final
 105 * resting place.  This means I can only support memory whose
 106 * physical address can fit in an unsigned long.  In particular
 107 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 108 * If the assembly stub has more restrictive requirements
 109 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 110 * defined more restrictively in <asm/kexec.h>.
 111 *
 112 * The code for the transition from the current kernel to the
 113 * new kernel is placed in the control_code_buffer, whose size
 114 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 115 * page of memory is necessary, but some architectures require more.
 116 * Because this memory must be identity mapped in the transition from
 117 * virtual to physical addresses it must live in the range
 118 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 119 * modifiable.
 120 *
 121 * The assembly stub in the control code buffer is passed a linked list
 122 * of descriptor pages detailing the source pages of the new kernel,
 123 * and the destination addresses of those source pages.  As this data
 124 * structure is not used in the context of the current OS, it must
 125 * be self-contained.
 126 *
 127 * The code has been made to work with highmem pages and will use a
 128 * destination page in its final resting place (if it happens
 129 * to allocate it).  The end product of this is that most of the
 130 * physical address space, and most of RAM can be used.
 131 *
 132 * Future directions include:
 133 *  - allocating a page table with the control code buffer identity
 134 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 135 *    reliable.
 136 */
 137
 138/*
 139 * KIMAGE_NO_DEST is an impossible destination address..., for
 140 * allocating pages whose destination address we do not care about.
 141 */
 142#define KIMAGE_NO_DEST (-1UL)
 143#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
 144
 145static struct page *kimage_alloc_page(struct kimage *image,
 146				       gfp_t gfp_mask,
 147				       unsigned long dest);
 148
 149int sanity_check_segment_list(struct kimage *image)
 150{
 151	int i;
 152	unsigned long nr_segments = image->nr_segments;
 153	unsigned long total_pages = 0;
 154	unsigned long nr_pages = totalram_pages();
 155
 156	/*
 157	 * Verify we have good destination addresses.  The caller is
 158	 * responsible for making certain we don't attempt to load
 159	 * the new image into invalid or reserved areas of RAM.  This
 160	 * just verifies it is an address we can use.
 161	 *
 162	 * Since the kernel does everything in page size chunks ensure
 163	 * the destination addresses are page aligned.  Too many
 164	 * special cases crop of when we don't do this.  The most
 165	 * insidious is getting overlapping destination addresses
 166	 * simply because addresses are changed to page size
 167	 * granularity.
 168	 */
 169	for (i = 0; i < nr_segments; i++) {
 170		unsigned long mstart, mend;
 171
 172		mstart = image->segment[i].mem;
 173		mend   = mstart + image->segment[i].memsz;
 174		if (mstart > mend)
 175			return -EADDRNOTAVAIL;
 176		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
 177			return -EADDRNOTAVAIL;
 178		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
 179			return -EADDRNOTAVAIL;
 180	}
 181
 182	/* Verify our destination addresses do not overlap.
 183	 * If we alloed overlapping destination addresses
 184	 * through very weird things can happen with no
 185	 * easy explanation as one segment stops on another.
 186	 */
 187	for (i = 0; i < nr_segments; i++) {
 188		unsigned long mstart, mend;
 189		unsigned long j;
 190
 191		mstart = image->segment[i].mem;
 192		mend   = mstart + image->segment[i].memsz;
 193		for (j = 0; j < i; j++) {
 194			unsigned long pstart, pend;
 195
 196			pstart = image->segment[j].mem;
 197			pend   = pstart + image->segment[j].memsz;
 198			/* Do the segments overlap ? */
 199			if ((mend > pstart) && (mstart < pend))
 200				return -EINVAL;
 201		}
 202	}
 203
 204	/* Ensure our buffer sizes are strictly less than
 205	 * our memory sizes.  This should always be the case,
 206	 * and it is easier to check up front than to be surprised
 207	 * later on.
 208	 */
 209	for (i = 0; i < nr_segments; i++) {
 210		if (image->segment[i].bufsz > image->segment[i].memsz)
 211			return -EINVAL;
 212	}
 213
 214	/*
 215	 * Verify that no more than half of memory will be consumed. If the
 216	 * request from userspace is too large, a large amount of time will be
 217	 * wasted allocating pages, which can cause a soft lockup.
 218	 */
 219	for (i = 0; i < nr_segments; i++) {
 220		if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
 221			return -EINVAL;
 222
 223		total_pages += PAGE_COUNT(image->segment[i].memsz);
 224	}
 225
 226	if (total_pages > nr_pages / 2)
 227		return -EINVAL;
 228
 229	/*
 230	 * Verify we have good destination addresses.  Normally
 231	 * the caller is responsible for making certain we don't
 232	 * attempt to load the new image into invalid or reserved
 233	 * areas of RAM.  But crash kernels are preloaded into a
 234	 * reserved area of ram.  We must ensure the addresses
 235	 * are in the reserved area otherwise preloading the
 236	 * kernel could corrupt things.
 237	 */
 238
 239	if (image->type == KEXEC_TYPE_CRASH) {
 240		for (i = 0; i < nr_segments; i++) {
 241			unsigned long mstart, mend;
 242
 243			mstart = image->segment[i].mem;
 244			mend = mstart + image->segment[i].memsz - 1;
 245			/* Ensure we are within the crash kernel limits */
 246			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
 247			    (mend > phys_to_boot_phys(crashk_res.end)))
 248				return -EADDRNOTAVAIL;
 249		}
 250	}
 251
 252	return 0;
 253}
 254
 255struct kimage *do_kimage_alloc_init(void)
 256{
 257	struct kimage *image;
 258
 259	/* Allocate a controlling structure */
 260	image = kzalloc(sizeof(*image), GFP_KERNEL);
 261	if (!image)
 262		return NULL;
 263
 264	image->head = 0;
 265	image->entry = &image->head;
 266	image->last_entry = &image->head;
 267	image->control_page = ~0; /* By default this does not apply */
 268	image->type = KEXEC_TYPE_DEFAULT;
 269
 270	/* Initialize the list of control pages */
 271	INIT_LIST_HEAD(&image->control_pages);
 272
 273	/* Initialize the list of destination pages */
 274	INIT_LIST_HEAD(&image->dest_pages);
 275
 276	/* Initialize the list of unusable pages */
 277	INIT_LIST_HEAD(&image->unusable_pages);
 278
 279	return image;
 280}
 281
 282int kimage_is_destination_range(struct kimage *image,
 283					unsigned long start,
 284					unsigned long end)
 285{
 286	unsigned long i;
 287
 288	for (i = 0; i < image->nr_segments; i++) {
 289		unsigned long mstart, mend;
 290
 291		mstart = image->segment[i].mem;
 292		mend = mstart + image->segment[i].memsz;
 293		if ((end > mstart) && (start < mend))
 294			return 1;
 295	}
 296
 297	return 0;
 298}
 299
 300static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 301{
 302	struct page *pages;
 303
 304	if (fatal_signal_pending(current))
 305		return NULL;
 306	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
 307	if (pages) {
 308		unsigned int count, i;
 309
 310		pages->mapping = NULL;
 311		set_page_private(pages, order);
 312		count = 1 << order;
 313		for (i = 0; i < count; i++)
 314			SetPageReserved(pages + i);
 315
 316		arch_kexec_post_alloc_pages(page_address(pages), count,
 317					    gfp_mask);
 318
 319		if (gfp_mask & __GFP_ZERO)
 320			for (i = 0; i < count; i++)
 321				clear_highpage(pages + i);
 322	}
 323
 324	return pages;
 325}
 326
 327static void kimage_free_pages(struct page *page)
 328{
 329	unsigned int order, count, i;
 330
 331	order = page_private(page);
 332	count = 1 << order;
 333
 334	arch_kexec_pre_free_pages(page_address(page), count);
 335
 336	for (i = 0; i < count; i++)
 337		ClearPageReserved(page + i);
 338	__free_pages(page, order);
 339}
 340
 341void kimage_free_page_list(struct list_head *list)
 342{
 343	struct page *page, *next;
 344
 345	list_for_each_entry_safe(page, next, list, lru) {
 346		list_del(&page->lru);
 347		kimage_free_pages(page);
 348	}
 349}
 350
 351static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
 352							unsigned int order)
 353{
 354	/* Control pages are special, they are the intermediaries
 355	 * that are needed while we copy the rest of the pages
 356	 * to their final resting place.  As such they must
 357	 * not conflict with either the destination addresses
 358	 * or memory the kernel is already using.
 359	 *
 360	 * The only case where we really need more than one of
 361	 * these are for architectures where we cannot disable
 362	 * the MMU and must instead generate an identity mapped
 363	 * page table for all of the memory.
 364	 *
 365	 * At worst this runs in O(N) of the image size.
 366	 */
 367	struct list_head extra_pages;
 368	struct page *pages;
 369	unsigned int count;
 370
 371	count = 1 << order;
 372	INIT_LIST_HEAD(&extra_pages);
 373
 374	/* Loop while I can allocate a page and the page allocated
 375	 * is a destination page.
 376	 */
 377	do {
 378		unsigned long pfn, epfn, addr, eaddr;
 379
 380		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
 381		if (!pages)
 382			break;
 383		pfn   = page_to_boot_pfn(pages);
 384		epfn  = pfn + count;
 385		addr  = pfn << PAGE_SHIFT;
 386		eaddr = epfn << PAGE_SHIFT;
 387		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
 388			      kimage_is_destination_range(image, addr, eaddr)) {
 389			list_add(&pages->lru, &extra_pages);
 390			pages = NULL;
 391		}
 392	} while (!pages);
 393
 394	if (pages) {
 395		/* Remember the allocated page... */
 396		list_add(&pages->lru, &image->control_pages);
 397
 398		/* Because the page is already in it's destination
 399		 * location we will never allocate another page at
 400		 * that address.  Therefore kimage_alloc_pages
 401		 * will not return it (again) and we don't need
 402		 * to give it an entry in image->segment[].
 403		 */
 404	}
 405	/* Deal with the destination pages I have inadvertently allocated.
 406	 *
 407	 * Ideally I would convert multi-page allocations into single
 408	 * page allocations, and add everything to image->dest_pages.
 409	 *
 410	 * For now it is simpler to just free the pages.
 411	 */
 412	kimage_free_page_list(&extra_pages);
 413
 414	return pages;
 415}
 416
 417static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
 418						      unsigned int order)
 419{
 420	/* Control pages are special, they are the intermediaries
 421	 * that are needed while we copy the rest of the pages
 422	 * to their final resting place.  As such they must
 423	 * not conflict with either the destination addresses
 424	 * or memory the kernel is already using.
 425	 *
 426	 * Control pages are also the only pags we must allocate
 427	 * when loading a crash kernel.  All of the other pages
 428	 * are specified by the segments and we just memcpy
 429	 * into them directly.
 430	 *
 431	 * The only case where we really need more than one of
 432	 * these are for architectures where we cannot disable
 433	 * the MMU and must instead generate an identity mapped
 434	 * page table for all of the memory.
 435	 *
 436	 * Given the low demand this implements a very simple
 437	 * allocator that finds the first hole of the appropriate
 438	 * size in the reserved memory region, and allocates all
 439	 * of the memory up to and including the hole.
 440	 */
 441	unsigned long hole_start, hole_end, size;
 442	struct page *pages;
 443
 444	pages = NULL;
 445	size = (1 << order) << PAGE_SHIFT;
 446	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
 447	hole_end   = hole_start + size - 1;
 448	while (hole_end <= crashk_res.end) {
 449		unsigned long i;
 450
 451		cond_resched();
 452
 453		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
 454			break;
 455		/* See if I overlap any of the segments */
 456		for (i = 0; i < image->nr_segments; i++) {
 457			unsigned long mstart, mend;
 458
 459			mstart = image->segment[i].mem;
 460			mend   = mstart + image->segment[i].memsz - 1;
 461			if ((hole_end >= mstart) && (hole_start <= mend)) {
 462				/* Advance the hole to the end of the segment */
 463				hole_start = (mend + (size - 1)) & ~(size - 1);
 464				hole_end   = hole_start + size - 1;
 465				break;
 466			}
 467		}
 468		/* If I don't overlap any segments I have found my hole! */
 469		if (i == image->nr_segments) {
 470			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
 471			image->control_page = hole_end;
 472			break;
 473		}
 474	}
 475
 476	/* Ensure that these pages are decrypted if SME is enabled. */
 477	if (pages)
 478		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);
 479
 480	return pages;
 481}
 482
 483
 484struct page *kimage_alloc_control_pages(struct kimage *image,
 485					 unsigned int order)
 486{
 487	struct page *pages = NULL;
 488
 489	switch (image->type) {
 490	case KEXEC_TYPE_DEFAULT:
 491		pages = kimage_alloc_normal_control_pages(image, order);
 492		break;
 493	case KEXEC_TYPE_CRASH:
 494		pages = kimage_alloc_crash_control_pages(image, order);
 495		break;
 496	}
 497
 498	return pages;
 499}
 500
 501int kimage_crash_copy_vmcoreinfo(struct kimage *image)
 502{
 503	struct page *vmcoreinfo_page;
 504	void *safecopy;
 505
 506	if (image->type != KEXEC_TYPE_CRASH)
 507		return 0;
 508
 509	/*
 510	 * For kdump, allocate one vmcoreinfo safe copy from the
 511	 * crash memory. as we have arch_kexec_protect_crashkres()
 512	 * after kexec syscall, we naturally protect it from write
 513	 * (even read) access under kernel direct mapping. But on
 514	 * the other hand, we still need to operate it when crash
 515	 * happens to generate vmcoreinfo note, hereby we rely on
 516	 * vmap for this purpose.
 517	 */
 518	vmcoreinfo_page = kimage_alloc_control_pages(image, 0);
 519	if (!vmcoreinfo_page) {
 520		pr_warn("Could not allocate vmcoreinfo buffer\n");
 521		return -ENOMEM;
 522	}
 523	safecopy = vmap(&vmcoreinfo_page, 1, VM_MAP, PAGE_KERNEL);
 524	if (!safecopy) {
 525		pr_warn("Could not vmap vmcoreinfo buffer\n");
 526		return -ENOMEM;
 527	}
 528
 529	image->vmcoreinfo_data_copy = safecopy;
 530	crash_update_vmcoreinfo_safecopy(safecopy);
 531
 532	return 0;
 533}
 534
 535static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 536{
 537	if (*image->entry != 0)
 538		image->entry++;
 539
 540	if (image->entry == image->last_entry) {
 541		kimage_entry_t *ind_page;
 542		struct page *page;
 543
 544		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
 545		if (!page)
 546			return -ENOMEM;
 547
 548		ind_page = page_address(page);
 549		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
 550		image->entry = ind_page;
 551		image->last_entry = ind_page +
 552				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
 553	}
 554	*image->entry = entry;
 555	image->entry++;
 556	*image->entry = 0;
 557
 558	return 0;
 559}
 560
 561static int kimage_set_destination(struct kimage *image,
 562				   unsigned long destination)
 563{
 
 
 564	destination &= PAGE_MASK;
 
 565
 566	return kimage_add_entry(image, destination | IND_DESTINATION);
 567}
 568
 569
 570static int kimage_add_page(struct kimage *image, unsigned long page)
 571{
 
 
 572	page &= PAGE_MASK;
 
 573
 574	return kimage_add_entry(image, page | IND_SOURCE);
 575}
 576
 577
 578static void kimage_free_extra_pages(struct kimage *image)
 579{
 580	/* Walk through and free any extra destination pages I may have */
 581	kimage_free_page_list(&image->dest_pages);
 582
 583	/* Walk through and free any unusable pages I have cached */
 584	kimage_free_page_list(&image->unusable_pages);
 585
 586}
 587
 588void kimage_terminate(struct kimage *image)
 589{
 590	if (*image->entry != 0)
 591		image->entry++;
 592
 593	*image->entry = IND_DONE;
 594}
 595
 596#define for_each_kimage_entry(image, ptr, entry) \
 597	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
 598		ptr = (entry & IND_INDIRECTION) ? \
 599			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
 600
 601static void kimage_free_entry(kimage_entry_t entry)
 602{
 603	struct page *page;
 604
 605	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
 606	kimage_free_pages(page);
 607}
 608
 609void kimage_free(struct kimage *image)
 610{
 611	kimage_entry_t *ptr, entry;
 612	kimage_entry_t ind = 0;
 613
 614	if (!image)
 615		return;
 616
 617	if (image->vmcoreinfo_data_copy) {
 618		crash_update_vmcoreinfo_safecopy(NULL);
 619		vunmap(image->vmcoreinfo_data_copy);
 620	}
 621
 622	kimage_free_extra_pages(image);
 623	for_each_kimage_entry(image, ptr, entry) {
 624		if (entry & IND_INDIRECTION) {
 625			/* Free the previous indirection page */
 626			if (ind & IND_INDIRECTION)
 627				kimage_free_entry(ind);
 628			/* Save this indirection page until we are
 629			 * done with it.
 630			 */
 631			ind = entry;
 632		} else if (entry & IND_SOURCE)
 633			kimage_free_entry(entry);
 634	}
 635	/* Free the final indirection page */
 636	if (ind & IND_INDIRECTION)
 637		kimage_free_entry(ind);
 638
 639	/* Handle any machine specific cleanup */
 640	machine_kexec_cleanup(image);
 641
 642	/* Free the kexec control pages... */
 643	kimage_free_page_list(&image->control_pages);
 644
 645	/*
 646	 * Free up any temporary buffers allocated. This path is hit
 647	 * if an error occurred long after the buffers were allocated.
 648	 */
 649	if (image->file_mode)
 650		kimage_file_post_load_cleanup(image);
 651
 652	kfree(image);
 653}
 654
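    	/*
    	 * Find the source entry whose data is destined for the given
    	 * physical page.  IND_DESTINATION entries set the running
    	 * destination address and each IND_SOURCE entry consumes one page
    	 * of it, so a match returns a pointer to that source entry.
    	 */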
 655static kimage_entry_t *kimage_dst_used(struct kimage *image,
 656					unsigned long page)
 657{
 658	kimage_entry_t *ptr, entry;
 659	unsigned long destination = 0;
 660
 661	for_each_kimage_entry(image, ptr, entry) {
 662		if (entry & IND_DESTINATION)
 663			destination = entry & PAGE_MASK;
 664		else if (entry & IND_SOURCE) {
 665			if (page == destination)
 666				return ptr;
 667			destination += PAGE_SIZE;
 668		}
 669	}
 670
 671	return NULL;
 672}
 673
 674static struct page *kimage_alloc_page(struct kimage *image,
 675					gfp_t gfp_mask,
 676					unsigned long destination)
 677{
 678	/*
 679	 * Here we implement safeguards to ensure that a source page
 680	 * is not copied to its destination page before the data on
 681	 * the destination page is no longer useful.
 682	 *
 683	 * To do this we maintain the invariant that a source page is
 684	 * either its own destination page, or it is not a
 685	 * destination page at all.
 686	 *
 687	 * That is slightly stronger than required, but the proof
 688	 * that no problems will occur is then trivial, and the
 689	 * implementation is simple to verify.
 690	 *
 691	 * When allocating all pages normally this algorithm will run
 692	 * in O(N) time, but in the worst case it will run in O(N^2)
 693	 * time. If the runtime becomes a problem the data structures
 694	 * can be fixed.
 695	 */
 696	struct page *page;
 697	unsigned long addr;
 698
 699	/*
 700	 * Walk through the list of destination pages, and see if I
 701	 * have a match.
 702	 */
 703	list_for_each_entry(page, &image->dest_pages, lru) {
 704		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
 705		if (addr == destination) {
 706			list_del(&page->lru);
 707			return page;
 708		}
 709	}
 710	page = NULL;
 711	while (1) {
 712		kimage_entry_t *old;
 713
 714		/* Allocate a page; if we run out of memory, give up */
 715		page = kimage_alloc_pages(gfp_mask, 0);
 716		if (!page)
 717			return NULL;
 718		/* If the page cannot be used, file it away */
 719		if (page_to_boot_pfn(page) >
 720				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
 721			list_add(&page->lru, &image->unusable_pages);
 722			continue;
 723		}
 724		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
 725
 726		/* If it is the destination page we want, use it */
 727		if (addr == destination)
 728			break;
 729
 730		/* If the page is not a destination page use it */
 731		if (!kimage_is_destination_range(image, addr,
 732						  addr + PAGE_SIZE))
 733			break;
 734
 735		/*
 736		 * I know that the page is someone's destination page.
 737		 * See if there is already a source page for this
 738		 * destination page, and if so swap the source pages.
 739		 */
 740		old = kimage_dst_used(image, addr);
 741		if (old) {
 742			/* If so move it */
 743			unsigned long old_addr;
 744			struct page *old_page;
 745
 746			old_addr = *old & PAGE_MASK;
 747			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
 748			copy_highpage(page, old_page);
 749			*old = addr | (*old & ~PAGE_MASK);
 750
 751			/* The old page I have found cannot be a
 752			 * destination page, so return it, provided
 753			 * its gfp_flags honor the ones passed in.
 754			 */
 755			if (!(gfp_mask & __GFP_HIGHMEM) &&
 756			    PageHighMem(old_page)) {
 757				kimage_free_pages(old_page);
 758				continue;
 759			}
 760			page = old_page;
 761			break;
 762		}
 763		/* Place the page on the destination list, to be used later */
 764		list_add(&page->lru, &image->dest_pages);
 765	}
 766
 767	return page;
 768}
 769
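    	/*
    	 * Load a normal segment: for every destination page, obtain a
    	 * source page that is safe to relocate (see kimage_alloc_page()),
    	 * record it in the entry list, and copy the user or kernel buffer
    	 * into it one page at a time, zeroing whatever the buffer does
    	 * not cover.
    	 */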
 770static int kimage_load_normal_segment(struct kimage *image,
 771					 struct kexec_segment *segment)
 772{
 773	unsigned long maddr;
 774	size_t ubytes, mbytes;
 775	int result;
 776	unsigned char __user *buf = NULL;
 777	unsigned char *kbuf = NULL;
 778
 779	if (image->file_mode)
 780		kbuf = segment->kbuf;
 781	else
 782		buf = segment->buf;
 783	ubytes = segment->bufsz;
 784	mbytes = segment->memsz;
 785	maddr = segment->mem;
 786
 787	result = kimage_set_destination(image, maddr);
 788	if (result < 0)
 789		goto out;
 790
 791	while (mbytes) {
 792		struct page *page;
 793		char *ptr;
 794		size_t uchunk, mchunk;
 795
 796		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
 797		if (!page) {
 798			result  = -ENOMEM;
 799			goto out;
 800		}
 801		result = kimage_add_page(image, page_to_boot_pfn(page)
 802								<< PAGE_SHIFT);
 803		if (result < 0)
 804			goto out;
 805
 806		ptr = kmap_local_page(page);
 807		/* Start with a clear page */
 808		clear_page(ptr);
 809		ptr += maddr & ~PAGE_MASK;
 810		mchunk = min_t(size_t, mbytes,
 811				PAGE_SIZE - (maddr & ~PAGE_MASK));
 812		uchunk = min(ubytes, mchunk);
 813
 814		/* For file based kexec, source pages are in kernel memory */
 815		if (image->file_mode)
 816			memcpy(ptr, kbuf, uchunk);
 817		else
 818			result = copy_from_user(ptr, buf, uchunk);
 819		kunmap_local(ptr);
 820		if (result) {
 821			result = -EFAULT;
 822			goto out;
 823		}
 824		ubytes -= uchunk;
 825		maddr  += mchunk;
 826		if (image->file_mode)
 827			kbuf += mchunk;
 828		else
 829			buf += mchunk;
 830		mbytes -= mchunk;
 831
 832		cond_resched();
 833	}
 834out:
 835	return result;
 836}
 837
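    	/*
    	 * Crash segments differ from normal ones: the destination pages
    	 * already lie inside the reserved crash kernel region, so they
    	 * are addressed directly with boot_pfn_to_page() and written in
    	 * place rather than allocated and relocated at kexec time.
    	 */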
 838static int kimage_load_crash_segment(struct kimage *image,
 839					struct kexec_segment *segment)
 840{
 841	/* For crash dump kernels we simply copy the data from
 842	 * user space to its destination.
 843	 * We do things a page at a time for the sake of kmap_local_page().
 844	 */
 845	unsigned long maddr;
 846	size_t ubytes, mbytes;
 847	int result;
 848	unsigned char __user *buf = NULL;
 849	unsigned char *kbuf = NULL;
 850
 851	result = 0;
 852	if (image->file_mode)
 853		kbuf = segment->kbuf;
 854	else
 855		buf = segment->buf;
 856	ubytes = segment->bufsz;
 857	mbytes = segment->memsz;
 858	maddr = segment->mem;
 859	while (mbytes) {
 860		struct page *page;
 861		char *ptr;
 862		size_t uchunk, mchunk;
 863
 864		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
 865		if (!page) {
 866			result  = -ENOMEM;
 867			goto out;
 868		}
 869		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
 870		ptr = kmap_local_page(page);
 871		ptr += maddr & ~PAGE_MASK;
 872		mchunk = min_t(size_t, mbytes,
 873				PAGE_SIZE - (maddr & ~PAGE_MASK));
 874		uchunk = min(ubytes, mchunk);
 875		if (mchunk > uchunk) {
 876			/* Zero the trailing part of the page */
 877			memset(ptr + uchunk, 0, mchunk - uchunk);
 878		}
 879
 880		/* For file based kexec, source pages are in kernel memory */
 881		if (image->file_mode)
 882			memcpy(ptr, kbuf, uchunk);
 883		else
 884			result = copy_from_user(ptr, buf, uchunk);
 885		kexec_flush_icache_page(page);
 886		kunmap_local(ptr);
 887		arch_kexec_pre_free_pages(page_address(page), 1);
 888		if (result) {
 889			result = -EFAULT;
 890			goto out;
 891		}
 892		ubytes -= uchunk;
 893		maddr  += mchunk;
 894		if (image->file_mode)
 895			kbuf += mchunk;
 896		else
 897			buf += mchunk;
 898		mbytes -= mchunk;
 899
 900		cond_resched();
 901	}
 902out:
 903	return result;
 904}
 905
 906int kimage_load_segment(struct kimage *image,
 907				struct kexec_segment *segment)
 908{
 909	int result = -ENOMEM;
 910
 911	switch (image->type) {
 912	case KEXEC_TYPE_DEFAULT:
 913		result = kimage_load_normal_segment(image, segment);
 914		break;
 915	case KEXEC_TYPE_CRASH:
 916		result = kimage_load_crash_segment(image, segment);
 917		break;
 918	}
 919
 920	return result;
 921}
 922
 923struct kimage *kexec_image;
 924struct kimage *kexec_crash_image;
 925int kexec_load_disabled;
 926#ifdef CONFIG_SYSCTL
 927static struct ctl_table kexec_core_sysctls[] = {
 928	{
 929		.procname	= "kexec_load_disabled",
 930		.data		= &kexec_load_disabled,
 931		.maxlen		= sizeof(int),
 932		.mode		= 0644,
 933		/* only handle a transition from default "0" to "1" */
 934		.proc_handler	= proc_dointvec_minmax,
 935		.extra1		= SYSCTL_ONE,
 936		.extra2		= SYSCTL_ONE,
 937	},
 938	{ }
 939};
 940
 941static int __init kexec_core_sysctl_init(void)
 942{
 943	register_sysctl_init("kernel", kexec_core_sysctls);
 944	return 0;
 945}
 946late_initcall(kexec_core_sysctl_init);
 947#endif
 948
 949/*
 950 * No panic_cpu check version of crash_kexec().  This function is called
 951 * only when panic_cpu holds the current CPU number; this is the only CPU
 952 * which processes crash_kexec routines.
 953 */
 954void __noclone __crash_kexec(struct pt_regs *regs)
 955{
 956	/* Take the kexec_lock here to prevent sys_kexec_load
 957	 * running on one cpu from replacing the crash kernel
 958	 * we are using after a panic on a different cpu.
 959	 *
 960	 * If the crash kernel was not located in a fixed area
 961	 * of memory the xchg(&kexec_crash_image) would be
 962	 * sufficient.  But since I reuse the memory...
 963	 */
 964	if (kexec_trylock()) {
 965		if (kexec_crash_image) {
 966			struct pt_regs fixed_regs;
 967
 968			crash_setup_regs(&fixed_regs, regs);
 969			crash_save_vmcoreinfo();
 970			machine_crash_shutdown(&fixed_regs);
 971			machine_kexec(kexec_crash_image);
 972		}
 973		kexec_unlock();
 974	}
 975}
 976STACK_FRAME_NON_STANDARD(__crash_kexec);
 977
 978void crash_kexec(struct pt_regs *regs)
 979{
 980	int old_cpu, this_cpu;
 981
 982	/*
 983	 * Only one CPU is allowed to execute the crash_kexec() code as with
 984	 * panic().  Otherwise parallel calls of panic() and crash_kexec()
 985	 * may stop each other.  To exclude them, we use panic_cpu here too.
 986	 */
 987	this_cpu = raw_smp_processor_id();
 988	old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);
 989	if (old_cpu == PANIC_CPU_INVALID) {
 990		/* This is the 1st CPU which comes here, so go ahead. */
 991		__crash_kexec(regs);
 992
 993		/*
 994		 * Reset panic_cpu to allow another panic()/crash_kexec()
 995		 * call.
 996		 */
 997		atomic_set(&panic_cpu, PANIC_CPU_INVALID);
 998	}
 999}
1000
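    	/*
    	 * Report the size of the reserved crash kernel region, or -EBUSY
    	 * if the kexec lock cannot be taken.
    	 */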
1001ssize_t crash_get_memory_size(void)
1002{
1003	ssize_t size = 0;
1004
1005	if (!kexec_trylock())
1006		return -EBUSY;
1007
1008	if (crashk_res.end != crashk_res.start)
1009		size = resource_size(&crashk_res);
1010
1011	kexec_unlock();
1012	return size;
1013}
1014
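    	/*
    	 * Shrink the reserved crash kernel region to new_size and hand
    	 * the freed tail back to the system as "System RAM".  Fails with
    	 * -ENOENT if a crash image is already loaded and with -EINVAL if
    	 * the region would have to grow.
    	 */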
1015int crash_shrink_memory(unsigned long new_size)
1016{
1017	int ret = 0;
1018	unsigned long start, end;
1019	unsigned long old_size;
1020	struct resource *ram_res;
1021
1022	if (!kexec_trylock())
1023		return -EBUSY;
1024
1025	if (kexec_crash_image) {
1026		ret = -ENOENT;
1027		goto unlock;
1028	}
1029	start = crashk_res.start;
1030	end = crashk_res.end;
1031	old_size = (end == 0) ? 0 : end - start + 1;
1032	if (new_size >= old_size) {
1033		ret = (new_size == old_size) ? 0 : -EINVAL;
1034		goto unlock;
1035	}
1036
1037	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1038	if (!ram_res) {
1039		ret = -ENOMEM;
1040		goto unlock;
1041	}
1042
1043	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1044	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1045
1046	crash_free_reserved_phys_range(end, crashk_res.end);
1047
1048	if ((start == end) && (crashk_res.parent != NULL))
1049		release_resource(&crashk_res);
1050
1051	ram_res->start = end;
1052	ram_res->end = crashk_res.end;
1053	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
1054	ram_res->name = "System RAM";
1055
1056	crashk_res.end = end - 1;
1057
1058	insert_resource(&iomem_resource, ram_res);
1059
1060unlock:
1061	kexec_unlock();
1062	return ret;
1063}
1064
1065void crash_save_cpu(struct pt_regs *regs, int cpu)
1066{
1067	struct elf_prstatus prstatus;
1068	u32 *buf;
1069
1070	if ((cpu < 0) || (cpu >= nr_cpu_ids))
1071		return;
1072
1073	/* Using ELF notes here is opportunistic.
1074	 * I need a well defined structure format
1075	 * for the data I pass, and I need tags
1076	 * on the data to indicate what information I have
1077	 * squirrelled away.  ELF notes happen to provide
1078	 * all of that, so there is no need to invent something new.
1079	 */
1080	buf = (u32 *)per_cpu_ptr(crash_notes, cpu);
1081	if (!buf)
1082		return;
1083	memset(&prstatus, 0, sizeof(prstatus));
1084	prstatus.common.pr_pid = current->pid;
1085	elf_core_copy_regs(&prstatus.pr_reg, regs);
1086	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1087			      &prstatus, sizeof(prstatus));
1088	final_note(buf);
1089}
1090
1091static int __init crash_notes_memory_init(void)
1092{
1093	/* Allocate memory for saving cpu registers. */
1094	size_t size, align;
1095
1096	/*
1097	 * crash_notes could be allocated across 2 vmalloc pages when percpu
1098	 * is vmalloc based. vmalloc doesn't guarantee that 2 contiguous
1099	 * vmalloc pages are also on 2 contiguous physical pages, in which
1100	 * case the 2nd part of crash_notes in the 2nd page could be lost,
1101	 * since only the starting address and size of crash_notes are
1102	 * exported through sysfs. So round up the size of crash_notes to
1103	 * the nearest power of two and pass it to __alloc_percpu as the
1104	 * align value, ensuring crash_notes fits within one physical page.
1105	 */
1106	size = sizeof(note_buf_t);
1107	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
1108
1109	/*
1110	 * Break the build if size is bigger than PAGE_SIZE, since
1111	 * crash_notes would then definitely span 2 pages.
1112	 */
1113	BUILD_BUG_ON(size > PAGE_SIZE);
1114
1115	crash_notes = __alloc_percpu(size, align);
1116	if (!crash_notes) {
1117		pr_warn("Memory allocation for saving cpu register states failed\n");
1118		return -ENOMEM;
1119	}
1120	return 0;
1121}
1122subsys_initcall(crash_notes_memory_init);
1123
1124
1125/*
1126 * Move into place and start executing a preloaded standalone
1127 * executable.  If nothing was preloaded return an error.
1128 */
1129int kernel_kexec(void)
1130{
1131	int error = 0;
1132
1133	if (!kexec_trylock())
1134		return -EBUSY;
1135	if (!kexec_image) {
1136		error = -EINVAL;
1137		goto Unlock;
1138	}
1139
1140#ifdef CONFIG_KEXEC_JUMP
1141	if (kexec_image->preserve_context) {
1142		pm_prepare_console();
1143		error = freeze_processes();
1144		if (error) {
1145			error = -EBUSY;
1146			goto Restore_console;
1147		}
1148		suspend_console();
1149		error = dpm_suspend_start(PMSG_FREEZE);
1150		if (error)
1151			goto Resume_console;
1152		/* At this point, dpm_suspend_start() has been called,
1153		 * but *not* dpm_suspend_end(). We *must* call
1154		 * dpm_suspend_end() now.  Otherwise, drivers for
1155		 * some devices (e.g. interrupt controllers) become
1156		 * desynchronized with the actual state of the
1157		 * hardware at resume time, and evil weirdness ensues.
1158		 */
1159		error = dpm_suspend_end(PMSG_FREEZE);
1160		if (error)
1161			goto Resume_devices;
1162		error = suspend_disable_secondary_cpus();
1163		if (error)
1164			goto Enable_cpus;
1165		local_irq_disable();
1166		error = syscore_suspend();
1167		if (error)
1168			goto Enable_irqs;
1169	} else
1170#endif
1171	{
1172		kexec_in_progress = true;
1173		kernel_restart_prepare("kexec reboot");
1174		migrate_to_reboot_cpu();
1175
1176		/*
1177		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1178		 * no further code needs to use CPU hotplug (which is true in
1179		 * the reboot case). However, the kexec path depends on using
1180		 * CPU hotplug again; so re-enable it here.
1181		 */
1182		cpu_hotplug_enable();
1183		pr_notice("Starting new kernel\n");
1184		machine_shutdown();
1185	}
1186
1187	kmsg_dump(KMSG_DUMP_SHUTDOWN);
1188	machine_kexec(kexec_image);
1189
1190#ifdef CONFIG_KEXEC_JUMP
1191	if (kexec_image->preserve_context) {
1192		syscore_resume();
1193 Enable_irqs:
1194		local_irq_enable();
1195 Enable_cpus:
1196		suspend_enable_secondary_cpus();
1197		dpm_resume_start(PMSG_RESTORE);
1198 Resume_devices:
1199		dpm_resume_end(PMSG_RESTORE);
1200 Resume_console:
1201		resume_console();
1202		thaw_processes();
1203 Restore_console:
1204		pm_restore_console();
1205	}
1206#endif
1207
1208 Unlock:
1209	kexec_unlock();
1210	return error;
1211}