Linux v3.15: kernel/kexec.c
   1/*
   2 * kexec.c - kexec system call
   3 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
   4 *
   5 * This source code is licensed under the GNU General Public License,
   6 * Version 2.  See the file COPYING for more details.
   7 */
   8
   9#include <linux/capability.h>
  10#include <linux/mm.h>
  11#include <linux/file.h>
  12#include <linux/slab.h>
  13#include <linux/fs.h>
  14#include <linux/kexec.h>
  15#include <linux/mutex.h>
  16#include <linux/list.h>
  17#include <linux/highmem.h>
  18#include <linux/syscalls.h>
  19#include <linux/reboot.h>
  20#include <linux/ioport.h>
  21#include <linux/hardirq.h>
  22#include <linux/elf.h>
  23#include <linux/elfcore.h>
  24#include <linux/utsname.h>
  25#include <linux/numa.h>
  26#include <linux/suspend.h>
  27#include <linux/device.h>
  28#include <linux/freezer.h>
  29#include <linux/pm.h>
  30#include <linux/cpu.h>
  31#include <linux/console.h>
  32#include <linux/vmalloc.h>
  33#include <linux/swap.h>
  34#include <linux/syscore_ops.h>
  35#include <linux/compiler.h>
  36
  37#include <asm/page.h>
  38#include <asm/uaccess.h>
  39#include <asm/io.h>
  40#include <asm/sections.h>
  41
  42/* Per cpu memory for storing cpu states in case of system crash. */
  43note_buf_t __percpu *crash_notes;
  44
  45/* vmcoreinfo stuff */
  46static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
  47u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
  48size_t vmcoreinfo_size;
  49size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
  50
  51/* Flag to indicate we are going to kexec a new kernel */
  52bool kexec_in_progress = false;
  53
  54/* Location of the reserved area for the crash kernel */
  55struct resource crashk_res = {
  56	.name  = "Crash kernel",
  57	.start = 0,
  58	.end   = 0,
  59	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
  60};
  61struct resource crashk_low_res = {
  62	.name  = "Crash kernel",
  63	.start = 0,
  64	.end   = 0,
  65	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
  66};
  67
  68int kexec_should_crash(struct task_struct *p)
  69{
  70	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
  71		return 1;
  72	return 0;
  73}
  74
  75/*
  76 * When kexec transitions to the new kernel there is a one-to-one
  77 * mapping between physical and virtual addresses.  On processors
  78 * where you can disable the MMU this is trivial, and easy.  For
   79 * others it is still a simple, predictable page table to set up.
  80 *
  81 * In that environment kexec copies the new kernel to its final
  82 * resting place.  This means I can only support memory whose
  83 * physical address can fit in an unsigned long.  In particular
  84 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
  85 * If the assembly stub has more restrictive requirements
  86 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
  87 * defined more restrictively in <asm/kexec.h>.
  88 *
  89 * The code for the transition from the current kernel to the
   90 * new kernel is placed in the control_code_buffer, whose size
  91 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
  92 * page of memory is necessary, but some architectures require more.
  93 * Because this memory must be identity mapped in the transition from
  94 * virtual to physical addresses it must live in the range
  95 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
  96 * modifiable.
  97 *
  98 * The assembly stub in the control code buffer is passed a linked list
  99 * of descriptor pages detailing the source pages of the new kernel,
 100 * and the destination addresses of those source pages.  As this data
 101 * structure is not used in the context of the current OS, it must
 102 * be self-contained.
 103 *
 104 * The code has been made to work with highmem pages and will use a
 105 * destination page in its final resting place (if it happens
 106 * to allocate it).  The end product of this is that most of the
 107 * physical address space, and most of RAM can be used.
 108 *
 109 * Future directions include:
 110 *  - allocating a page table with the control code buffer identity
 111 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 112 *    reliable.
 113 */
 114
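As a rough sketch of the mechanism described above: each entry in the descriptor list is a physical address with flag bits in its low bits. The walker below is hypothetical (the real walk is done by the architecture's relocation stub under an identity mapping) and leans on kernel helpers such as phys_to_virt() and copy_page(); the IND_* flag values are the ones this file uses via <linux/kexec.h>.

/*
 * Hypothetical sketch, not kernel code: walking the kimage entry list.
 *   IND_DESTINATION 0x1 - entry sets the copy cursor
 *   IND_INDIRECTION 0x2 - entry points at the next page of entries
 *   IND_DONE        0x4 - terminates the list
 *   IND_SOURCE      0x8 - entry is the next source page to copy
 */
static void walk_kimage_list(kimage_entry_t *entry)
{
	unsigned long dest = 0;

	for (;;) {
		kimage_entry_t e = *entry++;

		if (e & IND_DESTINATION)
			dest = e & PAGE_MASK;	/* start copying here */
		else if (e & IND_INDIRECTION)
			entry = phys_to_virt(e & PAGE_MASK);	/* next page of entries */
		else if (e & IND_SOURCE) {
			copy_page(phys_to_virt(dest),
				  phys_to_virt(e & PAGE_MASK));
			dest += PAGE_SIZE;	/* destination advances page by page */
		} else
			break;	/* IND_DONE (or a malformed entry) */
	}
}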
 115/*
  116 * KIMAGE_NO_DEST is an impossible destination address, used for
 117 * allocating pages whose destination address we do not care about.
 118 */
 119#define KIMAGE_NO_DEST (-1UL)
 120
 121static int kimage_is_destination_range(struct kimage *image,
 122				       unsigned long start, unsigned long end);
 123static struct page *kimage_alloc_page(struct kimage *image,
 124				       gfp_t gfp_mask,
 125				       unsigned long dest);
 126
 127static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 128	                    unsigned long nr_segments,
 129                            struct kexec_segment __user *segments)
 130{
 131	size_t segment_bytes;
 132	struct kimage *image;
 133	unsigned long i;
 134	int result;
 135
 136	/* Allocate a controlling structure */
 137	result = -ENOMEM;
 138	image = kzalloc(sizeof(*image), GFP_KERNEL);
 139	if (!image)
 140		goto out;
 141
 142	image->head = 0;
 143	image->entry = &image->head;
 144	image->last_entry = &image->head;
 145	image->control_page = ~0; /* By default this does not apply */
 146	image->start = entry;
 147	image->type = KEXEC_TYPE_DEFAULT;
 148
 149	/* Initialize the list of control pages */
 150	INIT_LIST_HEAD(&image->control_pages);
 151
 152	/* Initialize the list of destination pages */
 153	INIT_LIST_HEAD(&image->dest_pages);
 154
 155	/* Initialize the list of unusable pages */
 156	INIT_LIST_HEAD(&image->unuseable_pages);
 157
 158	/* Read in the segments */
 159	image->nr_segments = nr_segments;
 160	segment_bytes = nr_segments * sizeof(*segments);
 161	result = copy_from_user(image->segment, segments, segment_bytes);
 162	if (result) {
 163		result = -EFAULT;
 164		goto out;
 165	}
 166
 167	/*
 168	 * Verify we have good destination addresses.  The caller is
 169	 * responsible for making certain we don't attempt to load
 170	 * the new image into invalid or reserved areas of RAM.  This
 171	 * just verifies it is an address we can use.
 172	 *
 173	 * Since the kernel does everything in page size chunks ensure
 174	 * the destination addresses are page aligned.  Too many
  175 * special cases crop up when we don't do this.  The most
 176	 * insidious is getting overlapping destination addresses
 177	 * simply because addresses are changed to page size
 178	 * granularity.
 179	 */
 180	result = -EADDRNOTAVAIL;
 181	for (i = 0; i < nr_segments; i++) {
 182		unsigned long mstart, mend;
 183
 184		mstart = image->segment[i].mem;
 185		mend   = mstart + image->segment[i].memsz;
 186		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
 187			goto out;
 188		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
 189			goto out;
 190	}
 191
 192	/* Verify our destination addresses do not overlap.
  193 * If we allowed overlapping destination addresses
  194 * through, very weird things can happen with no
 195	 * easy explanation as one segment stops on another.
 196	 */
 197	result = -EINVAL;
 198	for (i = 0; i < nr_segments; i++) {
 199		unsigned long mstart, mend;
 200		unsigned long j;
 201
 202		mstart = image->segment[i].mem;
 203		mend   = mstart + image->segment[i].memsz;
 204		for (j = 0; j < i; j++) {
 205			unsigned long pstart, pend;
 206			pstart = image->segment[j].mem;
 207			pend   = pstart + image->segment[j].memsz;
 208			/* Do the segments overlap ? */
 209			if ((mend > pstart) && (mstart < pend))
 210				goto out;
 211		}
 212	}
 213
 214	/* Ensure our buffer sizes are strictly less than
 215	 * our memory sizes.  This should always be the case,
 216	 * and it is easier to check up front than to be surprised
 217	 * later on.
 218	 */
 219	result = -EINVAL;
 220	for (i = 0; i < nr_segments; i++) {
 221		if (image->segment[i].bufsz > image->segment[i].memsz)
 222			goto out;
 223	}
 224
 225	result = 0;
 226out:
 227	if (result == 0)
 228		*rimage = image;
 229	else
 230		kfree(image);
 231
 232	return result;
 233
 234}
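One detail worth spelling out: the overlap check above treats each segment as a half-open interval [mstart, mend), and two such intervals intersect exactly when mend > pstart && mstart < pend. A standalone illustration with made-up addresses (plain user-space C, not kernel code):

/* Sketch: the half-open interval overlap test used by do_kimage_alloc(). */
#include <stdbool.h>
#include <stdio.h>

static bool ranges_overlap(unsigned long mstart, unsigned long mend,
			   unsigned long pstart, unsigned long pend)
{
	return (mend > pstart) && (mstart < pend);
}

int main(void)
{
	/* [0x1000,0x3000) and [0x2000,0x4000) share [0x2000,0x3000): overlap */
	printf("%d\n", ranges_overlap(0x1000, 0x3000, 0x2000, 0x4000)); /* 1 */
	/* [0x1000,0x2000) and [0x2000,0x3000) merely touch: no overlap */
	printf("%d\n", ranges_overlap(0x1000, 0x2000, 0x2000, 0x3000)); /* 0 */
	return 0;
}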
 235
 236static void kimage_free_page_list(struct list_head *list);
 237
 238static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 239				unsigned long nr_segments,
 240				struct kexec_segment __user *segments)
 241{
 242	int result;
 243	struct kimage *image;
 244
 245	/* Allocate and initialize a controlling structure */
 246	image = NULL;
 247	result = do_kimage_alloc(&image, entry, nr_segments, segments);
 248	if (result)
 249		goto out;
 250
 251	/*
 252	 * Find a location for the control code buffer, and add it
  253	 * to the vector of segments so that its pages will also be
 254	 * counted as destination pages.
 255	 */
 256	result = -ENOMEM;
 257	image->control_code_page = kimage_alloc_control_pages(image,
 258					   get_order(KEXEC_CONTROL_PAGE_SIZE));
 259	if (!image->control_code_page) {
 260		printk(KERN_ERR "Could not allocate control_code_buffer\n");
 261		goto out_free;
 262	}
 263
 264	image->swap_page = kimage_alloc_control_pages(image, 0);
 265	if (!image->swap_page) {
 266		printk(KERN_ERR "Could not allocate swap buffer\n");
 267		goto out_free;
 268	}
 269
 270	*rimage = image;
 271	return 0;
 272
 273out_free:
 274	kimage_free_page_list(&image->control_pages);
 275	kfree(image);
 276out:
 277	return result;
 278}
 279
 280static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 281				unsigned long nr_segments,
 282				struct kexec_segment __user *segments)
 283{
 284	int result;
 285	struct kimage *image;
 286	unsigned long i;
 287
 288	image = NULL;
 289	/* Verify we have a valid entry point */
 290	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
 291		result = -EADDRNOTAVAIL;
 292		goto out;
 293	}
 294
 295	/* Allocate and initialize a controlling structure */
 296	result = do_kimage_alloc(&image, entry, nr_segments, segments);
 297	if (result)
 298		goto out;
 299
 300	/* Enable the special crash kernel control page
 301	 * allocation policy.
 302	 */
 303	image->control_page = crashk_res.start;
 304	image->type = KEXEC_TYPE_CRASH;
 305
 306	/*
 307	 * Verify we have good destination addresses.  Normally
 308	 * the caller is responsible for making certain we don't
 309	 * attempt to load the new image into invalid or reserved
 310	 * areas of RAM.  But crash kernels are preloaded into a
  311	 * reserved area of RAM.  We must ensure the addresses
  312	 * are in the reserved area, otherwise preloading the
 313	 * kernel could corrupt things.
 314	 */
 315	result = -EADDRNOTAVAIL;
 316	for (i = 0; i < nr_segments; i++) {
 317		unsigned long mstart, mend;
 318
 319		mstart = image->segment[i].mem;
 320		mend = mstart + image->segment[i].memsz - 1;
 321		/* Ensure we are within the crash kernel limits */
 322		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
 323			goto out_free;
 324	}
 325
 326	/*
  327	 * Find a location for the control code buffer, and add it
  328	 * to the vector of segments so that its pages will also be
 329	 * counted as destination pages.
 330	 */
 331	result = -ENOMEM;
 332	image->control_code_page = kimage_alloc_control_pages(image,
 333					   get_order(KEXEC_CONTROL_PAGE_SIZE));
 334	if (!image->control_code_page) {
 335		printk(KERN_ERR "Could not allocate control_code_buffer\n");
 336		goto out_free;
 337	}
 338
 339	*rimage = image;
 340	return 0;
 341
 342out_free:
 343	kfree(image);
 344out:
 345	return result;
 346}
 347
 348static int kimage_is_destination_range(struct kimage *image,
 349					unsigned long start,
 350					unsigned long end)
 351{
 352	unsigned long i;
 353
 354	for (i = 0; i < image->nr_segments; i++) {
 355		unsigned long mstart, mend;
 356
 357		mstart = image->segment[i].mem;
 358		mend = mstart + image->segment[i].memsz;
 359		if ((end > mstart) && (start < mend))
 360			return 1;
 361	}
 362
 363	return 0;
 364}
 365
 366static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 367{
 368	struct page *pages;
 369
 370	pages = alloc_pages(gfp_mask, order);
 371	if (pages) {
 372		unsigned int count, i;
 373		pages->mapping = NULL;
 374		set_page_private(pages, order);
 375		count = 1 << order;
 376		for (i = 0; i < count; i++)
 377			SetPageReserved(pages + i);
 378	}
 379
 380	return pages;
 381}
 382
 383static void kimage_free_pages(struct page *page)
 384{
 385	unsigned int order, count, i;
 386
 387	order = page_private(page);
 388	count = 1 << order;
 389	for (i = 0; i < count; i++)
 390		ClearPageReserved(page + i);
 391	__free_pages(page, order);
 392}
 393
 394static void kimage_free_page_list(struct list_head *list)
 395{
 396	struct list_head *pos, *next;
 397
 398	list_for_each_safe(pos, next, list) {
 399		struct page *page;
 400
 401		page = list_entry(pos, struct page, lru);
 402		list_del(&page->lru);
 403		kimage_free_pages(page);
 404	}
 405}
 406
 407static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
 408							unsigned int order)
 409{
 410	/* Control pages are special, they are the intermediaries
 411	 * that are needed while we copy the rest of the pages
 412	 * to their final resting place.  As such they must
 413	 * not conflict with either the destination addresses
 414	 * or memory the kernel is already using.
 415	 *
 416	 * The only case where we really need more than one of
 417	 * these are for architectures where we cannot disable
 418	 * the MMU and must instead generate an identity mapped
 419	 * page table for all of the memory.
 420	 *
 421	 * At worst this runs in O(N) of the image size.
 422	 */
 423	struct list_head extra_pages;
 424	struct page *pages;
 425	unsigned int count;
 426
 427	count = 1 << order;
 428	INIT_LIST_HEAD(&extra_pages);
 429
 430	/* Loop while I can allocate a page and the page allocated
 431	 * is a destination page.
 432	 */
 433	do {
 434		unsigned long pfn, epfn, addr, eaddr;
 435
 436		pages = kimage_alloc_pages(GFP_KERNEL, order);
 437		if (!pages)
 438			break;
 439		pfn   = page_to_pfn(pages);
 440		epfn  = pfn + count;
 441		addr  = pfn << PAGE_SHIFT;
 442		eaddr = epfn << PAGE_SHIFT;
 443		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
 444			      kimage_is_destination_range(image, addr, eaddr)) {
 445			list_add(&pages->lru, &extra_pages);
 446			pages = NULL;
 447		}
 448	} while (!pages);
 449
 450	if (pages) {
 451		/* Remember the allocated page... */
 452		list_add(&pages->lru, &image->control_pages);
 453
  454		/* Because the page is already in its destination
 455		 * location we will never allocate another page at
 456		 * that address.  Therefore kimage_alloc_pages
 457		 * will not return it (again) and we don't need
 458		 * to give it an entry in image->segment[].
 459		 */
 460	}
 461	/* Deal with the destination pages I have inadvertently allocated.
 462	 *
 463	 * Ideally I would convert multi-page allocations into single
 464	 * page allocations, and add everything to image->dest_pages.
 465	 *
 466	 * For now it is simpler to just free the pages.
 467	 */
 468	kimage_free_page_list(&extra_pages);
 469
 470	return pages;
 471}
 472
 473static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
 474						      unsigned int order)
 475{
 476	/* Control pages are special, they are the intermediaries
 477	 * that are needed while we copy the rest of the pages
 478	 * to their final resting place.  As such they must
 479	 * not conflict with either the destination addresses
 480	 * or memory the kernel is already using.
 481	 *
  482	 * Control pages are also the only pages we must allocate
 483	 * when loading a crash kernel.  All of the other pages
 484	 * are specified by the segments and we just memcpy
 485	 * into them directly.
 486	 *
 487	 * The only case where we really need more than one of
 488	 * these are for architectures where we cannot disable
 489	 * the MMU and must instead generate an identity mapped
 490	 * page table for all of the memory.
 491	 *
 492	 * Given the low demand this implements a very simple
 493	 * allocator that finds the first hole of the appropriate
 494	 * size in the reserved memory region, and allocates all
 495	 * of the memory up to and including the hole.
 496	 */
 497	unsigned long hole_start, hole_end, size;
 498	struct page *pages;
 499
 500	pages = NULL;
 501	size = (1 << order) << PAGE_SHIFT;
 502	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
 503	hole_end   = hole_start + size - 1;
 504	while (hole_end <= crashk_res.end) {
 505		unsigned long i;
 506
 507		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
 508			break;
 509		/* See if I overlap any of the segments */
 510		for (i = 0; i < image->nr_segments; i++) {
 511			unsigned long mstart, mend;
 512
 513			mstart = image->segment[i].mem;
 514			mend   = mstart + image->segment[i].memsz - 1;
 515			if ((hole_end >= mstart) && (hole_start <= mend)) {
 516				/* Advance the hole to the end of the segment */
 517				hole_start = (mend + (size - 1)) & ~(size - 1);
 518				hole_end   = hole_start + size - 1;
 519				break;
 520			}
 521		}
 522		/* If I don't overlap any segments I have found my hole! */
 523		if (i == image->nr_segments) {
 524			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
 525			break;
 526		}
 527	}
 528	if (pages)
 529		image->control_page = hole_end;
 530
 531	return pages;
 532}
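The hole placement above uses the usual power-of-two round-up idiom: when size is a power of two, (x + size - 1) & ~(size - 1) rounds x up to the next multiple of size, which is what keeps hole_start aligned to the allocation size. A standalone illustration with made-up values:

/* Sketch: the round-up idiom behind hole_start in the crash allocator. */
#include <stdio.h>

static unsigned long round_up_pow2(unsigned long x, unsigned long size)
{
	/* Only valid when size is a power of two. */
	return (x + size - 1) & ~(size - 1);
}

int main(void)
{
	printf("%#lx\n", round_up_pow2(0x12345, 0x1000)); /* 0x13000 */
	printf("%#lx\n", round_up_pow2(0x12000, 0x1000)); /* already aligned: 0x12000 */
	return 0;
}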
 533
 534
 535struct page *kimage_alloc_control_pages(struct kimage *image,
 536					 unsigned int order)
 537{
 538	struct page *pages = NULL;
 539
 540	switch (image->type) {
 541	case KEXEC_TYPE_DEFAULT:
 542		pages = kimage_alloc_normal_control_pages(image, order);
 543		break;
 544	case KEXEC_TYPE_CRASH:
 545		pages = kimage_alloc_crash_control_pages(image, order);
 546		break;
 547	}
 548
 549	return pages;
 550}
 551
 552static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 553{
 554	if (*image->entry != 0)
 555		image->entry++;
 556
 557	if (image->entry == image->last_entry) {
 558		kimage_entry_t *ind_page;
 559		struct page *page;
 560
 561		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
 562		if (!page)
 563			return -ENOMEM;
 564
 565		ind_page = page_address(page);
 566		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
 567		image->entry = ind_page;
 568		image->last_entry = ind_page +
 569				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
 570	}
 571	*image->entry = entry;
 572	image->entry++;
 573	*image->entry = 0;
 574
 575	return 0;
 576}
 577
 578static int kimage_set_destination(struct kimage *image,
 579				   unsigned long destination)
 580{
 581	int result;
 582
 583	destination &= PAGE_MASK;
 584	result = kimage_add_entry(image, destination | IND_DESTINATION);
 585	if (result == 0)
 586		image->destination = destination;
 587
 588	return result;
 589}
 590
 591
 592static int kimage_add_page(struct kimage *image, unsigned long page)
 593{
 594	int result;
 595
 596	page &= PAGE_MASK;
 597	result = kimage_add_entry(image, page | IND_SOURCE);
 598	if (result == 0)
 599		image->destination += PAGE_SIZE;
 600
 601	return result;
 602}
 603
 604
 605static void kimage_free_extra_pages(struct kimage *image)
 606{
 607	/* Walk through and free any extra destination pages I may have */
 608	kimage_free_page_list(&image->dest_pages);
 609
 610	/* Walk through and free any unusable pages I have cached */
 611	kimage_free_page_list(&image->unuseable_pages);
 612
 613}
 614static void kimage_terminate(struct kimage *image)
 615{
 616	if (*image->entry != 0)
 617		image->entry++;
 618
 619	*image->entry = IND_DONE;
 620}
 621
 622#define for_each_kimage_entry(image, ptr, entry) \
 623	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
 624		ptr = (entry & IND_INDIRECTION)? \
 625			phys_to_virt((entry & PAGE_MASK)): ptr +1)
 626
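For readability, the for_each_kimage_entry() macro above behaves like the following open-coded loop (a sketch of the expansion, not code from this file): it visits entries until it hits IND_DONE, hopping to a fresh page of entries whenever the current entry carries IND_INDIRECTION.

/* Sketch: open-coded equivalent of for_each_kimage_entry(image, ptr, entry). */
ptr = &image->head;
while ((entry = *ptr) && !(entry & IND_DONE)) {
	/* ... loop body runs here with the current entry ... */
	if (entry & IND_INDIRECTION)
		ptr = phys_to_virt(entry & PAGE_MASK);	/* follow indirection */
	else
		ptr++;					/* next slot in this page */
}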
 627static void kimage_free_entry(kimage_entry_t entry)
 628{
 629	struct page *page;
 630
 631	page = pfn_to_page(entry >> PAGE_SHIFT);
 632	kimage_free_pages(page);
 633}
 634
 635static void kimage_free(struct kimage *image)
 636{
 637	kimage_entry_t *ptr, entry;
 638	kimage_entry_t ind = 0;
 639
 640	if (!image)
 641		return;
 642
 643	kimage_free_extra_pages(image);
 644	for_each_kimage_entry(image, ptr, entry) {
 645		if (entry & IND_INDIRECTION) {
 646			/* Free the previous indirection page */
 647			if (ind & IND_INDIRECTION)
 648				kimage_free_entry(ind);
 649			/* Save this indirection page until we are
 650			 * done with it.
 651			 */
 652			ind = entry;
 653		}
 654		else if (entry & IND_SOURCE)
 655			kimage_free_entry(entry);
 656	}
 657	/* Free the final indirection page */
 658	if (ind & IND_INDIRECTION)
 659		kimage_free_entry(ind);
 660
 661	/* Handle any machine specific cleanup */
 662	machine_kexec_cleanup(image);
 663
 664	/* Free the kexec control pages... */
 665	kimage_free_page_list(&image->control_pages);
 666	kfree(image);
 667}
 668
 669static kimage_entry_t *kimage_dst_used(struct kimage *image,
 670					unsigned long page)
 671{
 672	kimage_entry_t *ptr, entry;
 673	unsigned long destination = 0;
 674
 675	for_each_kimage_entry(image, ptr, entry) {
 676		if (entry & IND_DESTINATION)
 677			destination = entry & PAGE_MASK;
 678		else if (entry & IND_SOURCE) {
 679			if (page == destination)
 680				return ptr;
 681			destination += PAGE_SIZE;
 682		}
 683	}
 684
 685	return NULL;
 686}
 687
 688static struct page *kimage_alloc_page(struct kimage *image,
 689					gfp_t gfp_mask,
 690					unsigned long destination)
 691{
 692	/*
 693	 * Here we implement safeguards to ensure that a source page
 694	 * is not copied to its destination page before the data on
 695	 * the destination page is no longer useful.
 696	 *
 697	 * To do this we maintain the invariant that a source page is
 698	 * either its own destination page, or it is not a
 699	 * destination page at all.
 700	 *
 701	 * That is slightly stronger than required, but the proof
  702	 * that no problems will occur is trivial, and the
  703	 * implementation is simple to verify.
 704	 *
 705	 * When allocating all pages normally this algorithm will run
 706	 * in O(N) time, but in the worst case it will run in O(N^2)
 707	 * time.   If the runtime is a problem the data structures can
 708	 * be fixed.
 709	 */
 710	struct page *page;
 711	unsigned long addr;
 712
 713	/*
 714	 * Walk through the list of destination pages, and see if I
 715	 * have a match.
 716	 */
 717	list_for_each_entry(page, &image->dest_pages, lru) {
 718		addr = page_to_pfn(page) << PAGE_SHIFT;
 719		if (addr == destination) {
 720			list_del(&page->lru);
 721			return page;
 722		}
 723	}
 724	page = NULL;
 725	while (1) {
 726		kimage_entry_t *old;
 727
 728		/* Allocate a page, if we run out of memory give up */
 729		page = kimage_alloc_pages(gfp_mask, 0);
 730		if (!page)
 731			return NULL;
  732		/* If the page cannot be used, file it away */
 733		if (page_to_pfn(page) >
 734				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
 735			list_add(&page->lru, &image->unuseable_pages);
 736			continue;
 737		}
 738		addr = page_to_pfn(page) << PAGE_SHIFT;
 739
  740		/* If it is the destination page we want, use it */
 741		if (addr == destination)
 742			break;
 743
 744		/* If the page is not a destination page use it */
 745		if (!kimage_is_destination_range(image, addr,
 746						  addr + PAGE_SIZE))
 747			break;
 748
 749		/*
  750		 * I know that the page is someone's destination page.
 751		 * See if there is already a source page for this
 752		 * destination page.  And if so swap the source pages.
 753		 */
 754		old = kimage_dst_used(image, addr);
 755		if (old) {
 756			/* If so move it */
 757			unsigned long old_addr;
 758			struct page *old_page;
 759
 760			old_addr = *old & PAGE_MASK;
 761			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
 762			copy_highpage(page, old_page);
 763			*old = addr | (*old & ~PAGE_MASK);
 764
 765			/* The old page I have found cannot be a
  766			 * destination page, so return it if its
 767			 * gfp_flags honor the ones passed in.
 768			 */
 769			if (!(gfp_mask & __GFP_HIGHMEM) &&
 770			    PageHighMem(old_page)) {
 771				kimage_free_pages(old_page);
 772				continue;
 773			}
 774			addr = old_addr;
 775			page = old_page;
 776			break;
 777		}
 778		else {
  779			/* Place the page on the destination list; I
 780			 * will use it later.
 781			 */
 782			list_add(&page->lru, &image->dest_pages);
 783		}
 784	}
 785
 786	return page;
 787}
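To make the swap above concrete (addresses purely illustrative): suppose the request is for the page destined for 0x300000, the allocator instead returns the page at 0x500000, and 0x500000 is already some entry's destination whose source data lives in another page. kimage_alloc_page() copies that older source page into the page at 0x500000, repoints the old entry's source at 0x500000 (that page is now its own destination, restoring the invariant), and then reuses the displaced older page to satisfy the current request, provided it is compatible with gfp_mask; a highmem page is freed instead when __GFP_HIGHMEM was not requested, and the loop tries again.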
 788
 789static int kimage_load_normal_segment(struct kimage *image,
 790					 struct kexec_segment *segment)
 791{
 792	unsigned long maddr;
 793	size_t ubytes, mbytes;
 794	int result;
 795	unsigned char __user *buf;
 796
 797	result = 0;
 798	buf = segment->buf;
 799	ubytes = segment->bufsz;
 800	mbytes = segment->memsz;
 801	maddr = segment->mem;
 802
 803	result = kimage_set_destination(image, maddr);
 804	if (result < 0)
 805		goto out;
 806
 807	while (mbytes) {
 808		struct page *page;
 809		char *ptr;
 810		size_t uchunk, mchunk;
 811
 812		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
 813		if (!page) {
 814			result  = -ENOMEM;
 815			goto out;
 816		}
 817		result = kimage_add_page(image, page_to_pfn(page)
 818								<< PAGE_SHIFT);
 819		if (result < 0)
 820			goto out;
 821
 822		ptr = kmap(page);
 823		/* Start with a clear page */
 824		clear_page(ptr);
 825		ptr += maddr & ~PAGE_MASK;
 826		mchunk = min_t(size_t, mbytes,
 827				PAGE_SIZE - (maddr & ~PAGE_MASK));
 828		uchunk = min(ubytes, mchunk);
 829
 830		result = copy_from_user(ptr, buf, uchunk);
 831		kunmap(page);
 832		if (result) {
 833			result = -EFAULT;
 834			goto out;
 835		}
 836		ubytes -= uchunk;
 837		maddr  += mchunk;
 838		buf    += mchunk;
 839		mbytes -= mchunk;
 840	}
 841out:
 842	return result;
 843}
 844
 845static int kimage_load_crash_segment(struct kimage *image,
 846					struct kexec_segment *segment)
 847{
  848	/* For crash dump kernels we simply copy the data from
  849	 * user space to its destination.
 850	 * We do things a page at a time for the sake of kmap.
 851	 */
 852	unsigned long maddr;
 853	size_t ubytes, mbytes;
 854	int result;
 855	unsigned char __user *buf;
 856
 857	result = 0;
 858	buf = segment->buf;
 859	ubytes = segment->bufsz;
 860	mbytes = segment->memsz;
 861	maddr = segment->mem;
 862	while (mbytes) {
 863		struct page *page;
 864		char *ptr;
 865		size_t uchunk, mchunk;
 866
 867		page = pfn_to_page(maddr >> PAGE_SHIFT);
 868		if (!page) {
 869			result  = -ENOMEM;
 870			goto out;
 871		}
 872		ptr = kmap(page);
 873		ptr += maddr & ~PAGE_MASK;
 874		mchunk = min_t(size_t, mbytes,
 875				PAGE_SIZE - (maddr & ~PAGE_MASK));
 876		uchunk = min(ubytes, mchunk);
 877		if (mchunk > uchunk) {
 878			/* Zero the trailing part of the page */
 879			memset(ptr + uchunk, 0, mchunk - uchunk);
 880		}
 881		result = copy_from_user(ptr, buf, uchunk);
 882		kexec_flush_icache_page(page);
 883		kunmap(page);
 884		if (result) {
 885			result = -EFAULT;
 886			goto out;
 887		}
 888		ubytes -= uchunk;
 889		maddr  += mchunk;
 890		buf    += mchunk;
 891		mbytes -= mchunk;
 892	}
 893out:
 894	return result;
 895}
 896
 897static int kimage_load_segment(struct kimage *image,
 898				struct kexec_segment *segment)
 899{
 900	int result = -ENOMEM;
 901
 902	switch (image->type) {
 903	case KEXEC_TYPE_DEFAULT:
 904		result = kimage_load_normal_segment(image, segment);
 905		break;
 906	case KEXEC_TYPE_CRASH:
 907		result = kimage_load_crash_segment(image, segment);
 908		break;
 909	}
 910
 911	return result;
 912}
 913
 914/*
 915 * Exec Kernel system call: for obvious reasons only root may call it.
 916 *
 917 * This call breaks up into three pieces.
 918 * - A generic part which loads the new kernel from the current
 919 *   address space, and very carefully places the data in the
 920 *   allocated pages.
 921 *
 922 * - A generic part that interacts with the kernel and tells all of
  923 *   the devices to shut down, preventing ongoing DMAs, and placing
 924 *   the devices in a consistent state so a later kernel can
 925 *   reinitialize them.
 926 *
 927 * - A machine specific part that includes the syscall number
  928 *   and then copies the image to its final destination, and
  929 *   jumps into the image at entry.
 930 *
  931 * kexec does not sync or unmount filesystems, so if you need
  932 * that to happen you need to do it yourself.
 933 */
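For context, user space reaches this entry point through the kexec_load(2) system call, almost always via the kexec(8) tool rather than directly. The fragment below is a hypothetical minimal invocation; load_image(), its arguments, and the single-segment layout are placeholders, and a real loader must prepare an architecture-specific image:

/* Sketch: invoking kexec_load(2) directly (normally kexec(8) does this). */
#include <linux/kexec.h>	/* struct kexec_segment, KEXEC_* flags */
#include <sys/syscall.h>
#include <unistd.h>

static long load_image(void *kernel_buf, size_t kernel_len,
		       unsigned long entry, unsigned long dest)
{
	struct kexec_segment seg = {
		.buf   = kernel_buf,		/* source in our address space */
		.bufsz = kernel_len,
		.mem   = (void *)dest,		/* page-aligned physical destination */
		.memsz = (kernel_len + 4095) & ~4095UL,	/* assumes 4K pages */
	};

	/* Requires CAP_SYS_BOOT; fails with EPERM otherwise. */
	return syscall(SYS_kexec_load, entry, 1UL, &seg, KEXEC_ARCH_DEFAULT);
}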
 934struct kimage *kexec_image;
 935struct kimage *kexec_crash_image;
 936int kexec_load_disabled;
 937
 938static DEFINE_MUTEX(kexec_mutex);
 939
 940SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
 941		struct kexec_segment __user *, segments, unsigned long, flags)
 942{
 943	struct kimage **dest_image, *image;
 944	int result;
 945
 946	/* We only trust the superuser with rebooting the system. */
 947	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
 948		return -EPERM;
 949
 950	/*
 951	 * Verify we have a legal set of flags
 952	 * This leaves us room for future extensions.
 953	 */
 954	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
 955		return -EINVAL;
 956
 957	/* Verify we are on the appropriate architecture */
 958	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
 959		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
 960		return -EINVAL;
 961
 962	/* Put an artificial cap on the number
 963	 * of segments passed to kexec_load.
 964	 */
 965	if (nr_segments > KEXEC_SEGMENT_MAX)
 966		return -EINVAL;
 967
 968	image = NULL;
 969	result = 0;
 970
 971	/* Because we write directly to the reserved memory
 972	 * region when loading crash kernels we need a mutex here to
  973	 * prevent multiple crash kernels from attempting to load
  974	 * simultaneously, and to prevent a crash kernel from loading
  975	 * over the top of an in-use crash kernel.
 976	 *
 977	 * KISS: always take the mutex.
 978	 */
 979	if (!mutex_trylock(&kexec_mutex))
 980		return -EBUSY;
 981
 982	dest_image = &kexec_image;
 983	if (flags & KEXEC_ON_CRASH)
 984		dest_image = &kexec_crash_image;
 985	if (nr_segments > 0) {
 986		unsigned long i;
 987
 988		/* Loading another kernel to reboot into */
 989		if ((flags & KEXEC_ON_CRASH) == 0)
 990			result = kimage_normal_alloc(&image, entry,
 991							nr_segments, segments);
 992		/* Loading another kernel to switch to if this one crashes */
 993		else if (flags & KEXEC_ON_CRASH) {
 994			/* Free any current crash dump kernel before
 995			 * we corrupt it.
 996			 */
 997			kimage_free(xchg(&kexec_crash_image, NULL));
 998			result = kimage_crash_alloc(&image, entry,
 999						     nr_segments, segments);
1000			crash_map_reserved_pages();
1001		}
1002		if (result)
1003			goto out;
1004
1005		if (flags & KEXEC_PRESERVE_CONTEXT)
1006			image->preserve_context = 1;
1007		result = machine_kexec_prepare(image);
1008		if (result)
1009			goto out;
1010
1011		for (i = 0; i < nr_segments; i++) {
1012			result = kimage_load_segment(image, &image->segment[i]);
1013			if (result)
1014				goto out;
1015		}
1016		kimage_terminate(image);
1017		if (flags & KEXEC_ON_CRASH)
1018			crash_unmap_reserved_pages();
1019	}
 1020	/* Install the new kernel, and uninstall the old */
1021	image = xchg(dest_image, image);
1022
1023out:
1024	mutex_unlock(&kexec_mutex);
1025	kimage_free(image);
1026
1027	return result;
1028}
1029
1030/*
1031 * Add and remove page tables for crashkernel memory
1032 *
1033 * Provide an empty default implementation here -- architecture
1034 * code may override this
1035 */
1036void __weak crash_map_reserved_pages(void)
1037{}
1038
1039void __weak crash_unmap_reserved_pages(void)
1040{}
1041
1042#ifdef CONFIG_COMPAT
1043COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
1044		       compat_ulong_t, nr_segments,
1045		       struct compat_kexec_segment __user *, segments,
1046		       compat_ulong_t, flags)
1047{
1048	struct compat_kexec_segment in;
1049	struct kexec_segment out, __user *ksegments;
1050	unsigned long i, result;
1051
1052	/* Don't allow clients that don't understand the native
1053	 * architecture to do anything.
1054	 */
1055	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
1056		return -EINVAL;
1057
1058	if (nr_segments > KEXEC_SEGMENT_MAX)
1059		return -EINVAL;
1060
1061	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
1062	for (i=0; i < nr_segments; i++) {
1063		result = copy_from_user(&in, &segments[i], sizeof(in));
1064		if (result)
1065			return -EFAULT;
1066
1067		out.buf   = compat_ptr(in.buf);
1068		out.bufsz = in.bufsz;
1069		out.mem   = in.mem;
1070		out.memsz = in.memsz;
1071
1072		result = copy_to_user(&ksegments[i], &out, sizeof(out));
1073		if (result)
1074			return -EFAULT;
1075	}
1076
1077	return sys_kexec_load(entry, nr_segments, ksegments, flags);
1078}
1079#endif
1080
1081void crash_kexec(struct pt_regs *regs)
1082{
1083	/* Take the kexec_mutex here to prevent sys_kexec_load
1084	 * running on one cpu from replacing the crash kernel
1085	 * we are using after a panic on a different cpu.
1086	 *
1087	 * If the crash kernel was not located in a fixed area
1088	 * of memory the xchg(&kexec_crash_image) would be
1089	 * sufficient.  But since I reuse the memory...
1090	 */
1091	if (mutex_trylock(&kexec_mutex)) {
1092		if (kexec_crash_image) {
1093			struct pt_regs fixed_regs;
1094
1095			crash_setup_regs(&fixed_regs, regs);
1096			crash_save_vmcoreinfo();
1097			machine_crash_shutdown(&fixed_regs);
1098			machine_kexec(kexec_crash_image);
1099		}
1100		mutex_unlock(&kexec_mutex);
1101	}
1102}
1103
1104size_t crash_get_memory_size(void)
1105{
1106	size_t size = 0;
1107	mutex_lock(&kexec_mutex);
1108	if (crashk_res.end != crashk_res.start)
1109		size = resource_size(&crashk_res);
1110	mutex_unlock(&kexec_mutex);
1111	return size;
1112}
1113
1114void __weak crash_free_reserved_phys_range(unsigned long begin,
1115					   unsigned long end)
1116{
1117	unsigned long addr;
1118
1119	for (addr = begin; addr < end; addr += PAGE_SIZE)
1120		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
1121}
1122
1123int crash_shrink_memory(unsigned long new_size)
1124{
1125	int ret = 0;
1126	unsigned long start, end;
1127	unsigned long old_size;
1128	struct resource *ram_res;
1129
1130	mutex_lock(&kexec_mutex);
1131
1132	if (kexec_crash_image) {
1133		ret = -ENOENT;
1134		goto unlock;
1135	}
1136	start = crashk_res.start;
1137	end = crashk_res.end;
1138	old_size = (end == 0) ? 0 : end - start + 1;
1139	if (new_size >= old_size) {
1140		ret = (new_size == old_size) ? 0 : -EINVAL;
1141		goto unlock;
1142	}
1143
1144	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1145	if (!ram_res) {
1146		ret = -ENOMEM;
1147		goto unlock;
1148	}
1149
1150	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1151	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1152
1153	crash_map_reserved_pages();
1154	crash_free_reserved_phys_range(end, crashk_res.end);
1155
1156	if ((start == end) && (crashk_res.parent != NULL))
1157		release_resource(&crashk_res);
1158
1159	ram_res->start = end;
1160	ram_res->end = crashk_res.end;
1161	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1162	ram_res->name = "System RAM";
1163
1164	crashk_res.end = end - 1;
1165
1166	insert_resource(&iomem_resource, ram_res);
1167	crash_unmap_reserved_pages();
1168
1169unlock:
1170	mutex_unlock(&kexec_mutex);
1171	return ret;
1172}
1173
1174static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1175			    size_t data_len)
1176{
1177	struct elf_note note;
1178
1179	note.n_namesz = strlen(name) + 1;
1180	note.n_descsz = data_len;
1181	note.n_type   = type;
1182	memcpy(buf, &note, sizeof(note));
1183	buf += (sizeof(note) + 3)/4;
1184	memcpy(buf, name, note.n_namesz);
1185	buf += (note.n_namesz + 3)/4;
1186	memcpy(buf, data, note.n_descsz);
1187	buf += (note.n_descsz + 3)/4;
1188
1189	return buf;
1190}
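append_elf_note() lays the data out in the standard ELF note format: a header of three 32-bit words (n_namesz, n_descsz, n_type) followed by the name and then the descriptor, each padded to a 4-byte boundary, which is what the (x + 3)/4 arithmetic on a u32 cursor implements. A hypothetical user-space reader for such a buffer, of the kind a /proc/vmcore consumer might contain:

/* Sketch: walking notes produced by append_elf_note()/final_note(). */
#include <elf.h>	/* Elf32_Nhdr */
#include <stdio.h>

static void walk_notes(const unsigned char *p)
{
	for (;;) {
		const Elf32_Nhdr *nh = (const Elf32_Nhdr *)p;

		/* final_note() writes an all-zero header as terminator. */
		if (!nh->n_namesz && !nh->n_descsz && !nh->n_type)
			break;
		printf("note '%s', type %u, %u desc bytes\n",
		       (const char *)(nh + 1), nh->n_type, nh->n_descsz);
		p += sizeof(*nh)
		     + ((nh->n_namesz + 3) & ~3u)	/* name, 4-byte padded */
		     + ((nh->n_descsz + 3) & ~3u);	/* desc, 4-byte padded */
	}
}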
1191
1192static void final_note(u32 *buf)
1193{
1194	struct elf_note note;
1195
1196	note.n_namesz = 0;
1197	note.n_descsz = 0;
1198	note.n_type   = 0;
1199	memcpy(buf, &note, sizeof(note));
1200}
1201
1202void crash_save_cpu(struct pt_regs *regs, int cpu)
1203{
1204	struct elf_prstatus prstatus;
1205	u32 *buf;
1206
1207	if ((cpu < 0) || (cpu >= nr_cpu_ids))
1208		return;
1209
1210	/* Using ELF notes here is opportunistic.
1211	 * I need a well defined structure format
1212	 * for the data I pass, and I need tags
1213	 * on the data to indicate what information I have
1214	 * squirrelled away.  ELF notes happen to provide
1215	 * all of that, so there is no need to invent something new.
1216	 */
1217	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
1218	if (!buf)
1219		return;
1220	memset(&prstatus, 0, sizeof(prstatus));
1221	prstatus.pr_pid = current->pid;
1222	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1223	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1224		      	      &prstatus, sizeof(prstatus));
1225	final_note(buf);
1226}
1227
1228static int __init crash_notes_memory_init(void)
1229{
1230	/* Allocate memory for saving cpu registers. */
1231	crash_notes = alloc_percpu(note_buf_t);
1232	if (!crash_notes) {
1233		printk("Kexec: Memory allocation for saving cpu register"
1234		" states failed\n");
1235		return -ENOMEM;
1236	}
1237	return 0;
1238}
1239subsys_initcall(crash_notes_memory_init);
1240
1241
1242/*
1243 * parsing the "crashkernel" commandline
1244 *
1245 * this code is intended to be called from architecture specific code
1246 */
1247
1248
1249/*
1250 * This function parses command lines in the format
1251 *
1252 *   crashkernel=ramsize-range:size[,...][@offset]
1253 *
1254 * The function returns 0 on success and -EINVAL on failure.
1255 */
1256static int __init parse_crashkernel_mem(char 			*cmdline,
1257					unsigned long long	system_ram,
1258					unsigned long long	*crash_size,
1259					unsigned long long	*crash_base)
1260{
1261	char *cur = cmdline, *tmp;
1262
1263	/* for each entry of the comma-separated list */
1264	do {
1265		unsigned long long start, end = ULLONG_MAX, size;
1266
1267		/* get the start of the range */
1268		start = memparse(cur, &tmp);
1269		if (cur == tmp) {
1270			pr_warning("crashkernel: Memory value expected\n");
1271			return -EINVAL;
1272		}
1273		cur = tmp;
1274		if (*cur != '-') {
1275			pr_warning("crashkernel: '-' expected\n");
1276			return -EINVAL;
1277		}
1278		cur++;
1279
 1280		/* if no ':' is here, then we read the end */
1281		if (*cur != ':') {
1282			end = memparse(cur, &tmp);
1283			if (cur == tmp) {
1284				pr_warning("crashkernel: Memory "
1285						"value expected\n");
1286				return -EINVAL;
1287			}
1288			cur = tmp;
1289			if (end <= start) {
1290				pr_warning("crashkernel: end <= start\n");
1291				return -EINVAL;
1292			}
1293		}
1294
1295		if (*cur != ':') {
1296			pr_warning("crashkernel: ':' expected\n");
1297			return -EINVAL;
1298		}
1299		cur++;
1300
1301		size = memparse(cur, &tmp);
1302		if (cur == tmp) {
1303			pr_warning("Memory value expected\n");
1304			return -EINVAL;
1305		}
1306		cur = tmp;
1307		if (size >= system_ram) {
1308			pr_warning("crashkernel: invalid size\n");
1309			return -EINVAL;
1310		}
1311
1312		/* match ? */
1313		if (system_ram >= start && system_ram < end) {
1314			*crash_size = size;
1315			break;
1316		}
1317	} while (*cur++ == ',');
1318
1319	if (*crash_size > 0) {
1320		while (*cur && *cur != ' ' && *cur != '@')
1321			cur++;
1322		if (*cur == '@') {
1323			cur++;
1324			*crash_base = memparse(cur, &tmp);
1325			if (cur == tmp) {
1326				pr_warning("Memory value expected "
1327						"after '@'\n");
1328				return -EINVAL;
1329			}
1330		}
1331	}
1332
1333	return 0;
1334}
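To give the range syntax a concrete reading (values illustrative): crashkernel=512M-2G:64M,2G-:128M@16M reserves 64M of crash kernel memory when system RAM falls in [512M, 2G), 128M when it is 2G or more, and places the reservation at physical offset 16M; an omitted range end, as in 2G-, means "no upper bound". The simple form parsed below, crashkernel=128M@16M, reserves a fixed size, and the suffix form crashkernel=256M,high asks for the reservation to be placed high (above 4G on x86_64).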
1335
1336/*
 1337 * This function parses "simple" (old) crashkernel command lines like
1338 *
1339 * 	crashkernel=size[@offset]
1340 *
1341 * It returns 0 on success and -EINVAL on failure.
1342 */
1343static int __init parse_crashkernel_simple(char 		*cmdline,
1344					   unsigned long long 	*crash_size,
1345					   unsigned long long 	*crash_base)
1346{
1347	char *cur = cmdline;
1348
1349	*crash_size = memparse(cmdline, &cur);
1350	if (cmdline == cur) {
1351		pr_warning("crashkernel: memory value expected\n");
1352		return -EINVAL;
1353	}
1354
1355	if (*cur == '@')
1356		*crash_base = memparse(cur+1, &cur);
1357	else if (*cur != ' ' && *cur != '\0') {
1358		pr_warning("crashkernel: unrecognized char\n");
1359		return -EINVAL;
1360	}
1361
1362	return 0;
1363}
1364
1365#define SUFFIX_HIGH 0
1366#define SUFFIX_LOW  1
1367#define SUFFIX_NULL 2
1368static __initdata char *suffix_tbl[] = {
1369	[SUFFIX_HIGH] = ",high",
1370	[SUFFIX_LOW]  = ",low",
1371	[SUFFIX_NULL] = NULL,
1372};
1373
1374/*
 1375 * This function parses "suffix" crashkernel command lines like
1376 *
1377 *	crashkernel=size,[high|low]
1378 *
1379 * It returns 0 on success and -EINVAL on failure.
1380 */
1381static int __init parse_crashkernel_suffix(char *cmdline,
1382					   unsigned long long	*crash_size,
1383					   unsigned long long	*crash_base,
1384					   const char *suffix)
1385{
1386	char *cur = cmdline;
1387
1388	*crash_size = memparse(cmdline, &cur);
1389	if (cmdline == cur) {
1390		pr_warn("crashkernel: memory value expected\n");
1391		return -EINVAL;
1392	}
1393
1394	/* check with suffix */
1395	if (strncmp(cur, suffix, strlen(suffix))) {
1396		pr_warn("crashkernel: unrecognized char\n");
1397		return -EINVAL;
1398	}
1399	cur += strlen(suffix);
1400	if (*cur != ' ' && *cur != '\0') {
1401		pr_warn("crashkernel: unrecognized char\n");
1402		return -EINVAL;
1403	}
1404
1405	return 0;
1406}
1407
1408static __init char *get_last_crashkernel(char *cmdline,
1409			     const char *name,
1410			     const char *suffix)
1411{
1412	char *p = cmdline, *ck_cmdline = NULL;
1413
1414	/* find crashkernel and use the last one if there are more */
1415	p = strstr(p, name);
1416	while (p) {
1417		char *end_p = strchr(p, ' ');
1418		char *q;
1419
1420		if (!end_p)
1421			end_p = p + strlen(p);
1422
1423		if (!suffix) {
1424			int i;
1425
1426			/* skip the one with any known suffix */
1427			for (i = 0; suffix_tbl[i]; i++) {
1428				q = end_p - strlen(suffix_tbl[i]);
1429				if (!strncmp(q, suffix_tbl[i],
1430					     strlen(suffix_tbl[i])))
1431					goto next;
1432			}
1433			ck_cmdline = p;
1434		} else {
1435			q = end_p - strlen(suffix);
1436			if (!strncmp(q, suffix, strlen(suffix)))
1437				ck_cmdline = p;
1438		}
1439next:
1440		p = strstr(p+1, name);
1441	}
1442
1443	if (!ck_cmdline)
1444		return NULL;
1445
1446	return ck_cmdline;
1447}
1448
1449static int __init __parse_crashkernel(char *cmdline,
1450			     unsigned long long system_ram,
1451			     unsigned long long *crash_size,
1452			     unsigned long long *crash_base,
1453			     const char *name,
1454			     const char *suffix)
1455{
1456	char	*first_colon, *first_space;
1457	char	*ck_cmdline;
1458
1459	BUG_ON(!crash_size || !crash_base);
1460	*crash_size = 0;
1461	*crash_base = 0;
1462
1463	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
1464
1465	if (!ck_cmdline)
1466		return -EINVAL;
1467
1468	ck_cmdline += strlen(name);
1469
1470	if (suffix)
1471		return parse_crashkernel_suffix(ck_cmdline, crash_size,
1472				crash_base, suffix);
1473	/*
1474	 * if the commandline contains a ':', then that's the extended
1475	 * syntax -- if not, it must be the classic syntax
1476	 */
1477	first_colon = strchr(ck_cmdline, ':');
1478	first_space = strchr(ck_cmdline, ' ');
1479	if (first_colon && (!first_space || first_colon < first_space))
1480		return parse_crashkernel_mem(ck_cmdline, system_ram,
1481				crash_size, crash_base);
1482
1483	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
1484}
1485
1486/*
 1487 * This function is the entry point for command line parsing and should be
1488 * called from the arch-specific code.
1489 */
1490int __init parse_crashkernel(char *cmdline,
1491			     unsigned long long system_ram,
1492			     unsigned long long *crash_size,
1493			     unsigned long long *crash_base)
1494{
1495	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1496					"crashkernel=", NULL);
1497}
1498
1499int __init parse_crashkernel_high(char *cmdline,
1500			     unsigned long long system_ram,
1501			     unsigned long long *crash_size,
1502			     unsigned long long *crash_base)
1503{
1504	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1505				"crashkernel=", suffix_tbl[SUFFIX_HIGH]);
1506}
1507
1508int __init parse_crashkernel_low(char *cmdline,
1509			     unsigned long long system_ram,
1510			     unsigned long long *crash_size,
1511			     unsigned long long *crash_base)
1512{
1513	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1514				"crashkernel=", suffix_tbl[SUFFIX_LOW]);
1515}
1516
1517static void update_vmcoreinfo_note(void)
1518{
1519	u32 *buf = vmcoreinfo_note;
1520
1521	if (!vmcoreinfo_size)
1522		return;
1523	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1524			      vmcoreinfo_size);
1525	final_note(buf);
1526}
1527
1528void crash_save_vmcoreinfo(void)
1529{
1530	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
1531	update_vmcoreinfo_note();
1532}
1533
1534void vmcoreinfo_append_str(const char *fmt, ...)
1535{
1536	va_list args;
1537	char buf[0x50];
1538	size_t r;
1539
1540	va_start(args, fmt);
1541	r = vscnprintf(buf, sizeof(buf), fmt, args);
1542	va_end(args);
1543
1544	r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
1545
1546	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1547
1548	vmcoreinfo_size += r;
1549}
1550
1551/*
1552 * provide an empty default implementation here -- architecture
1553 * code may override this
1554 */
1555void __weak arch_crash_save_vmcoreinfo(void)
1556{}
1557
1558unsigned long __weak paddr_vmcoreinfo_note(void)
1559{
1560	return __pa((unsigned long)(char *)&vmcoreinfo_note);
1561}
1562
1563static int __init crash_save_vmcoreinfo_init(void)
1564{
1565	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1566	VMCOREINFO_PAGESIZE(PAGE_SIZE);
1567
1568	VMCOREINFO_SYMBOL(init_uts_ns);
1569	VMCOREINFO_SYMBOL(node_online_map);
1570#ifdef CONFIG_MMU
1571	VMCOREINFO_SYMBOL(swapper_pg_dir);
1572#endif
1573	VMCOREINFO_SYMBOL(_stext);
1574	VMCOREINFO_SYMBOL(vmap_area_list);
1575
1576#ifndef CONFIG_NEED_MULTIPLE_NODES
1577	VMCOREINFO_SYMBOL(mem_map);
1578	VMCOREINFO_SYMBOL(contig_page_data);
1579#endif
1580#ifdef CONFIG_SPARSEMEM
1581	VMCOREINFO_SYMBOL(mem_section);
1582	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1583	VMCOREINFO_STRUCT_SIZE(mem_section);
1584	VMCOREINFO_OFFSET(mem_section, section_mem_map);
1585#endif
1586	VMCOREINFO_STRUCT_SIZE(page);
1587	VMCOREINFO_STRUCT_SIZE(pglist_data);
1588	VMCOREINFO_STRUCT_SIZE(zone);
1589	VMCOREINFO_STRUCT_SIZE(free_area);
1590	VMCOREINFO_STRUCT_SIZE(list_head);
1591	VMCOREINFO_SIZE(nodemask_t);
1592	VMCOREINFO_OFFSET(page, flags);
1593	VMCOREINFO_OFFSET(page, _count);
1594	VMCOREINFO_OFFSET(page, mapping);
1595	VMCOREINFO_OFFSET(page, lru);
1596	VMCOREINFO_OFFSET(page, _mapcount);
1597	VMCOREINFO_OFFSET(page, private);
1598	VMCOREINFO_OFFSET(pglist_data, node_zones);
1599	VMCOREINFO_OFFSET(pglist_data, nr_zones);
1600#ifdef CONFIG_FLAT_NODE_MEM_MAP
1601	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1602#endif
1603	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1604	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1605	VMCOREINFO_OFFSET(pglist_data, node_id);
1606	VMCOREINFO_OFFSET(zone, free_area);
1607	VMCOREINFO_OFFSET(zone, vm_stat);
1608	VMCOREINFO_OFFSET(zone, spanned_pages);
1609	VMCOREINFO_OFFSET(free_area, free_list);
1610	VMCOREINFO_OFFSET(list_head, next);
1611	VMCOREINFO_OFFSET(list_head, prev);
1612	VMCOREINFO_OFFSET(vmap_area, va_start);
1613	VMCOREINFO_OFFSET(vmap_area, list);
1614	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1615	log_buf_kexec_setup();
1616	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1617	VMCOREINFO_NUMBER(NR_FREE_PAGES);
1618	VMCOREINFO_NUMBER(PG_lru);
1619	VMCOREINFO_NUMBER(PG_private);
1620	VMCOREINFO_NUMBER(PG_swapcache);
1621	VMCOREINFO_NUMBER(PG_slab);
1622#ifdef CONFIG_MEMORY_FAILURE
1623	VMCOREINFO_NUMBER(PG_hwpoison);
1624#endif
1625	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
1626
1627	arch_crash_save_vmcoreinfo();
1628	update_vmcoreinfo_note();
1629
1630	return 0;
1631}
1632
1633subsys_initcall(crash_save_vmcoreinfo_init);
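The net effect of the initializer above is a flat blob of KEY=VALUE text lines in vmcoreinfo_data, exported to dump tools such as makedumpfile through the ELF note. A hypothetical excerpt, with every address and size purely illustrative:

OSRELEASE=3.15.0
PAGESIZE=4096
SYMBOL(init_uts_ns)=ffffffff81a0f420
SIZE(page)=64
OFFSET(page.flags)=0
LENGTH(zone.free_area)=11
NUMBER(PG_lru)=5
CRASHTIME=1401234567

(CRASHTIME is appended at crash time by crash_save_vmcoreinfo(), not by the initcall.)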
1634
1635/*
1636 * Move into place and start executing a preloaded standalone
1637 * executable.  If nothing was preloaded return an error.
1638 */
1639int kernel_kexec(void)
1640{
1641	int error = 0;
1642
1643	if (!mutex_trylock(&kexec_mutex))
1644		return -EBUSY;
1645	if (!kexec_image) {
1646		error = -EINVAL;
1647		goto Unlock;
1648	}
1649
1650#ifdef CONFIG_KEXEC_JUMP
1651	if (kexec_image->preserve_context) {
1652		lock_system_sleep();
1653		pm_prepare_console();
1654		error = freeze_processes();
1655		if (error) {
1656			error = -EBUSY;
1657			goto Restore_console;
1658		}
1659		suspend_console();
1660		error = dpm_suspend_start(PMSG_FREEZE);
1661		if (error)
1662			goto Resume_console;
1663		/* At this point, dpm_suspend_start() has been called,
1664		 * but *not* dpm_suspend_end(). We *must* call
1665		 * dpm_suspend_end() now.  Otherwise, drivers for
1666		 * some devices (e.g. interrupt controllers) become
1667		 * desynchronized with the actual state of the
1668		 * hardware at resume time, and evil weirdness ensues.
1669		 */
1670		error = dpm_suspend_end(PMSG_FREEZE);
1671		if (error)
1672			goto Resume_devices;
1673		error = disable_nonboot_cpus();
1674		if (error)
1675			goto Enable_cpus;
1676		local_irq_disable();
1677		error = syscore_suspend();
1678		if (error)
1679			goto Enable_irqs;
1680	} else
1681#endif
1682	{
1683		kexec_in_progress = true;
1684		kernel_restart_prepare(NULL);
1685		migrate_to_reboot_cpu();
1686
1687		/*
1688		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1689		 * no further code needs to use CPU hotplug (which is true in
1690		 * the reboot case). However, the kexec path depends on using
1691		 * CPU hotplug again; so re-enable it here.
1692		 */
1693		cpu_hotplug_enable();
1694		printk(KERN_EMERG "Starting new kernel\n");
1695		machine_shutdown();
1696	}
1697
1698	machine_kexec(kexec_image);
1699
1700#ifdef CONFIG_KEXEC_JUMP
1701	if (kexec_image->preserve_context) {
1702		syscore_resume();
1703 Enable_irqs:
1704		local_irq_enable();
1705 Enable_cpus:
1706		enable_nonboot_cpus();
1707		dpm_resume_start(PMSG_RESTORE);
1708 Resume_devices:
1709		dpm_resume_end(PMSG_RESTORE);
1710 Resume_console:
1711		resume_console();
1712		thaw_processes();
1713 Restore_console:
1714		pm_restore_console();
1715		unlock_system_sleep();
1716	}
1717#endif
1718
1719 Unlock:
1720	mutex_unlock(&kexec_mutex);
1721	return error;
1722}
v3.1
   1/*
   2 * kexec.c - kexec system call
   3 * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
   4 *
   5 * This source code is licensed under the GNU General Public License,
   6 * Version 2.  See the file COPYING for more details.
   7 */
   8
   9#include <linux/capability.h>
  10#include <linux/mm.h>
  11#include <linux/file.h>
  12#include <linux/slab.h>
  13#include <linux/fs.h>
  14#include <linux/kexec.h>
  15#include <linux/mutex.h>
  16#include <linux/list.h>
  17#include <linux/highmem.h>
  18#include <linux/syscalls.h>
  19#include <linux/reboot.h>
  20#include <linux/ioport.h>
  21#include <linux/hardirq.h>
  22#include <linux/elf.h>
  23#include <linux/elfcore.h>
  24#include <generated/utsrelease.h>
  25#include <linux/utsname.h>
  26#include <linux/numa.h>
  27#include <linux/suspend.h>
  28#include <linux/device.h>
  29#include <linux/freezer.h>
  30#include <linux/pm.h>
  31#include <linux/cpu.h>
  32#include <linux/console.h>
  33#include <linux/vmalloc.h>
  34#include <linux/swap.h>
  35#include <linux/kmsg_dump.h>
  36#include <linux/syscore_ops.h>
 
  37
  38#include <asm/page.h>
  39#include <asm/uaccess.h>
  40#include <asm/io.h>
  41#include <asm/system.h>
  42#include <asm/sections.h>
  43
  44/* Per cpu memory for storing cpu states in case of system crash. */
  45note_buf_t __percpu *crash_notes;
  46
  47/* vmcoreinfo stuff */
  48static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
  49u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
  50size_t vmcoreinfo_size;
  51size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
  52
 
 
 
  53/* Location of the reserved area for the crash kernel */
  54struct resource crashk_res = {
  55	.name  = "Crash kernel",
  56	.start = 0,
  57	.end   = 0,
  58	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
  59};
 
 
 
 
 
 
  60
  61int kexec_should_crash(struct task_struct *p)
  62{
  63	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
  64		return 1;
  65	return 0;
  66}
  67
  68/*
  69 * When kexec transitions to the new kernel there is a one-to-one
  70 * mapping between physical and virtual addresses.  On processors
  71 * where you can disable the MMU this is trivial, and easy.  For
  72 * others it is still a simple predictable page table to setup.
  73 *
  74 * In that environment kexec copies the new kernel to its final
  75 * resting place.  This means I can only support memory whose
  76 * physical address can fit in an unsigned long.  In particular
  77 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
  78 * If the assembly stub has more restrictive requirements
  79 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
  80 * defined more restrictively in <asm/kexec.h>.
  81 *
  82 * The code for the transition from the current kernel to the
  83 * the new kernel is placed in the control_code_buffer, whose size
  84 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
  85 * page of memory is necessary, but some architectures require more.
  86 * Because this memory must be identity mapped in the transition from
  87 * virtual to physical addresses it must live in the range
  88 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
  89 * modifiable.
  90 *
  91 * The assembly stub in the control code buffer is passed a linked list
  92 * of descriptor pages detailing the source pages of the new kernel,
  93 * and the destination addresses of those source pages.  As this data
  94 * structure is not used in the context of the current OS, it must
  95 * be self-contained.
  96 *
  97 * The code has been made to work with highmem pages and will use a
  98 * destination page in its final resting place (if it happens
  99 * to allocate it).  The end product of this is that most of the
 100 * physical address space, and most of RAM can be used.
 101 *
 102 * Future directions include:
 103 *  - allocating a page table with the control code buffer identity
 104 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 105 *    reliable.
 106 */
 107
 108/*
 109 * KIMAGE_NO_DEST is an impossible destination address..., for
 110 * allocating pages whose destination address we do not care about.
 111 */
 112#define KIMAGE_NO_DEST (-1UL)
 113
 114static int kimage_is_destination_range(struct kimage *image,
 115				       unsigned long start, unsigned long end);
 116static struct page *kimage_alloc_page(struct kimage *image,
 117				       gfp_t gfp_mask,
 118				       unsigned long dest);
 119
 120static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 121	                    unsigned long nr_segments,
 122                            struct kexec_segment __user *segments)
 123{
 124	size_t segment_bytes;
 125	struct kimage *image;
 126	unsigned long i;
 127	int result;
 128
 129	/* Allocate a controlling structure */
 130	result = -ENOMEM;
 131	image = kzalloc(sizeof(*image), GFP_KERNEL);
 132	if (!image)
 133		goto out;
 134
 135	image->head = 0;
 136	image->entry = &image->head;
 137	image->last_entry = &image->head;
 138	image->control_page = ~0; /* By default this does not apply */
 139	image->start = entry;
 140	image->type = KEXEC_TYPE_DEFAULT;
 141
 142	/* Initialize the list of control pages */
 143	INIT_LIST_HEAD(&image->control_pages);
 144
 145	/* Initialize the list of destination pages */
 146	INIT_LIST_HEAD(&image->dest_pages);
 147
 148	/* Initialize the list of unusable pages */
 149	INIT_LIST_HEAD(&image->unuseable_pages);
 150
 151	/* Read in the segments */
 152	image->nr_segments = nr_segments;
 153	segment_bytes = nr_segments * sizeof(*segments);
 154	result = copy_from_user(image->segment, segments, segment_bytes);
 155	if (result) {
 156		result = -EFAULT;
 157		goto out;
 158	}
 159
 160	/*
 161	 * Verify we have good destination addresses.  The caller is
 162	 * responsible for making certain we don't attempt to load
 163	 * the new image into invalid or reserved areas of RAM.  This
 164	 * just verifies it is an address we can use.
 165	 *
 166	 * Since the kernel does everything in page size chunks ensure
 167	 * the destination addresses are page aligned.  Too many
 168	 * special cases crop up when we don't do this.  The most
 169	 * insidious is getting overlapping destination addresses
 170	 * simply because addresses are changed to page size
 171	 * granularity.
 172	 */
 173	result = -EADDRNOTAVAIL;
 174	for (i = 0; i < nr_segments; i++) {
 175		unsigned long mstart, mend;
 176
 177		mstart = image->segment[i].mem;
 178		mend   = mstart + image->segment[i].memsz;
 179		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
 180			goto out;
 181		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
 182			goto out;
 183	}
 184
 185	/* Verify our destination addresses do not overlap.
 186	 * If we allowed overlapping destination addresses
 187	 * through, very weird things can happen with no
 188	 * easy explanation as one segment stomps on another.
 189	 */
 190	result = -EINVAL;
 191	for (i = 0; i < nr_segments; i++) {
 192		unsigned long mstart, mend;
 193		unsigned long j;
 194
 195		mstart = image->segment[i].mem;
 196		mend   = mstart + image->segment[i].memsz;
 197		for (j = 0; j < i; j++) {
 198			unsigned long pstart, pend;
 199			pstart = image->segment[j].mem;
 200			pend   = pstart + image->segment[j].memsz;
 201			/* Do the segments overlap ? */
 202			if ((mend > pstart) && (mstart < pend))
 203				goto out;
 204		}
 205	}
 206
 207	/* Ensure our buffer sizes are strictly less than
 208	 * our memory sizes.  This should always be the case,
 209	 * and it is easier to check up front than to be surprised
 210	 * later on.
 211	 */
 212	result = -EINVAL;
 213	for (i = 0; i < nr_segments; i++) {
 214		if (image->segment[i].bufsz > image->segment[i].memsz)
 215			goto out;
 216	}
 217
 218	result = 0;
 219out:
 220	if (result == 0)
 221		*rimage = image;
 222	else
 223		kfree(image);
 224
 225	return result;
 226
 227}
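
/*
 * A minimal sketch of a segment table that passes the checks above
 * (all names and values hypothetical): both destinations are page
 * aligned, the ranges do not overlap, and bufsz <= memsz in every
 * entry.  The tail of each segment beyond bufsz is zero filled at
 * load time.
 *
 *	struct kexec_segment segs[2] = {
 *		{ .buf = kernel_buf, .bufsz = 0x8000, .mem = 0x100000, .memsz = 0x10000 },
 *		{ .buf = initrd_buf, .bufsz = 0x1000, .mem = 0x200000, .memsz = 0x2000  },
 *	};
 */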
 228
 229static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
 230				unsigned long nr_segments,
 231				struct kexec_segment __user *segments)
 232{
 233	int result;
 234	struct kimage *image;
 235
 236	/* Allocate and initialize a controlling structure */
 237	image = NULL;
 238	result = do_kimage_alloc(&image, entry, nr_segments, segments);
 239	if (result)
 240		goto out;
 241
 242	*rimage = image;
 243
 244	/*
 245	 * Find a location for the control code buffer, and add it to
 246	 * the vector of segments so that its pages will also be
 247	 * counted as destination pages.
 248	 */
 249	result = -ENOMEM;
 250	image->control_code_page = kimage_alloc_control_pages(image,
 251					   get_order(KEXEC_CONTROL_PAGE_SIZE));
 252	if (!image->control_code_page) {
 253		printk(KERN_ERR "Could not allocate control_code_buffer\n");
 254		goto out;
 255	}
 256
 257	image->swap_page = kimage_alloc_control_pages(image, 0);
 258	if (!image->swap_page) {
 259		printk(KERN_ERR "Could not allocate swap buffer\n");
 260		goto out;
 261	}
 262
 263	result = 0;
 264 out:
 265	if (result == 0)
 266		*rimage = image;
 267	else
 268		kfree(image);
 269
 270	return result;
 271}
 272
 273static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
 274				unsigned long nr_segments,
 275				struct kexec_segment __user *segments)
 276{
 277	int result;
 278	struct kimage *image;
 279	unsigned long i;
 280
 281	image = NULL;
 282	/* Verify we have a valid entry point */
 283	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
 284		result = -EADDRNOTAVAIL;
 285		goto out;
 286	}
 287
 288	/* Allocate and initialize a controlling structure */
 289	result = do_kimage_alloc(&image, entry, nr_segments, segments);
 290	if (result)
 291		goto out;
 292
 293	/* Enable the special crash kernel control page
 294	 * allocation policy.
 295	 */
 296	image->control_page = crashk_res.start;
 297	image->type = KEXEC_TYPE_CRASH;
 298
 299	/*
 300	 * Verify we have good destination addresses.  Normally
 301	 * the caller is responsible for making certain we don't
 302	 * attempt to load the new image into invalid or reserved
 303	 * areas of RAM.  But crash kernels are preloaded into a
 304	 * reserved area of RAM.  We must ensure the addresses
 305	 * are in the reserved area otherwise preloading the
 306	 * kernel could corrupt things.
 307	 */
 308	result = -EADDRNOTAVAIL;
 309	for (i = 0; i < nr_segments; i++) {
 310		unsigned long mstart, mend;
 311
 312		mstart = image->segment[i].mem;
 313		mend = mstart + image->segment[i].memsz - 1;
 314		/* Ensure we are within the crash kernel limits */
 315		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
 316			goto out;
 317	}
 318
 319	/*
 320	 * Find a location for the control code buffer, and add it to
 321	 * the vector of segments so that its pages will also be
 322	 * counted as destination pages.
 323	 */
 324	result = -ENOMEM;
 325	image->control_code_page = kimage_alloc_control_pages(image,
 326					   get_order(KEXEC_CONTROL_PAGE_SIZE));
 327	if (!image->control_code_page) {
 328		printk(KERN_ERR "Could not allocate control_code_buffer\n");
 329		goto out;
 330	}
 331
 332	result = 0;
 333out:
 334	if (result == 0)
 335		*rimage = image;
 336	else
 337		kfree(image);
 338
 339	return result;
 340}
 341
 342static int kimage_is_destination_range(struct kimage *image,
 343					unsigned long start,
 344					unsigned long end)
 345{
 346	unsigned long i;
 347
 348	for (i = 0; i < image->nr_segments; i++) {
 349		unsigned long mstart, mend;
 350
 351		mstart = image->segment[i].mem;
 352		mend = mstart + image->segment[i].memsz;
 353		if ((end > mstart) && (start < mend))
 354			return 1;
 355	}
 356
 357	return 0;
 358}
 359
 360static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
 361{
 362	struct page *pages;
 363
 364	pages = alloc_pages(gfp_mask, order);
 365	if (pages) {
 366		unsigned int count, i;
 367		pages->mapping = NULL;
 368		set_page_private(pages, order);
 369		count = 1 << order;
 370		for (i = 0; i < count; i++)
 371			SetPageReserved(pages + i);
 372	}
 373
 374	return pages;
 375}
 376
 377static void kimage_free_pages(struct page *page)
 378{
 379	unsigned int order, count, i;
 380
 381	order = page_private(page);
 382	count = 1 << order;
 383	for (i = 0; i < count; i++)
 384		ClearPageReserved(page + i);
 385	__free_pages(page, order);
 386}
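
/*
 * The pairing above works because set_page_private() stashes the
 * allocation order in the head page, so the free side never has to
 * be told the size.  A hypothetical round trip:
 *
 *	struct page *p = kimage_alloc_pages(GFP_KERNEL, 2);
 *	if (p)
 *		kimage_free_pages(p);	reads order 2 back via page_private()
 */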
 387
 388static void kimage_free_page_list(struct list_head *list)
 389{
 390	struct list_head *pos, *next;
 391
 392	list_for_each_safe(pos, next, list) {
 393		struct page *page;
 394
 395		page = list_entry(pos, struct page, lru);
 396		list_del(&page->lru);
 397		kimage_free_pages(page);
 398	}
 399}
 400
 401static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
 402							unsigned int order)
 403{
 404	/* Control pages are special, they are the intermediaries
 405	 * that are needed while we copy the rest of the pages
 406	 * to their final resting place.  As such they must
 407	 * not conflict with either the destination addresses
 408	 * or memory the kernel is already using.
 409	 *
 410	 * The only case where we really need more than one of
 411	 * these is for architectures where we cannot disable
 412	 * the MMU and must instead generate an identity mapped
 413	 * page table for all of the memory.
 414	 *
 415	 * At worst this runs in O(N) of the image size.
 416	 */
 417	struct list_head extra_pages;
 418	struct page *pages;
 419	unsigned int count;
 420
 421	count = 1 << order;
 422	INIT_LIST_HEAD(&extra_pages);
 423
 424	/* Loop while I can allocate a page and the page allocated
 425	 * is a destination page.
 426	 */
 427	do {
 428		unsigned long pfn, epfn, addr, eaddr;
 429
 430		pages = kimage_alloc_pages(GFP_KERNEL, order);
 431		if (!pages)
 432			break;
 433		pfn   = page_to_pfn(pages);
 434		epfn  = pfn + count;
 435		addr  = pfn << PAGE_SHIFT;
 436		eaddr = epfn << PAGE_SHIFT;
 437		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
 438			      kimage_is_destination_range(image, addr, eaddr)) {
 439			list_add(&pages->lru, &extra_pages);
 440			pages = NULL;
 441		}
 442	} while (!pages);
 443
 444	if (pages) {
 445		/* Remember the allocated page... */
 446		list_add(&pages->lru, &image->control_pages);
 447
 448		/* Because the page is already in its destination
 449		 * location we will never allocate another page at
 450		 * that address.  Therefore kimage_alloc_pages
 451		 * will not return it (again) and we don't need
 452		 * to give it an entry in image->segment[].
 453		 */
 454	}
 455	/* Deal with the destination pages I have inadvertently allocated.
 456	 *
 457	 * Ideally I would convert multi-page allocations into single
 458	 * page allocations, and add everything to image->dest_pages.
 459	 *
 460	 * For now it is simpler to just free the pages.
 461	 */
 462	kimage_free_page_list(&extra_pages);
 463
 464	return pages;
 465}
 466
 467static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
 468						      unsigned int order)
 469{
 470	/* Control pages are special, they are the intermediaries
 471	 * that are needed while we copy the rest of the pages
 472	 * to their final resting place.  As such they must
 473	 * not conflict with either the destination addresses
 474	 * or memory the kernel is already using.
 475	 *
 476	 * Control pages are also the only pages we must allocate
 477	 * when loading a crash kernel.  All of the other pages
 478	 * are specified by the segments and we just memcpy
 479	 * into them directly.
 480	 *
 481	 * The only case where we really need more than one of
 482	 * these is for architectures where we cannot disable
 483	 * the MMU and must instead generate an identity mapped
 484	 * page table for all of the memory.
 485	 *
 486	 * Given the low demand this implements a very simple
 487	 * allocator that finds the first hole of the appropriate
 488	 * size in the reserved memory region, and allocates all
 489	 * of the memory up to and including the hole.
 490	 */
 491	unsigned long hole_start, hole_end, size;
 492	struct page *pages;
 493
 494	pages = NULL;
 495	size = (1 << order) << PAGE_SHIFT;
 496	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
 497	hole_end   = hole_start + size - 1;
 498	while (hole_end <= crashk_res.end) {
 499		unsigned long i;
 500
 501		if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
 502			break;
 503		if (hole_end > crashk_res.end)
 504			break;
 505		/* See if I overlap any of the segments */
 506		for (i = 0; i < image->nr_segments; i++) {
 507			unsigned long mstart, mend;
 508
 509			mstart = image->segment[i].mem;
 510			mend   = mstart + image->segment[i].memsz - 1;
 511			if ((hole_end >= mstart) && (hole_start <= mend)) {
 512				/* Advance the hole to the end of the segment */
 513				hole_start = (mend + (size - 1)) & ~(size - 1);
 514				hole_end   = hole_start + size - 1;
 515				break;
 516			}
 517		}
 518		/* If I don't overlap any segments I have found my hole! */
 519		if (i == image->nr_segments) {
 520			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
 521			break;
 522		}
 523	}
 524	if (pages)
 525		image->control_page = hole_end;
 526
 527	return pages;
 528}
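
/*
 * Worked example of the hole search above (hypothetical numbers):
 * with order = 1 the hole size is 0x2000, so hole_start is
 * image->control_page rounded up to a 0x2000 boundary.  If a segment
 * occupies 0x1000000 - 0x1003fff and control_page starts there, the
 * first probe overlaps it, hole_start advances to 0x1004000, and if
 * nothing else overlaps, that becomes the allocation; control_page is
 * then bumped past the hole for the next caller.
 */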
 529
 530
 531struct page *kimage_alloc_control_pages(struct kimage *image,
 532					 unsigned int order)
 533{
 534	struct page *pages = NULL;
 535
 536	switch (image->type) {
 537	case KEXEC_TYPE_DEFAULT:
 538		pages = kimage_alloc_normal_control_pages(image, order);
 539		break;
 540	case KEXEC_TYPE_CRASH:
 541		pages = kimage_alloc_crash_control_pages(image, order);
 542		break;
 543	}
 544
 545	return pages;
 546}
 547
 548static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
 549{
 550	if (*image->entry != 0)
 551		image->entry++;
 552
 553	if (image->entry == image->last_entry) {
 554		kimage_entry_t *ind_page;
 555		struct page *page;
 556
 557		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
 558		if (!page)
 559			return -ENOMEM;
 560
 561		ind_page = page_address(page);
 562		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
 563		image->entry = ind_page;
 564		image->last_entry = ind_page +
 565				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
 566	}
 567	*image->entry = entry;
 568	image->entry++;
 569	*image->entry = 0;
 570
 571	return 0;
 572}
 573
 574static int kimage_set_destination(struct kimage *image,
 575				   unsigned long destination)
 576{
 577	int result;
 578
 579	destination &= PAGE_MASK;
 580	result = kimage_add_entry(image, destination | IND_DESTINATION);
 581	if (result == 0)
 582		image->destination = destination;
 583
 584	return result;
 585}
 586
 587
 588static int kimage_add_page(struct kimage *image, unsigned long page)
 589{
 590	int result;
 591
 592	page &= PAGE_MASK;
 593	result = kimage_add_entry(image, page | IND_SOURCE);
 594	if (result == 0)
 595		image->destination += PAGE_SIZE;
 596
 597	return result;
 598}
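
/*
 * Sketch of how a two page segment destined for 0x100000 lands in the
 * entry stream (addresses hypothetical):
 *
 *	kimage_set_destination(image, 0x100000);  destination = 0x100000
 *	kimage_add_page(image, src0);             destination = 0x101000
 *	kimage_add_page(image, src1);             destination = 0x102000
 *
 * so a single IND_DESTINATION entry covers a whole run of contiguous
 * IND_SOURCE pages.
 */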
 599
 600
 601static void kimage_free_extra_pages(struct kimage *image)
 602{
 603	/* Walk through and free any extra destination pages I may have */
 604	kimage_free_page_list(&image->dest_pages);
 605
 606	/* Walk through and free any unusable pages I have cached */
 607	kimage_free_page_list(&image->unuseable_pages);
 608
 609}
 610static void kimage_terminate(struct kimage *image)
 611{
 612	if (*image->entry != 0)
 613		image->entry++;
 614
 615	*image->entry = IND_DONE;
 616}
 617
 618#define for_each_kimage_entry(image, ptr, entry) \
 619	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
 620		ptr = (entry & IND_INDIRECTION) ? \
 621			phys_to_virt(entry & PAGE_MASK) : ptr + 1)
 622
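/*
 * Illustrative sketch only (not part of kexec and not called from
 * anywhere): count the source pages of a loaded image by walking its
 * entry stream with for_each_kimage_entry().
 */
static unsigned long __maybe_unused kimage_count_source_pages(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	unsigned long count = 0;

	for_each_kimage_entry(image, ptr, entry)
		if (entry & IND_SOURCE)
			count++;

	return count;
}
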
 623static void kimage_free_entry(kimage_entry_t entry)
 624{
 625	struct page *page;
 626
 627	page = pfn_to_page(entry >> PAGE_SHIFT);
 628	kimage_free_pages(page);
 629}
 630
 631static void kimage_free(struct kimage *image)
 632{
 633	kimage_entry_t *ptr, entry;
 634	kimage_entry_t ind = 0;
 635
 636	if (!image)
 637		return;
 638
 639	kimage_free_extra_pages(image);
 640	for_each_kimage_entry(image, ptr, entry) {
 641		if (entry & IND_INDIRECTION) {
 642			/* Free the previous indirection page */
 643			if (ind & IND_INDIRECTION)
 644				kimage_free_entry(ind);
 645			/* Save this indirection page until we are
 646			 * done with it.
 647			 */
 648			ind = entry;
 649		}
 650		else if (entry & IND_SOURCE)
 651			kimage_free_entry(entry);
 652	}
 653	/* Free the final indirection page */
 654	if (ind & IND_INDIRECTION)
 655		kimage_free_entry(ind);
 656
 657	/* Handle any machine specific cleanup */
 658	machine_kexec_cleanup(image);
 659
 660	/* Free the kexec control pages... */
 661	kimage_free_page_list(&image->control_pages);
 662	kfree(image);
 663}
 664
 665static kimage_entry_t *kimage_dst_used(struct kimage *image,
 666					unsigned long page)
 667{
 668	kimage_entry_t *ptr, entry;
 669	unsigned long destination = 0;
 670
 671	for_each_kimage_entry(image, ptr, entry) {
 672		if (entry & IND_DESTINATION)
 673			destination = entry & PAGE_MASK;
 674		else if (entry & IND_SOURCE) {
 675			if (page == destination)
 676				return ptr;
 677			destination += PAGE_SIZE;
 678		}
 679	}
 680
 681	return NULL;
 682}
 683
 684static struct page *kimage_alloc_page(struct kimage *image,
 685					gfp_t gfp_mask,
 686					unsigned long destination)
 687{
 688	/*
 689	 * Here we implement safeguards to ensure that a source page
 690	 * is not copied to its destination page before the data on
 691	 * the destination page is no longer useful.
 692	 *
 693	 * To do this we maintain the invariant that a source page is
 694	 * either its own destination page, or it is not a
 695	 * destination page at all.
 696	 *
 697	 * That is slightly stronger than required, but the proof
 698	 * that no problems will occur is trivial, and the
 699	 * implementation is simple to verify.
 700	 *
 701	 * When allocating all pages normally this algorithm will run
 702	 * in O(N) time, but in the worst case it will run in O(N^2)
 703	 * time.   If the runtime is a problem the data structures can
 704	 * be fixed.
 705	 */
 706	struct page *page;
 707	unsigned long addr;
 708
 709	/*
 710	 * Walk through the list of destination pages, and see if I
 711	 * have a match.
 712	 */
 713	list_for_each_entry(page, &image->dest_pages, lru) {
 714		addr = page_to_pfn(page) << PAGE_SHIFT;
 715		if (addr == destination) {
 716			list_del(&page->lru);
 717			return page;
 718		}
 719	}
 720	page = NULL;
 721	while (1) {
 722		kimage_entry_t *old;
 723
 724		/* Allocate a page; if we run out of memory give up */
 725		page = kimage_alloc_pages(gfp_mask, 0);
 726		if (!page)
 727			return NULL;
 728		/* If the page cannot be used file it away */
 729		if (page_to_pfn(page) >
 730				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
 731			list_add(&page->lru, &image->unuseable_pages);
 732			continue;
 733		}
 734		addr = page_to_pfn(page) << PAGE_SHIFT;
 735
 736		/* If it is the destination page we want, use it */
 737		if (addr == destination)
 738			break;
 739
 740		/* If the page is not a destination page use it */
 741		if (!kimage_is_destination_range(image, addr,
 742						  addr + PAGE_SIZE))
 743			break;
 744
 745		/*
 746		 * I know that the page is someone's destination page.
 747		 * See if there is already a source page for this
 748		 * destination page.  And if so swap the source pages.
 749		 */
 750		old = kimage_dst_used(image, addr);
 751		if (old) {
 752			/* If so move it */
 753			unsigned long old_addr;
 754			struct page *old_page;
 755
 756			old_addr = *old & PAGE_MASK;
 757			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
 758			copy_highpage(page, old_page);
 759			*old = addr | (*old & ~PAGE_MASK);
 760
 761			/* The old page I have found cannot be a
 762			 * destination page, so return it if its
 763			 * gfp_flags honor the ones passed in.
 764			 */
 765			if (!(gfp_mask & __GFP_HIGHMEM) &&
 766			    PageHighMem(old_page)) {
 767				kimage_free_pages(old_page);
 768				continue;
 769			}
 770			addr = old_addr;
 771			page = old_page;
 772			break;
 773		}
 774		else {
 775			/* Place the page on the destination list; I
 776			 * will use it later.
 777			 */
 778			list_add(&page->lru, &image->dest_pages);
 779		}
 780	}
 781
 782	return page;
 783}
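
/*
 * Trace of the swap case above (hypothetical addresses): suppose the
 * allocator returns the page at 0x200000 and that address is some
 * other source page's destination.  kimage_dst_used() locates the
 * entry whose source currently lives at, say, 0x500000; that source
 * is copied into the page at 0x200000 (its own destination, which
 * keeps the invariant), the entry is repointed, and the page at
 * 0x500000 is handed back to the caller instead, since it cannot be
 * anyone's destination.
 */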
 784
 785static int kimage_load_normal_segment(struct kimage *image,
 786					 struct kexec_segment *segment)
 787{
 788	unsigned long maddr;
 789	unsigned long ubytes, mbytes;
 790	int result;
 791	unsigned char __user *buf;
 792
 793	result = 0;
 794	buf = segment->buf;
 795	ubytes = segment->bufsz;
 796	mbytes = segment->memsz;
 797	maddr = segment->mem;
 798
 799	result = kimage_set_destination(image, maddr);
 800	if (result < 0)
 801		goto out;
 802
 803	while (mbytes) {
 804		struct page *page;
 805		char *ptr;
 806		size_t uchunk, mchunk;
 807
 808		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
 809		if (!page) {
 810			result  = -ENOMEM;
 811			goto out;
 812		}
 813		result = kimage_add_page(image, page_to_pfn(page)
 814								<< PAGE_SHIFT);
 815		if (result < 0)
 816			goto out;
 817
 818		ptr = kmap(page);
 819		/* Start with a clear page */
 820		clear_page(ptr);
 821		ptr += maddr & ~PAGE_MASK;
 822		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
 823		if (mchunk > mbytes)
 824			mchunk = mbytes;
 825
 826		uchunk = mchunk;
 827		if (uchunk > ubytes)
 828			uchunk = ubytes;
 829
 830		result = copy_from_user(ptr, buf, uchunk);
 831		kunmap(page);
 832		if (result) {
 833			result = -EFAULT;
 834			goto out;
 835		}
 836		ubytes -= uchunk;
 837		maddr  += mchunk;
 838		buf    += mchunk;
 839		mbytes -= mchunk;
 840	}
 841out:
 842	return result;
 843}
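
/*
 * Worked example of the chunking above (hypothetical numbers): with
 * maddr = 0x100800, mbytes = 0x2000, ubytes = 0x1200 and 4K pages:
 *
 *	pass 1: mchunk = 0x800 (up to the page boundary), uchunk = 0x800
 *	pass 2: mchunk = 0x1000, uchunk = 0xa00 (user bytes run out)
 *	pass 3: mchunk = 0x800, uchunk = 0 (pure zero fill; the page
 *		was already cleared by clear_page())
 */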
 844
 845static int kimage_load_crash_segment(struct kimage *image,
 846					struct kexec_segment *segment)
 847{
 848	/* For crash dump kernels we simply copy the data from
 849	 * user space to its destination.
 850	 * We do things a page at a time for the sake of kmap.
 851	 */
 852	unsigned long maddr;
 853	unsigned long ubytes, mbytes;
 854	int result;
 855	unsigned char __user *buf;
 856
 857	result = 0;
 858	buf = segment->buf;
 859	ubytes = segment->bufsz;
 860	mbytes = segment->memsz;
 861	maddr = segment->mem;
 862	while (mbytes) {
 863		struct page *page;
 864		char *ptr;
 865		size_t uchunk, mchunk;
 866
 867		page = pfn_to_page(maddr >> PAGE_SHIFT);
 868		if (!page) {
 869			result  = -ENOMEM;
 870			goto out;
 871		}
 872		ptr = kmap(page);
 873		ptr += maddr & ~PAGE_MASK;
 874		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
 875		if (mchunk > mbytes)
 876			mchunk = mbytes;
 877
 878		uchunk = mchunk;
 879		if (uchunk > ubytes) {
 880			uchunk = ubytes;
 881			/* Zero the trailing part of the page */
 882			memset(ptr + uchunk, 0, mchunk - uchunk);
 883		}
 884		result = copy_from_user(ptr, buf, uchunk);
 885		kexec_flush_icache_page(page);
 886		kunmap(page);
 887		if (result) {
 888			result = -EFAULT;
 889			goto out;
 890		}
 891		ubytes -= uchunk;
 892		maddr  += mchunk;
 893		buf    += mchunk;
 894		mbytes -= mchunk;
 895	}
 896out:
 897	return result;
 898}
 899
 900static int kimage_load_segment(struct kimage *image,
 901				struct kexec_segment *segment)
 902{
 903	int result = -ENOMEM;
 904
 905	switch (image->type) {
 906	case KEXEC_TYPE_DEFAULT:
 907		result = kimage_load_normal_segment(image, segment);
 908		break;
 909	case KEXEC_TYPE_CRASH:
 910		result = kimage_load_crash_segment(image, segment);
 911		break;
 912	}
 913
 914	return result;
 915}
 916
 917/*
 918 * Exec Kernel system call: for obvious reasons only root may call it.
 919 *
 920 * This call breaks up into three pieces.
 921 * - A generic part which loads the new kernel from the current
 922 *   address space, and very carefully places the data in the
 923 *   allocated pages.
 924 *
 925 * - A generic part that interacts with the kernel and tells all of
 926 *   the devices to shut down, preventing ongoing DMAs and placing
 927 *   the devices in a consistent state so a later kernel can
 928 *   reinitialize them.
 929 *
 930 * - A machine specific part that includes the syscall number
 931 *   and then copies the image to its final destination and
 932 *   jumps into the image at entry.
 933 *
 934 * kexec does not sync or unmount filesystems, so if you need
 935 * that to happen you must do it yourself.
 936 */
 937struct kimage *kexec_image;
 938struct kimage *kexec_crash_image;
 939
 940static DEFINE_MUTEX(kexec_mutex);
 941
 942SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
 943		struct kexec_segment __user *, segments, unsigned long, flags)
 944{
 945	struct kimage **dest_image, *image;
 946	int result;
 947
 948	/* We only trust the superuser with rebooting the system. */
 949	if (!capable(CAP_SYS_BOOT))
 950		return -EPERM;
 951
 952	/*
 953	 * Verify we have a legal set of flags
 954	 * This leaves us room for future extensions.
 955	 */
 956	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
 957		return -EINVAL;
 958
 959	/* Verify we are on the appropriate architecture */
 960	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
 961		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
 962		return -EINVAL;
 963
 964	/* Put an artificial cap on the number
 965	 * of segments passed to kexec_load.
 966	 */
 967	if (nr_segments > KEXEC_SEGMENT_MAX)
 968		return -EINVAL;
 969
 970	image = NULL;
 971	result = 0;
 972
 973	/* Because we write directly to the reserved memory
 974	 * region when loading crash kernels we need a mutex here to
 975	 * prevent multiple crash kernels from attempting to load
 976	 * simultaneously, and to prevent a crash kernel from loading
 977	 * over the top of an in-use crash kernel.
 978	 *
 979	 * KISS: always take the mutex.
 980	 */
 981	if (!mutex_trylock(&kexec_mutex))
 982		return -EBUSY;
 983
 984	dest_image = &kexec_image;
 985	if (flags & KEXEC_ON_CRASH)
 986		dest_image = &kexec_crash_image;
 987	if (nr_segments > 0) {
 988		unsigned long i;
 989
 990		/* Loading another kernel to reboot into */
 991		if ((flags & KEXEC_ON_CRASH) == 0)
 992			result = kimage_normal_alloc(&image, entry,
 993							nr_segments, segments);
 994		/* Loading another kernel to switch to if this one crashes */
 995		else if (flags & KEXEC_ON_CRASH) {
 996			/* Free any current crash dump kernel before
 997			 * we corrupt it.
 998			 */
 999			kimage_free(xchg(&kexec_crash_image, NULL));
1000			result = kimage_crash_alloc(&image, entry,
1001						     nr_segments, segments);
1002		}
1003		if (result)
1004			goto out;
1005
1006		if (flags & KEXEC_PRESERVE_CONTEXT)
1007			image->preserve_context = 1;
1008		result = machine_kexec_prepare(image);
1009		if (result)
1010			goto out;
1011
1012		for (i = 0; i < nr_segments; i++) {
1013			result = kimage_load_segment(image, &image->segment[i]);
1014			if (result)
1015				goto out;
1016		}
1017		kimage_terminate(image);
1018	}
1019	/* Install the new kernel and uninstall the old */
1020	image = xchg(dest_image, image);
1021
1022out:
1023	mutex_unlock(&kexec_mutex);
1024	kimage_free(image);
1025
1026	return result;
1027}
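
/*
 * Userspace view (illustrative sketch, not part of this file): the
 * call is normally issued by kexec-tools, but a minimal direct
 * invocation, reusing the hypothetical segs[] table sketched after
 * do_kimage_alloc(), would look like:
 *
 *	if (syscall(__NR_kexec_load, entry, 2, segs,
 *		    KEXEC_ARCH_DEFAULT) == 0)
 *		reboot(LINUX_REBOOT_CMD_KEXEC);
 *
 * The reboot() with LINUX_REBOOT_CMD_KEXEC is what finally reaches
 * kernel_kexec() at the bottom of this file.
 */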
1028
1029#ifdef CONFIG_COMPAT
1030asmlinkage long compat_sys_kexec_load(unsigned long entry,
1031				unsigned long nr_segments,
1032				struct compat_kexec_segment __user *segments,
1033				unsigned long flags)
1034{
1035	struct compat_kexec_segment in;
1036	struct kexec_segment out, __user *ksegments;
1037	unsigned long i, result;
1038
1039	/* Don't allow clients that don't understand the native
1040	 * architecture to do anything.
1041	 */
1042	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
1043		return -EINVAL;
1044
1045	if (nr_segments > KEXEC_SEGMENT_MAX)
1046		return -EINVAL;
1047
1048	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
1049	for (i = 0; i < nr_segments; i++) {
1050		result = copy_from_user(&in, &segments[i], sizeof(in));
1051		if (result)
1052			return -EFAULT;
1053
1054		out.buf   = compat_ptr(in.buf);
1055		out.bufsz = in.bufsz;
1056		out.mem   = in.mem;
1057		out.memsz = in.memsz;
1058
1059		result = copy_to_user(&ksegments[i], &out, sizeof(out));
1060		if (result)
1061			return -EFAULT;
1062	}
1063
1064	return sys_kexec_load(entry, nr_segments, ksegments, flags);
1065}
1066#endif
1067
1068void crash_kexec(struct pt_regs *regs)
1069{
1070	/* Take the kexec_mutex here to prevent sys_kexec_load
1071	 * running on one cpu from replacing the crash kernel
1072	 * we are using after a panic on a different cpu.
1073	 *
1074	 * If the crash kernel was not located in a fixed area
1075	 * of memory the xchg(&kexec_crash_image) would be
1076	 * sufficient.  But since I reuse the memory...
1077	 */
1078	if (mutex_trylock(&kexec_mutex)) {
1079		if (kexec_crash_image) {
1080			struct pt_regs fixed_regs;
1081
1082			kmsg_dump(KMSG_DUMP_KEXEC);
1083
1084			crash_setup_regs(&fixed_regs, regs);
1085			crash_save_vmcoreinfo();
1086			machine_crash_shutdown(&fixed_regs);
1087			machine_kexec(kexec_crash_image);
1088		}
1089		mutex_unlock(&kexec_mutex);
1090	}
1091}
1092
1093size_t crash_get_memory_size(void)
1094{
1095	size_t size = 0;
1096	mutex_lock(&kexec_mutex);
1097	if (crashk_res.end != crashk_res.start)
1098		size = resource_size(&crashk_res);
1099	mutex_unlock(&kexec_mutex);
1100	return size;
1101}
1102
1103void __weak crash_free_reserved_phys_range(unsigned long begin,
1104					   unsigned long end)
1105{
1106	unsigned long addr;
1107
1108	for (addr = begin; addr < end; addr += PAGE_SIZE) {
1109		ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
1110		init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
1111		free_page((unsigned long)__va(addr));
1112		totalram_pages++;
1113	}
1114}
1115
1116int crash_shrink_memory(unsigned long new_size)
1117{
1118	int ret = 0;
1119	unsigned long start, end;
1120
1121	mutex_lock(&kexec_mutex);
1122
1123	if (kexec_crash_image) {
1124		ret = -ENOENT;
1125		goto unlock;
1126	}
1127	start = crashk_res.start;
1128	end = crashk_res.end;
1129
1130	if (new_size >= end - start + 1) {
1131		ret = -EINVAL;
1132		if (new_size == end - start + 1)
1133			ret = 0;
1134		goto unlock;
1135	}
1136
1137	start = roundup(start, PAGE_SIZE);
1138	end = roundup(start + new_size, PAGE_SIZE);
1139
1140	crash_free_reserved_phys_range(end, crashk_res.end);
1141
1142	if ((start == end) && (crashk_res.parent != NULL))
1143		release_resource(&crashk_res);
1144	crashk_res.end = end - 1;
1145
1146unlock:
1147	mutex_unlock(&kexec_mutex);
1148	return ret;
1149}
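
/*
 * Worked example (hypothetical numbers): with crashk_res spanning
 * 0x2000000 - 0x3ffffff (32M) and new_size = 16M, start stays at
 * 0x2000000, end rounds up to 0x3000000, the pages from 0x3000000 to
 * the old crashk_res.end are returned to the page allocator, and
 * crashk_res.end becomes 0x2ffffff.
 */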
1150
1151static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1152			    size_t data_len)
1153{
1154	struct elf_note note;
1155
1156	note.n_namesz = strlen(name) + 1;
1157	note.n_descsz = data_len;
1158	note.n_type   = type;
1159	memcpy(buf, &note, sizeof(note));
1160	buf += (sizeof(note) + 3)/4;
1161	memcpy(buf, name, note.n_namesz);
1162	buf += (note.n_namesz + 3)/4;
1163	memcpy(buf, data, note.n_descsz);
1164	buf += (note.n_descsz + 3)/4;
1165
1166	return buf;
1167}
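
/*
 * Resulting buffer layout for, e.g., append_elf_note(buf, "CORE",
 * NT_PRSTATUS, &prstatus, sizeof(prstatus)) -- each piece is padded
 * to a 4-byte boundary, matching the buf arithmetic above:
 *
 *	struct elf_note { n_namesz = 5, n_descsz = sizeof(prstatus),
 *			  n_type = NT_PRSTATUS }
 *	"CORE\0" plus 3 bytes of padding
 *	the prstatus bytes, padded to a multiple of 4
 */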
1168
1169static void final_note(u32 *buf)
1170{
1171	struct elf_note note;
1172
1173	note.n_namesz = 0;
1174	note.n_descsz = 0;
1175	note.n_type   = 0;
1176	memcpy(buf, &note, sizeof(note));
1177}
1178
1179void crash_save_cpu(struct pt_regs *regs, int cpu)
1180{
1181	struct elf_prstatus prstatus;
1182	u32 *buf;
1183
1184	if ((cpu < 0) || (cpu >= nr_cpu_ids))
1185		return;
1186
1187	/* Using ELF notes here is opportunistic.
1188	 * I need a well defined structure format
1189	 * for the data I pass, and I need tags
1190	 * on the data to indicate what information I have
1191	 * squirrelled away.  ELF notes happen to provide
1192	 * all of that, so there is no need to invent something new.
1193	 */
1194	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
1195	if (!buf)
1196		return;
1197	memset(&prstatus, 0, sizeof(prstatus));
1198	prstatus.pr_pid = current->pid;
1199	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1200	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1201		      	      &prstatus, sizeof(prstatus));
1202	final_note(buf);
1203}
1204
1205static int __init crash_notes_memory_init(void)
1206{
1207	/* Allocate memory for saving cpu registers. */
1208	crash_notes = alloc_percpu(note_buf_t);
1209	if (!crash_notes) {
1210		printk("Kexec: Memory allocation for saving cpu register"
1211		" states failed\n");
1212		return -ENOMEM;
1213	}
1214	return 0;
1215}
1216module_init(crash_notes_memory_init)
1217
1218
1219/*
1220 * parsing the "crashkernel" commandline
1221 *
1222 * this code is intended to be called from architecture specific code
1223 */
1224
1225
1226/*
1227 * This function parses command lines in the format
1228 *
1229 *   crashkernel=ramsize-range:size[,...][@offset]
1230 *
1231 * The function returns 0 on success and -EINVAL on failure.
1232 */
1233static int __init parse_crashkernel_mem(char 			*cmdline,
1234					unsigned long long	system_ram,
1235					unsigned long long	*crash_size,
1236					unsigned long long	*crash_base)
1237{
1238	char *cur = cmdline, *tmp;
1239
1240	/* for each entry of the comma-separated list */
1241	do {
1242		unsigned long long start, end = ULLONG_MAX, size;
1243
1244		/* get the start of the range */
1245		start = memparse(cur, &tmp);
1246		if (cur == tmp) {
1247			pr_warning("crashkernel: Memory value expected\n");
1248			return -EINVAL;
1249		}
1250		cur = tmp;
1251		if (*cur != '-') {
1252			pr_warning("crashkernel: '-' expected\n");
1253			return -EINVAL;
1254		}
1255		cur++;
1256
1257		/* if no ':' is here, then we read the end */
1258		if (*cur != ':') {
1259			end = memparse(cur, &tmp);
1260			if (cur == tmp) {
1261				pr_warning("crashkernel: Memory "
1262						"value expected\n");
1263				return -EINVAL;
1264			}
1265			cur = tmp;
1266			if (end <= start) {
1267				pr_warning("crashkernel: end <= start\n");
1268				return -EINVAL;
1269			}
1270		}
1271
1272		if (*cur != ':') {
1273			pr_warning("crashkernel: ':' expected\n");
1274			return -EINVAL;
1275		}
1276		cur++;
1277
1278		size = memparse(cur, &tmp);
1279		if (cur == tmp) {
1280			pr_warning("Memory value expected\n");
1281			return -EINVAL;
1282		}
1283		cur = tmp;
1284		if (size >= system_ram) {
1285			pr_warning("crashkernel: invalid size\n");
1286			return -EINVAL;
1287		}
1288
1289		/* match ? */
1290		if (system_ram >= start && system_ram < end) {
1291			*crash_size = size;
1292			break;
1293		}
1294	} while (*cur++ == ',');
1295
1296	if (*crash_size > 0) {
1297		while (*cur && *cur != ' ' && *cur != '@')
1298			cur++;
1299		if (*cur == '@') {
1300			cur++;
1301			*crash_base = memparse(cur, &tmp);
1302			if (cur == tmp) {
1303				pr_warning("Memory value expected "
1304						"after '@'\n");
1305				return -EINVAL;
1306			}
1307		}
1308	}
1309
1310	return 0;
1311}
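
/*
 * Example of the range syntax accepted above (sizes hypothetical):
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * On a machine with 4G of RAM the 2G- range matches, so *crash_size
 * becomes 128M and the trailing @16M puts *crash_base at 16M.
 */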
1312
1313/*
1314 * This function parses "simple" (old) crashkernel command lines like
1315 *
1316 * 	crashkernel=size[@offset]
1317 *
1318 * It returns 0 on success and -EINVAL on failure.
1319 */
1320static int __init parse_crashkernel_simple(char 		*cmdline,
1321					   unsigned long long 	*crash_size,
1322					   unsigned long long 	*crash_base)
1323{
1324	char *cur = cmdline;
1325
1326	*crash_size = memparse(cmdline, &cur);
1327	if (cmdline == cur) {
1328		pr_warning("crashkernel: memory value expected\n");
1329		return -EINVAL;
1330	}
1331
1332	if (*cur == '@')
1333		*crash_base = memparse(cur+1, &cur);
1334
1335	return 0;
1336}
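
/*
 * Example (hypothetical): "crashkernel=128M@16M" yields
 * *crash_size = 128M and *crash_base = 16M; without the "@offset"
 * part, crash_base is left at 0 and the arch code picks a location.
 */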
1337
1338/*
1339 * This function is the entry point for command line parsing and should be
1340 * called from the arch-specific code.
1341 */
1342int __init parse_crashkernel(char 		 *cmdline,
1343			     unsigned long long system_ram,
1344			     unsigned long long *crash_size,
1345			     unsigned long long *crash_base)
1346{
1347	char 	*p = cmdline, *ck_cmdline = NULL;
1348	char	*first_colon, *first_space;
1349
1350	BUG_ON(!crash_size || !crash_base);
1351	*crash_size = 0;
1352	*crash_base = 0;
1353
1354	/* find crashkernel and use the last one if there are more */
1355	p = strstr(p, "crashkernel=");
1356	while (p) {
1357		ck_cmdline = p;
1358		p = strstr(p+1, "crashkernel=");
1359	}
1360
1361	if (!ck_cmdline)
1362		return -EINVAL;
1363
1364	ck_cmdline += 12; /* strlen("crashkernel=") */
1365
1366	/*
1367	 * if the commandline contains a ':', then that's the extended
1368	 * syntax -- if not, it must be the classic syntax
1369	 */
1370	first_colon = strchr(ck_cmdline, ':');
1371	first_space = strchr(ck_cmdline, ' ');
1372	if (first_colon && (!first_space || first_colon < first_space))
1373		return parse_crashkernel_mem(ck_cmdline, system_ram,
1374				crash_size, crash_base);
1375	else
1376		return parse_crashkernel_simple(ck_cmdline, crash_size,
1377				crash_base);
1378
1379	return 0;
1380}
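
/*
 * Note the last-one-wins scan above: given a command line such as
 * "crashkernel=64M crashkernel=128M@16M" (hypothetical), only the
 * final crashkernel= option is parsed.
 */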
1381
1382
1383
1384void crash_save_vmcoreinfo(void)
1385{
1386	u32 *buf;
1387
1388	if (!vmcoreinfo_size)
1389		return;
1390
1391	vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
1392
1393	buf = (u32 *)vmcoreinfo_note;
1394
1395	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1396			      vmcoreinfo_size);
1397
1398	final_note(buf);
1399}
1400
1401void vmcoreinfo_append_str(const char *fmt, ...)
1402{
1403	va_list args;
1404	char buf[0x50];
1405	int r;
1406
1407	va_start(args, fmt);
1408	r = vsnprintf(buf, sizeof(buf), fmt, args);
1409	va_end(args);
1410
1411	if (r + vmcoreinfo_size > vmcoreinfo_max_size)
1412		r = vmcoreinfo_max_size - vmcoreinfo_size;
1413
1414	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1415
1416	vmcoreinfo_size += r;
1417}
1418
1419/*
1420 * provide an empty default implementation here -- architecture
1421 * code may override this
1422 */
1423void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
1424{}
1425
1426unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
1427{
1428	return __pa((unsigned long)(char *)&vmcoreinfo_note);
1429}
1430
1431static int __init crash_save_vmcoreinfo_init(void)
1432{
1433	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1434	VMCOREINFO_PAGESIZE(PAGE_SIZE);
1435
1436	VMCOREINFO_SYMBOL(init_uts_ns);
1437	VMCOREINFO_SYMBOL(node_online_map);
1438	VMCOREINFO_SYMBOL(swapper_pg_dir);
1439	VMCOREINFO_SYMBOL(_stext);
1440	VMCOREINFO_SYMBOL(vmlist);
1441
1442#ifndef CONFIG_NEED_MULTIPLE_NODES
1443	VMCOREINFO_SYMBOL(mem_map);
1444	VMCOREINFO_SYMBOL(contig_page_data);
1445#endif
1446#ifdef CONFIG_SPARSEMEM
1447	VMCOREINFO_SYMBOL(mem_section);
1448	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1449	VMCOREINFO_STRUCT_SIZE(mem_section);
1450	VMCOREINFO_OFFSET(mem_section, section_mem_map);
1451#endif
1452	VMCOREINFO_STRUCT_SIZE(page);
1453	VMCOREINFO_STRUCT_SIZE(pglist_data);
1454	VMCOREINFO_STRUCT_SIZE(zone);
1455	VMCOREINFO_STRUCT_SIZE(free_area);
1456	VMCOREINFO_STRUCT_SIZE(list_head);
1457	VMCOREINFO_SIZE(nodemask_t);
1458	VMCOREINFO_OFFSET(page, flags);
1459	VMCOREINFO_OFFSET(page, _count);
1460	VMCOREINFO_OFFSET(page, mapping);
1461	VMCOREINFO_OFFSET(page, lru);
1462	VMCOREINFO_OFFSET(pglist_data, node_zones);
1463	VMCOREINFO_OFFSET(pglist_data, nr_zones);
1464#ifdef CONFIG_FLAT_NODE_MEM_MAP
1465	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1466#endif
1467	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1468	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1469	VMCOREINFO_OFFSET(pglist_data, node_id);
1470	VMCOREINFO_OFFSET(zone, free_area);
1471	VMCOREINFO_OFFSET(zone, vm_stat);
1472	VMCOREINFO_OFFSET(zone, spanned_pages);
1473	VMCOREINFO_OFFSET(free_area, free_list);
1474	VMCOREINFO_OFFSET(list_head, next);
1475	VMCOREINFO_OFFSET(list_head, prev);
1476	VMCOREINFO_OFFSET(vm_struct, addr);
1477	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1478	log_buf_kexec_setup();
1479	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1480	VMCOREINFO_NUMBER(NR_FREE_PAGES);
1481	VMCOREINFO_NUMBER(PG_lru);
1482	VMCOREINFO_NUMBER(PG_private);
1483	VMCOREINFO_NUMBER(PG_swapcache);
1484
1485	arch_crash_save_vmcoreinfo();
1486
1487	return 0;
1488}
1489
1490module_init(crash_save_vmcoreinfo_init)
1491
1492/*
1493 * Move into place and start executing a preloaded standalone
1494 * executable.  If nothing was preloaded return an error.
1495 */
1496int kernel_kexec(void)
1497{
1498	int error = 0;
1499
1500	if (!mutex_trylock(&kexec_mutex))
1501		return -EBUSY;
1502	if (!kexec_image) {
1503		error = -EINVAL;
1504		goto Unlock;
1505	}
1506
1507#ifdef CONFIG_KEXEC_JUMP
1508	if (kexec_image->preserve_context) {
1509		mutex_lock(&pm_mutex);
1510		pm_prepare_console();
1511		error = freeze_processes();
1512		if (error) {
1513			error = -EBUSY;
1514			goto Restore_console;
1515		}
1516		suspend_console();
1517		error = dpm_suspend_start(PMSG_FREEZE);
1518		if (error)
1519			goto Resume_console;
1520		/* At this point, dpm_suspend_start() has been called,
1521		 * but *not* dpm_suspend_noirq(). We *must* call
1522		 * dpm_suspend_noirq() now.  Otherwise, drivers for
1523		 * some devices (e.g. interrupt controllers) become
1524		 * desynchronized with the actual state of the
1525		 * hardware at resume time, and evil weirdness ensues.
1526		 */
1527		error = dpm_suspend_noirq(PMSG_FREEZE);
1528		if (error)
1529			goto Resume_devices;
1530		error = disable_nonboot_cpus();
1531		if (error)
1532			goto Enable_cpus;
1533		local_irq_disable();
1534		error = syscore_suspend();
1535		if (error)
1536			goto Enable_irqs;
1537	} else
1538#endif
1539	{
1540		kernel_restart_prepare(NULL);
1541		printk(KERN_EMERG "Starting new kernel\n");
1542		machine_shutdown();
1543	}
1544
1545	machine_kexec(kexec_image);
1546
1547#ifdef CONFIG_KEXEC_JUMP
1548	if (kexec_image->preserve_context) {
1549		syscore_resume();
1550 Enable_irqs:
1551		local_irq_enable();
1552 Enable_cpus:
1553		enable_nonboot_cpus();
1554		dpm_resume_noirq(PMSG_RESTORE);
1555 Resume_devices:
1556		dpm_resume_end(PMSG_RESTORE);
1557 Resume_console:
1558		resume_console();
1559		thaw_processes();
1560 Restore_console:
1561		pm_restore_console();
1562		mutex_unlock(&pm_mutex);
1563	}
1564#endif
1565
1566 Unlock:
1567	mutex_unlock(&kexec_mutex);
1568	return error;
1569}