1/*
2 * kexec.c - kexec system call
3 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
4 *
5 * This source code is licensed under the GNU General Public License,
6 * Version 2. See the file COPYING for more details.
7 */
8
9#include <linux/capability.h>
10#include <linux/mm.h>
11#include <linux/file.h>
12#include <linux/slab.h>
13#include <linux/fs.h>
14#include <linux/kexec.h>
15#include <linux/mutex.h>
16#include <linux/list.h>
17#include <linux/highmem.h>
18#include <linux/syscalls.h>
19#include <linux/reboot.h>
20#include <linux/ioport.h>
21#include <linux/hardirq.h>
22#include <linux/elf.h>
23#include <linux/elfcore.h>
24#include <generated/utsrelease.h>
25#include <linux/utsname.h>
26#include <linux/numa.h>
27#include <linux/suspend.h>
28#include <linux/device.h>
29#include <linux/freezer.h>
30#include <linux/pm.h>
31#include <linux/cpu.h>
32#include <linux/console.h>
33#include <linux/vmalloc.h>
34#include <linux/swap.h>
35#include <linux/kmsg_dump.h>
36#include <linux/syscore_ops.h>
37
38#include <asm/page.h>
39#include <asm/uaccess.h>
40#include <asm/io.h>
41#include <asm/system.h>
42#include <asm/sections.h>
43
44/* Per cpu memory for storing cpu states in case of system crash. */
45note_buf_t __percpu *crash_notes;
46
47/* vmcoreinfo stuff */
48static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
49u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
50size_t vmcoreinfo_size;
51size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
52
53/* Location of the reserved area for the crash kernel */
54struct resource crashk_res = {
55 .name = "Crash kernel",
56 .start = 0,
57 .end = 0,
58 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
59};
60
61int kexec_should_crash(struct task_struct *p)
62{
63 if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
64 return 1;
65 return 0;
66}
67
68/*
69 * When kexec transitions to the new kernel there is a one-to-one
70 * mapping between physical and virtual addresses. On processors
71 * where you can disable the MMU this is trivial, and easy. For
72 * others it is still a simple predictable page table to setup.
73 *
74 * In that environment kexec copies the new kernel to its final
75 * resting place. This means I can only support memory whose
76 * physical address can fit in an unsigned long. In particular
77 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
78 * If the assembly stub has more restrictive requirements
79 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
80 * defined more restrictively in <asm/kexec.h>.
81 *
82 * The code for the transition from the current kernel to the
 83 * new kernel is placed in the control_code_buffer, whose size
84 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
85 * page of memory is necessary, but some architectures require more.
86 * Because this memory must be identity mapped in the transition from
87 * virtual to physical addresses it must live in the range
88 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
89 * modifiable.
90 *
91 * The assembly stub in the control code buffer is passed a linked list
92 * of descriptor pages detailing the source pages of the new kernel,
93 * and the destination addresses of those source pages. As this data
94 * structure is not used in the context of the current OS, it must
95 * be self-contained.
96 *
97 * The code has been made to work with highmem pages and will use a
98 * destination page in its final resting place (if it happens
99 * to allocate it). The end product of this is that most of the
100 * physical address space, and most of RAM can be used.
101 *
102 * Future directions include:
103 * - allocating a page table with the control code buffer identity
104 * mapped, to simplify machine_kexec and make kexec_on_panic more
105 * reliable.
106 */
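/*
 * For orientation, an illustrative sketch of the descriptor list the
 * assembly stub walks.  It is built below by kimage_add_entry(); the
 * IND_* flags come from <linux/kexec.h>, and dest_phys/src_phys/next_phys
 * are placeholder addresses, not real values:
 *
 *	dest_phys | IND_DESTINATION	set the current copy destination
 *	src_phys  | IND_SOURCE		copy one page there, then advance the
 *					destination by PAGE_SIZE
 *	next_phys | IND_INDIRECTION	continue with the list stored in the
 *					page at next_phys
 *	IND_DONE			end of the list
 */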
107
108/*
109 * KIMAGE_NO_DEST is an impossible destination address..., for
110 * allocating pages whose destination address we do not care about.
111 */
112#define KIMAGE_NO_DEST (-1UL)
113
114static int kimage_is_destination_range(struct kimage *image,
115 unsigned long start, unsigned long end);
116static struct page *kimage_alloc_page(struct kimage *image,
117 gfp_t gfp_mask,
118 unsigned long dest);
119
120static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
121 unsigned long nr_segments,
122 struct kexec_segment __user *segments)
123{
124 size_t segment_bytes;
125 struct kimage *image;
126 unsigned long i;
127 int result;
128
129 /* Allocate a controlling structure */
130 result = -ENOMEM;
131 image = kzalloc(sizeof(*image), GFP_KERNEL);
132 if (!image)
133 goto out;
134
135 image->head = 0;
136 image->entry = &image->head;
137 image->last_entry = &image->head;
138 image->control_page = ~0; /* By default this does not apply */
139 image->start = entry;
140 image->type = KEXEC_TYPE_DEFAULT;
141
142 /* Initialize the list of control pages */
143 INIT_LIST_HEAD(&image->control_pages);
144
145 /* Initialize the list of destination pages */
146 INIT_LIST_HEAD(&image->dest_pages);
147
148 /* Initialize the list of unusable pages */
149 INIT_LIST_HEAD(&image->unuseable_pages);
150
151 /* Read in the segments */
152 image->nr_segments = nr_segments;
153 segment_bytes = nr_segments * sizeof(*segments);
154 result = copy_from_user(image->segment, segments, segment_bytes);
155 if (result) {
156 result = -EFAULT;
157 goto out;
158 }
159
160 /*
161 * Verify we have good destination addresses. The caller is
162 * responsible for making certain we don't attempt to load
163 * the new image into invalid or reserved areas of RAM. This
164 * just verifies it is an address we can use.
165 *
166 * Since the kernel does everything in page size chunks ensure
167 * the destination addresses are page aligned. Too many
 168 * special cases crop up when we don't do this. The most
169 * insidious is getting overlapping destination addresses
170 * simply because addresses are changed to page size
171 * granularity.
172 */
173 result = -EADDRNOTAVAIL;
174 for (i = 0; i < nr_segments; i++) {
175 unsigned long mstart, mend;
176
177 mstart = image->segment[i].mem;
178 mend = mstart + image->segment[i].memsz;
179 if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
180 goto out;
181 if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
182 goto out;
183 }
184
185 /* Verify our destination addresses do not overlap.
 186 * If we allowed overlapping destination addresses through,
 187 * very weird things can happen with no easy explanation as
 188 * one segment stomps on another.
189 */
190 result = -EINVAL;
191 for (i = 0; i < nr_segments; i++) {
192 unsigned long mstart, mend;
193 unsigned long j;
194
195 mstart = image->segment[i].mem;
196 mend = mstart + image->segment[i].memsz;
197 for (j = 0; j < i; j++) {
198 unsigned long pstart, pend;
199 pstart = image->segment[j].mem;
200 pend = pstart + image->segment[j].memsz;
201 /* Do the segments overlap ? */
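			/* Two half-open ranges [mstart, mend) and
			 * [pstart, pend) intersect iff each one starts
			 * before the other ends.
			 */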
202 if ((mend > pstart) && (mstart < pend))
203 goto out;
204 }
205 }
206
207 /* Ensure our buffer sizes are strictly less than
208 * our memory sizes. This should always be the case,
209 * and it is easier to check up front than to be surprised
210 * later on.
211 */
212 result = -EINVAL;
213 for (i = 0; i < nr_segments; i++) {
214 if (image->segment[i].bufsz > image->segment[i].memsz)
215 goto out;
216 }
217
218 result = 0;
219out:
220 if (result == 0)
221 *rimage = image;
222 else
223 kfree(image);
224
225 return result;
226
227}
228
229static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
230 unsigned long nr_segments,
231 struct kexec_segment __user *segments)
232{
233 int result;
234 struct kimage *image;
235
236 /* Allocate and initialize a controlling structure */
237 image = NULL;
238 result = do_kimage_alloc(&image, entry, nr_segments, segments);
239 if (result)
240 goto out;
241
242 *rimage = image;
243
244 /*
245 * Find a location for the control code buffer, and add it
 246 * to the vector of segments so that its pages will also be
247 * counted as destination pages.
248 */
249 result = -ENOMEM;
250 image->control_code_page = kimage_alloc_control_pages(image,
251 get_order(KEXEC_CONTROL_PAGE_SIZE));
252 if (!image->control_code_page) {
253 printk(KERN_ERR "Could not allocate control_code_buffer\n");
254 goto out;
255 }
256
257 image->swap_page = kimage_alloc_control_pages(image, 0);
258 if (!image->swap_page) {
259 printk(KERN_ERR "Could not allocate swap buffer\n");
260 goto out;
261 }
262
263 result = 0;
264 out:
265 if (result == 0)
266 *rimage = image;
267 else
268 kfree(image);
269
270 return result;
271}
272
273static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
274 unsigned long nr_segments,
275 struct kexec_segment __user *segments)
276{
277 int result;
278 struct kimage *image;
279 unsigned long i;
280
281 image = NULL;
282 /* Verify we have a valid entry point */
283 if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
284 result = -EADDRNOTAVAIL;
285 goto out;
286 }
287
288 /* Allocate and initialize a controlling structure */
289 result = do_kimage_alloc(&image, entry, nr_segments, segments);
290 if (result)
291 goto out;
292
293 /* Enable the special crash kernel control page
294 * allocation policy.
295 */
296 image->control_page = crashk_res.start;
297 image->type = KEXEC_TYPE_CRASH;
298
299 /*
300 * Verify we have good destination addresses. Normally
301 * the caller is responsible for making certain we don't
302 * attempt to load the new image into invalid or reserved
303 * areas of RAM. But crash kernels are preloaded into a
304 * reserved area of ram. We must ensure the addresses
305 * are in the reserved area otherwise preloading the
306 * kernel could corrupt things.
307 */
308 result = -EADDRNOTAVAIL;
309 for (i = 0; i < nr_segments; i++) {
310 unsigned long mstart, mend;
311
312 mstart = image->segment[i].mem;
313 mend = mstart + image->segment[i].memsz - 1;
314 /* Ensure we are within the crash kernel limits */
315 if ((mstart < crashk_res.start) || (mend > crashk_res.end))
316 goto out;
317 }
318
319 /*
320 * Find a location for the control code buffer, and add
 321 * it to the vector of segments so that its pages will also be
322 * counted as destination pages.
323 */
324 result = -ENOMEM;
325 image->control_code_page = kimage_alloc_control_pages(image,
326 get_order(KEXEC_CONTROL_PAGE_SIZE));
327 if (!image->control_code_page) {
328 printk(KERN_ERR "Could not allocate control_code_buffer\n");
329 goto out;
330 }
331
332 result = 0;
333out:
334 if (result == 0)
335 *rimage = image;
336 else
337 kfree(image);
338
339 return result;
340}
341
342static int kimage_is_destination_range(struct kimage *image,
343 unsigned long start,
344 unsigned long end)
345{
346 unsigned long i;
347
348 for (i = 0; i < image->nr_segments; i++) {
349 unsigned long mstart, mend;
350
351 mstart = image->segment[i].mem;
352 mend = mstart + image->segment[i].memsz;
353 if ((end > mstart) && (start < mend))
354 return 1;
355 }
356
357 return 0;
358}
359
360static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
361{
362 struct page *pages;
363
364 pages = alloc_pages(gfp_mask, order);
365 if (pages) {
366 unsigned int count, i;
367 pages->mapping = NULL;
368 set_page_private(pages, order);
369 count = 1 << order;
370 for (i = 0; i < count; i++)
371 SetPageReserved(pages + i);
372 }
373
374 return pages;
375}
376
377static void kimage_free_pages(struct page *page)
378{
379 unsigned int order, count, i;
380
381 order = page_private(page);
382 count = 1 << order;
383 for (i = 0; i < count; i++)
384 ClearPageReserved(page + i);
385 __free_pages(page, order);
386}
387
388static void kimage_free_page_list(struct list_head *list)
389{
390 struct list_head *pos, *next;
391
392 list_for_each_safe(pos, next, list) {
393 struct page *page;
394
395 page = list_entry(pos, struct page, lru);
396 list_del(&page->lru);
397 kimage_free_pages(page);
398 }
399}
400
401static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
402 unsigned int order)
403{
404 /* Control pages are special, they are the intermediaries
405 * that are needed while we copy the rest of the pages
406 * to their final resting place. As such they must
407 * not conflict with either the destination addresses
408 * or memory the kernel is already using.
409 *
410 * The only case where we really need more than one of
 411 * these is for architectures where we cannot disable
412 * the MMU and must instead generate an identity mapped
413 * page table for all of the memory.
414 *
415 * At worst this runs in O(N) of the image size.
416 */
417 struct list_head extra_pages;
418 struct page *pages;
419 unsigned int count;
420
421 count = 1 << order;
422 INIT_LIST_HEAD(&extra_pages);
423
424 /* Loop while I can allocate a page and the page allocated
425 * is a destination page.
426 */
427 do {
428 unsigned long pfn, epfn, addr, eaddr;
429
430 pages = kimage_alloc_pages(GFP_KERNEL, order);
431 if (!pages)
432 break;
433 pfn = page_to_pfn(pages);
434 epfn = pfn + count;
435 addr = pfn << PAGE_SHIFT;
436 eaddr = epfn << PAGE_SHIFT;
437 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
438 kimage_is_destination_range(image, addr, eaddr)) {
439 list_add(&pages->lru, &extra_pages);
440 pages = NULL;
441 }
442 } while (!pages);
443
444 if (pages) {
445 /* Remember the allocated page... */
446 list_add(&pages->lru, &image->control_pages);
447
 448 * Because the page is already in its destination
449 * location we will never allocate another page at
450 * that address. Therefore kimage_alloc_pages
451 * will not return it (again) and we don't need
452 * to give it an entry in image->segment[].
453 */
454 }
455 /* Deal with the destination pages I have inadvertently allocated.
456 *
457 * Ideally I would convert multi-page allocations into single
458 * page allocations, and add everything to image->dest_pages.
459 *
460 * For now it is simpler to just free the pages.
461 */
462 kimage_free_page_list(&extra_pages);
463
464 return pages;
465}
466
467static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
468 unsigned int order)
469{
470 /* Control pages are special, they are the intermediaries
471 * that are needed while we copy the rest of the pages
472 * to their final resting place. As such they must
473 * not conflict with either the destination addresses
474 * or memory the kernel is already using.
475 *
 476 * Control pages are also the only pages we must allocate
477 * when loading a crash kernel. All of the other pages
478 * are specified by the segments and we just memcpy
479 * into them directly.
480 *
481 * The only case where we really need more than one of
 482 * these is for architectures where we cannot disable
483 * the MMU and must instead generate an identity mapped
484 * page table for all of the memory.
485 *
486 * Given the low demand this implements a very simple
487 * allocator that finds the first hole of the appropriate
488 * size in the reserved memory region, and allocates all
489 * of the memory up to and including the hole.
490 */
491 unsigned long hole_start, hole_end, size;
492 struct page *pages;
493
494 pages = NULL;
495 size = (1 << order) << PAGE_SHIFT;
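	/* Round the search start up to the next size-aligned boundary.
	 * E.g. (assuming 4K pages) order 1 gives size 0x2000, so a
	 * control_page of 0x1234 yields a hole_start of 0x2000.
	 */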
496 hole_start = (image->control_page + (size - 1)) & ~(size - 1);
497 hole_end = hole_start + size - 1;
498 while (hole_end <= crashk_res.end) {
499 unsigned long i;
500
501 if (hole_end > KEXEC_CONTROL_MEMORY_LIMIT)
502 break;
503 if (hole_end > crashk_res.end)
504 break;
505 /* See if I overlap any of the segments */
506 for (i = 0; i < image->nr_segments; i++) {
507 unsigned long mstart, mend;
508
509 mstart = image->segment[i].mem;
510 mend = mstart + image->segment[i].memsz - 1;
511 if ((hole_end >= mstart) && (hole_start <= mend)) {
512 /* Advance the hole to the end of the segment */
513 hole_start = (mend + (size - 1)) & ~(size - 1);
514 hole_end = hole_start + size - 1;
515 break;
516 }
517 }
518 /* If I don't overlap any segments I have found my hole! */
519 if (i == image->nr_segments) {
520 pages = pfn_to_page(hole_start >> PAGE_SHIFT);
521 break;
522 }
523 }
524 if (pages)
525 image->control_page = hole_end;
526
527 return pages;
528}
529
530
531struct page *kimage_alloc_control_pages(struct kimage *image,
532 unsigned int order)
533{
534 struct page *pages = NULL;
535
536 switch (image->type) {
537 case KEXEC_TYPE_DEFAULT:
538 pages = kimage_alloc_normal_control_pages(image, order);
539 break;
540 case KEXEC_TYPE_CRASH:
541 pages = kimage_alloc_crash_control_pages(image, order);
542 break;
543 }
544
545 return pages;
546}
547
548static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
549{
550 if (*image->entry != 0)
551 image->entry++;
552
553 if (image->entry == image->last_entry) {
554 kimage_entry_t *ind_page;
555 struct page *page;
556
557 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
558 if (!page)
559 return -ENOMEM;
560
561 ind_page = page_address(page);
562 *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
563 image->entry = ind_page;
564 image->last_entry = ind_page +
565 ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
566 }
567 *image->entry = entry;
568 image->entry++;
569 *image->entry = 0;
570
571 return 0;
572}
573
574static int kimage_set_destination(struct kimage *image,
575 unsigned long destination)
576{
577 int result;
578
579 destination &= PAGE_MASK;
580 result = kimage_add_entry(image, destination | IND_DESTINATION);
581 if (result == 0)
582 image->destination = destination;
583
584 return result;
585}
586
587
588static int kimage_add_page(struct kimage *image, unsigned long page)
589{
590 int result;
591
592 page &= PAGE_MASK;
593 result = kimage_add_entry(image, page | IND_SOURCE);
594 if (result == 0)
595 image->destination += PAGE_SIZE;
596
597 return result;
598}
599
600
601static void kimage_free_extra_pages(struct kimage *image)
602{
603 /* Walk through and free any extra destination pages I may have */
604 kimage_free_page_list(&image->dest_pages);
605
606 /* Walk through and free any unusable pages I have cached */
607 kimage_free_page_list(&image->unuseable_pages);
608
609}
610static void kimage_terminate(struct kimage *image)
611{
612 if (*image->entry != 0)
613 image->entry++;
614
615 *image->entry = IND_DONE;
616}
617
618#define for_each_kimage_entry(image, ptr, entry) \
619 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
620 ptr = (entry & IND_INDIRECTION)? \
621 phys_to_virt((entry & PAGE_MASK)): ptr +1)
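/* Walks the list built by kimage_add_entry(): an ordinary entry advances
 * ptr by one slot, an IND_INDIRECTION entry redirects ptr to the page of
 * entries it points at, and IND_DONE (or a zero entry) stops the walk.
 */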
622
623static void kimage_free_entry(kimage_entry_t entry)
624{
625 struct page *page;
626
627 page = pfn_to_page(entry >> PAGE_SHIFT);
628 kimage_free_pages(page);
629}
630
631static void kimage_free(struct kimage *image)
632{
633 kimage_entry_t *ptr, entry;
634 kimage_entry_t ind = 0;
635
636 if (!image)
637 return;
638
639 kimage_free_extra_pages(image);
640 for_each_kimage_entry(image, ptr, entry) {
641 if (entry & IND_INDIRECTION) {
642 /* Free the previous indirection page */
643 if (ind & IND_INDIRECTION)
644 kimage_free_entry(ind);
645 /* Save this indirection page until we are
646 * done with it.
647 */
648 ind = entry;
649 }
650 else if (entry & IND_SOURCE)
651 kimage_free_entry(entry);
652 }
653 /* Free the final indirection page */
654 if (ind & IND_INDIRECTION)
655 kimage_free_entry(ind);
656
657 /* Handle any machine specific cleanup */
658 machine_kexec_cleanup(image);
659
660 /* Free the kexec control pages... */
661 kimage_free_page_list(&image->control_pages);
662 kfree(image);
663}
664
665static kimage_entry_t *kimage_dst_used(struct kimage *image,
666 unsigned long page)
667{
668 kimage_entry_t *ptr, entry;
669 unsigned long destination = 0;
670
671 for_each_kimage_entry(image, ptr, entry) {
672 if (entry & IND_DESTINATION)
673 destination = entry & PAGE_MASK;
674 else if (entry & IND_SOURCE) {
675 if (page == destination)
676 return ptr;
677 destination += PAGE_SIZE;
678 }
679 }
680
681 return NULL;
682}
683
684static struct page *kimage_alloc_page(struct kimage *image,
685 gfp_t gfp_mask,
686 unsigned long destination)
687{
688 /*
689 * Here we implement safeguards to ensure that a source page
690 * is not copied to its destination page before the data on
691 * the destination page is no longer useful.
692 *
693 * To do this we maintain the invariant that a source page is
694 * either its own destination page, or it is not a
695 * destination page at all.
696 *
697 * That is slightly stronger than required, but the proof
 698 * that no problems will occur is trivial, and the
 699 * implementation is simple to verify.
700 *
701 * When allocating all pages normally this algorithm will run
702 * in O(N) time, but in the worst case it will run in O(N^2)
703 * time. If the runtime is a problem the data structures can
704 * be fixed.
705 */
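	/*
	 * Illustration of the hazard this avoids: if a source page holding
	 * data bound for destination D1 happened to sit at some other
	 * segment's destination D2, the stub could overwrite it while
	 * filling D2 before its contents had been copied out to D1.  With
	 * the invariant above the copy order cannot matter.
	 */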
706 struct page *page;
707 unsigned long addr;
708
709 /*
710 * Walk through the list of destination pages, and see if I
711 * have a match.
712 */
713 list_for_each_entry(page, &image->dest_pages, lru) {
714 addr = page_to_pfn(page) << PAGE_SHIFT;
715 if (addr == destination) {
716 list_del(&page->lru);
717 return page;
718 }
719 }
720 page = NULL;
721 while (1) {
722 kimage_entry_t *old;
723
724 /* Allocate a page, if we run out of memory give up */
725 page = kimage_alloc_pages(gfp_mask, 0);
726 if (!page)
727 return NULL;
 728 /* If the page cannot be used, file it away */
729 if (page_to_pfn(page) >
730 (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
731 list_add(&page->lru, &image->unuseable_pages);
732 continue;
733 }
734 addr = page_to_pfn(page) << PAGE_SHIFT;
735
 736 /* If it is the destination page we want, use it */
737 if (addr == destination)
738 break;
739
740 /* If the page is not a destination page use it */
741 if (!kimage_is_destination_range(image, addr,
742 addr + PAGE_SIZE))
743 break;
744
745 /*
 746 * I know that the page is someone's destination page.
747 * See if there is already a source page for this
748 * destination page. And if so swap the source pages.
749 */
750 old = kimage_dst_used(image, addr);
751 if (old) {
752 /* If so move it */
753 unsigned long old_addr;
754 struct page *old_page;
755
756 old_addr = *old & PAGE_MASK;
757 old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
758 copy_highpage(page, old_page);
759 *old = addr | (*old & ~PAGE_MASK);
760
761 /* The old page I have found cannot be a
 762 * destination page, so return it if its
763 * gfp_flags honor the ones passed in.
764 */
765 if (!(gfp_mask & __GFP_HIGHMEM) &&
766 PageHighMem(old_page)) {
767 kimage_free_pages(old_page);
768 continue;
769 }
770 addr = old_addr;
771 page = old_page;
772 break;
773 }
774 else {
 775 /* Place the page on the destination list; I
776 * will use it later.
777 */
778 list_add(&page->lru, &image->dest_pages);
779 }
780 }
781
782 return page;
783}
784
785static int kimage_load_normal_segment(struct kimage *image,
786 struct kexec_segment *segment)
787{
788 unsigned long maddr;
789 unsigned long ubytes, mbytes;
790 int result;
791 unsigned char __user *buf;
792
793 result = 0;
794 buf = segment->buf;
795 ubytes = segment->bufsz;
796 mbytes = segment->memsz;
797 maddr = segment->mem;
798
799 result = kimage_set_destination(image, maddr);
800 if (result < 0)
801 goto out;
802
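	/* Copy one page at a time: each destination page is cleared first,
	 * then filled from the user buffer, so any memsz beyond bufsz ends
	 * up zero-filled.
	 */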
803 while (mbytes) {
804 struct page *page;
805 char *ptr;
806 size_t uchunk, mchunk;
807
808 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
809 if (!page) {
810 result = -ENOMEM;
811 goto out;
812 }
813 result = kimage_add_page(image, page_to_pfn(page)
814 << PAGE_SHIFT);
815 if (result < 0)
816 goto out;
817
818 ptr = kmap(page);
819 /* Start with a clear page */
820 clear_page(ptr);
821 ptr += maddr & ~PAGE_MASK;
822 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
823 if (mchunk > mbytes)
824 mchunk = mbytes;
825
826 uchunk = mchunk;
827 if (uchunk > ubytes)
828 uchunk = ubytes;
829
830 result = copy_from_user(ptr, buf, uchunk);
831 kunmap(page);
832 if (result) {
833 result = -EFAULT;
834 goto out;
835 }
836 ubytes -= uchunk;
837 maddr += mchunk;
838 buf += mchunk;
839 mbytes -= mchunk;
840 }
841out:
842 return result;
843}
844
845static int kimage_load_crash_segment(struct kimage *image,
846 struct kexec_segment *segment)
847{
 848 /* For crash dump kernels we simply copy the data from
 849 * user space to its destination.
850 * We do things a page at a time for the sake of kmap.
851 */
852 unsigned long maddr;
853 unsigned long ubytes, mbytes;
854 int result;
855 unsigned char __user *buf;
856
857 result = 0;
858 buf = segment->buf;
859 ubytes = segment->bufsz;
860 mbytes = segment->memsz;
861 maddr = segment->mem;
862 while (mbytes) {
863 struct page *page;
864 char *ptr;
865 size_t uchunk, mchunk;
866
867 page = pfn_to_page(maddr >> PAGE_SHIFT);
868 if (!page) {
869 result = -ENOMEM;
870 goto out;
871 }
872 ptr = kmap(page);
873 ptr += maddr & ~PAGE_MASK;
874 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
875 if (mchunk > mbytes)
876 mchunk = mbytes;
877
878 uchunk = mchunk;
879 if (uchunk > ubytes) {
880 uchunk = ubytes;
881 /* Zero the trailing part of the page */
882 memset(ptr + uchunk, 0, mchunk - uchunk);
883 }
884 result = copy_from_user(ptr, buf, uchunk);
885 kexec_flush_icache_page(page);
886 kunmap(page);
887 if (result) {
888 result = -EFAULT;
889 goto out;
890 }
891 ubytes -= uchunk;
892 maddr += mchunk;
893 buf += mchunk;
894 mbytes -= mchunk;
895 }
896out:
897 return result;
898}
899
900static int kimage_load_segment(struct kimage *image,
901 struct kexec_segment *segment)
902{
903 int result = -ENOMEM;
904
905 switch (image->type) {
906 case KEXEC_TYPE_DEFAULT:
907 result = kimage_load_normal_segment(image, segment);
908 break;
909 case KEXEC_TYPE_CRASH:
910 result = kimage_load_crash_segment(image, segment);
911 break;
912 }
913
914 return result;
915}
916
917/*
918 * Exec Kernel system call: for obvious reasons only root may call it.
919 *
920 * This call breaks up into three pieces.
921 * - A generic part which loads the new kernel from the current
922 * address space, and very carefully places the data in the
923 * allocated pages.
924 *
925 * - A generic part that interacts with the kernel and tells all of
 926 * the devices to shut down, preventing ongoing DMAs, and placing
927 * the devices in a consistent state so a later kernel can
928 * reinitialize them.
929 *
930 * - A machine specific part that includes the syscall number
 931 * and then copies the image to its final destination, and
 932 * jumps into the image at entry.
933 *
 934 * kexec does not sync or unmount filesystems, so if you need
 935 * that to happen you must do it yourself.
936 */
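/*
 * Purely illustrative, hypothetical userspace sketch of a single-segment
 * load; real loaders such as kexec-tools build several segments plus the
 * architecture specific setup code.  image_buf/image_len (the image in
 * user memory), dest_paddr (a page aligned load address), image_len_rounded
 * (bufsz rounded up to a page) and entry_paddr are placeholders:
 *
 *	struct kexec_segment seg = {
 *		.buf	= image_buf,
 *		.bufsz	= image_len,
 *		.mem	= dest_paddr,
 *		.memsz	= image_len_rounded,
 *	};
 *	syscall(__NR_kexec_load, entry_paddr, 1, &seg, KEXEC_ARCH_DEFAULT);
 */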
937struct kimage *kexec_image;
938struct kimage *kexec_crash_image;
939
940static DEFINE_MUTEX(kexec_mutex);
941
942SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
943 struct kexec_segment __user *, segments, unsigned long, flags)
944{
945 struct kimage **dest_image, *image;
946 int result;
947
948 /* We only trust the superuser with rebooting the system. */
949 if (!capable(CAP_SYS_BOOT))
950 return -EPERM;
951
952 /*
953 * Verify we have a legal set of flags
954 * This leaves us room for future extensions.
955 */
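	/* i.e. every bit outside the KEXEC_ARCH_MASK field must be one of
	 * the recognised KEXEC_* load flags.
	 */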
956 if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
957 return -EINVAL;
958
959 /* Verify we are on the appropriate architecture */
960 if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
961 ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
962 return -EINVAL;
963
964 /* Put an artificial cap on the number
965 * of segments passed to kexec_load.
966 */
967 if (nr_segments > KEXEC_SEGMENT_MAX)
968 return -EINVAL;
969
970 image = NULL;
971 result = 0;
972
973 /* Because we write directly to the reserved memory
974 * region when loading crash kernels we need a mutex here to
975 * prevent multiple crash kernels from attempting to load
976 * simultaneously, and to prevent a crash kernel from loading
 977 * over the top of an in-use crash kernel.
978 *
979 * KISS: always take the mutex.
980 */
981 if (!mutex_trylock(&kexec_mutex))
982 return -EBUSY;
983
984 dest_image = &kexec_image;
985 if (flags & KEXEC_ON_CRASH)
986 dest_image = &kexec_crash_image;
987 if (nr_segments > 0) {
988 unsigned long i;
989
990 /* Loading another kernel to reboot into */
991 if ((flags & KEXEC_ON_CRASH) == 0)
992 result = kimage_normal_alloc(&image, entry,
993 nr_segments, segments);
994 /* Loading another kernel to switch to if this one crashes */
995 else if (flags & KEXEC_ON_CRASH) {
996 /* Free any current crash dump kernel before
997 * we corrupt it.
998 */
999 kimage_free(xchg(&kexec_crash_image, NULL));
1000 result = kimage_crash_alloc(&image, entry,
1001 nr_segments, segments);
1002 }
1003 if (result)
1004 goto out;
1005
1006 if (flags & KEXEC_PRESERVE_CONTEXT)
1007 image->preserve_context = 1;
1008 result = machine_kexec_prepare(image);
1009 if (result)
1010 goto out;
1011
1012 for (i = 0; i < nr_segments; i++) {
1013 result = kimage_load_segment(image, &image->segment[i]);
1014 if (result)
1015 goto out;
1016 }
1017 kimage_terminate(image);
1018 }
1019 /* Install the new kernel, and Uninstall the old */
1020 image = xchg(dest_image, image);
1021
1022out:
1023 mutex_unlock(&kexec_mutex);
1024 kimage_free(image);
1025
1026 return result;
1027}
1028
1029#ifdef CONFIG_COMPAT
1030asmlinkage long compat_sys_kexec_load(unsigned long entry,
1031 unsigned long nr_segments,
1032 struct compat_kexec_segment __user *segments,
1033 unsigned long flags)
1034{
1035 struct compat_kexec_segment in;
1036 struct kexec_segment out, __user *ksegments;
1037 unsigned long i, result;
1038
1039 /* Don't allow clients that don't understand the native
1040 * architecture to do anything.
1041 */
1042 if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
1043 return -EINVAL;
1044
1045 if (nr_segments > KEXEC_SEGMENT_MAX)
1046 return -EINVAL;
1047
1048 ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
1049 for (i=0; i < nr_segments; i++) {
1050 result = copy_from_user(&in, &segments[i], sizeof(in));
1051 if (result)
1052 return -EFAULT;
1053
1054 out.buf = compat_ptr(in.buf);
1055 out.bufsz = in.bufsz;
1056 out.mem = in.mem;
1057 out.memsz = in.memsz;
1058
1059 result = copy_to_user(&ksegments[i], &out, sizeof(out));
1060 if (result)
1061 return -EFAULT;
1062 }
1063
1064 return sys_kexec_load(entry, nr_segments, ksegments, flags);
1065}
1066#endif
1067
1068void crash_kexec(struct pt_regs *regs)
1069{
1070 /* Take the kexec_mutex here to prevent sys_kexec_load
1071 * running on one cpu from replacing the crash kernel
1072 * we are using after a panic on a different cpu.
1073 *
1074 * If the crash kernel was not located in a fixed area
1075 * of memory the xchg(&kexec_crash_image) would be
1076 * sufficient. But since I reuse the memory...
1077 */
1078 if (mutex_trylock(&kexec_mutex)) {
1079 if (kexec_crash_image) {
1080 struct pt_regs fixed_regs;
1081
1082 kmsg_dump(KMSG_DUMP_KEXEC);
1083
1084 crash_setup_regs(&fixed_regs, regs);
1085 crash_save_vmcoreinfo();
1086 machine_crash_shutdown(&fixed_regs);
1087 machine_kexec(kexec_crash_image);
1088 }
1089 mutex_unlock(&kexec_mutex);
1090 }
1091}
1092
1093size_t crash_get_memory_size(void)
1094{
1095 size_t size = 0;
1096 mutex_lock(&kexec_mutex);
1097 if (crashk_res.end != crashk_res.start)
1098 size = resource_size(&crashk_res);
1099 mutex_unlock(&kexec_mutex);
1100 return size;
1101}
1102
1103void __weak crash_free_reserved_phys_range(unsigned long begin,
1104 unsigned long end)
1105{
1106 unsigned long addr;
1107
1108 for (addr = begin; addr < end; addr += PAGE_SIZE) {
1109 ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
1110 init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
1111 free_page((unsigned long)__va(addr));
1112 totalram_pages++;
1113 }
1114}
1115
1116int crash_shrink_memory(unsigned long new_size)
1117{
1118 int ret = 0;
1119 unsigned long start, end;
1120
1121 mutex_lock(&kexec_mutex);
1122
1123 if (kexec_crash_image) {
1124 ret = -ENOENT;
1125 goto unlock;
1126 }
1127 start = crashk_res.start;
1128 end = crashk_res.end;
1129
1130 if (new_size >= end - start + 1) {
1131 ret = -EINVAL;
1132 if (new_size == end - start + 1)
1133 ret = 0;
1134 goto unlock;
1135 }
1136
1137 start = roundup(start, PAGE_SIZE);
1138 end = roundup(start + new_size, PAGE_SIZE);
1139
1140 crash_free_reserved_phys_range(end, crashk_res.end);
1141
1142 if ((start == end) && (crashk_res.parent != NULL))
1143 release_resource(&crashk_res);
1144 crashk_res.end = end - 1;
1145
1146unlock:
1147 mutex_unlock(&kexec_mutex);
1148 return ret;
1149}
1150
1151static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1152 size_t data_len)
1153{
1154 struct elf_note note;
1155
1156 note.n_namesz = strlen(name) + 1;
1157 note.n_descsz = data_len;
1158 note.n_type = type;
 1159 memcpy(buf, &note, sizeof(note));
1160 buf += (sizeof(note) + 3)/4;
1161 memcpy(buf, name, note.n_namesz);
1162 buf += (note.n_namesz + 3)/4;
1163 memcpy(buf, data, note.n_descsz);
1164 buf += (note.n_descsz + 3)/4;
1165
1166 return buf;
1167}
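/*
 * Resulting layout in buf, with the name and the data each advanced to
 * the next 4-byte boundary:
 *
 *	Elf note header		n_namesz, n_descsz, n_type
 *	name			n_namesz bytes, including the trailing NUL
 *	data			n_descsz bytes
 */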
1168
1169static void final_note(u32 *buf)
1170{
1171 struct elf_note note;
1172
1173 note.n_namesz = 0;
1174 note.n_descsz = 0;
1175 note.n_type = 0;
 1176 memcpy(buf, &note, sizeof(note));
1177}
1178
1179void crash_save_cpu(struct pt_regs *regs, int cpu)
1180{
1181 struct elf_prstatus prstatus;
1182 u32 *buf;
1183
1184 if ((cpu < 0) || (cpu >= nr_cpu_ids))
1185 return;
1186
1187 /* Using ELF notes here is opportunistic.
1188 * I need a well defined structure format
1189 * for the data I pass, and I need tags
1190 * on the data to indicate what information I have
1191 * squirrelled away. ELF notes happen to provide
1192 * all of that, so there is no need to invent something new.
1193 */
1194 buf = (u32*)per_cpu_ptr(crash_notes, cpu);
1195 if (!buf)
1196 return;
1197 memset(&prstatus, 0, sizeof(prstatus));
1198 prstatus.pr_pid = current->pid;
1199 elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1200 buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1201 &prstatus, sizeof(prstatus));
1202 final_note(buf);
1203}
1204
1205static int __init crash_notes_memory_init(void)
1206{
1207 /* Allocate memory for saving cpu registers. */
1208 crash_notes = alloc_percpu(note_buf_t);
1209 if (!crash_notes) {
1210 printk("Kexec: Memory allocation for saving cpu register"
1211 " states failed\n");
1212 return -ENOMEM;
1213 }
1214 return 0;
1215}
1216module_init(crash_notes_memory_init)
1217
1218
1219/*
1220 * parsing the "crashkernel" commandline
1221 *
1222 * this code is intended to be called from architecture specific code
1223 */
1224
1225
1226/*
1227 * This function parses command lines in the format
1228 *
1229 * crashkernel=ramsize-range:size[,...][@offset]
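 *
 * For example (illustrative values only):
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M when the system has between 512M and 2G of RAM, 128M when
 * it has 2G or more, and places the reservation at physical offset 16M.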
1230 *
1231 * The function returns 0 on success and -EINVAL on failure.
1232 */
1233static int __init parse_crashkernel_mem(char *cmdline,
1234 unsigned long long system_ram,
1235 unsigned long long *crash_size,
1236 unsigned long long *crash_base)
1237{
1238 char *cur = cmdline, *tmp;
1239
1240 /* for each entry of the comma-separated list */
1241 do {
1242 unsigned long long start, end = ULLONG_MAX, size;
1243
1244 /* get the start of the range */
1245 start = memparse(cur, &tmp);
1246 if (cur == tmp) {
1247 pr_warning("crashkernel: Memory value expected\n");
1248 return -EINVAL;
1249 }
1250 cur = tmp;
1251 if (*cur != '-') {
1252 pr_warning("crashkernel: '-' expected\n");
1253 return -EINVAL;
1254 }
1255 cur++;
1256
 1257 /* if no ':' is here, then we read the end */
1258 if (*cur != ':') {
1259 end = memparse(cur, &tmp);
1260 if (cur == tmp) {
1261 pr_warning("crashkernel: Memory "
1262 "value expected\n");
1263 return -EINVAL;
1264 }
1265 cur = tmp;
1266 if (end <= start) {
1267 pr_warning("crashkernel: end <= start\n");
1268 return -EINVAL;
1269 }
1270 }
1271
1272 if (*cur != ':') {
1273 pr_warning("crashkernel: ':' expected\n");
1274 return -EINVAL;
1275 }
1276 cur++;
1277
1278 size = memparse(cur, &tmp);
1279 if (cur == tmp) {
1280 pr_warning("Memory value expected\n");
1281 return -EINVAL;
1282 }
1283 cur = tmp;
1284 if (size >= system_ram) {
1285 pr_warning("crashkernel: invalid size\n");
1286 return -EINVAL;
1287 }
1288
1289 /* match ? */
1290 if (system_ram >= start && system_ram < end) {
1291 *crash_size = size;
1292 break;
1293 }
1294 } while (*cur++ == ',');
1295
1296 if (*crash_size > 0) {
1297 while (*cur && *cur != ' ' && *cur != '@')
1298 cur++;
1299 if (*cur == '@') {
1300 cur++;
1301 *crash_base = memparse(cur, &tmp);
1302 if (cur == tmp) {
1303 pr_warning("Memory value expected "
1304 "after '@'\n");
1305 return -EINVAL;
1306 }
1307 }
1308 }
1309
1310 return 0;
1311}
1312
1313/*
 1314 * This function parses "simple" (old) crashkernel command lines like
1315 *
1316 * crashkernel=size[@offset]
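 *
 * For example, "crashkernel=128M@16M" (illustrative values) reserves 128M
 * at physical offset 16M.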
1317 *
1318 * It returns 0 on success and -EINVAL on failure.
1319 */
1320static int __init parse_crashkernel_simple(char *cmdline,
1321 unsigned long long *crash_size,
1322 unsigned long long *crash_base)
1323{
1324 char *cur = cmdline;
1325
1326 *crash_size = memparse(cmdline, &cur);
1327 if (cmdline == cur) {
1328 pr_warning("crashkernel: memory value expected\n");
1329 return -EINVAL;
1330 }
1331
1332 if (*cur == '@')
1333 *crash_base = memparse(cur+1, &cur);
1334
1335 return 0;
1336}
1337
1338/*
 1339 * This function is the entry point for command line parsing and should be
1340 * called from the arch-specific code.
1341 */
1342int __init parse_crashkernel(char *cmdline,
1343 unsigned long long system_ram,
1344 unsigned long long *crash_size,
1345 unsigned long long *crash_base)
1346{
1347 char *p = cmdline, *ck_cmdline = NULL;
1348 char *first_colon, *first_space;
1349
1350 BUG_ON(!crash_size || !crash_base);
1351 *crash_size = 0;
1352 *crash_base = 0;
1353
1354 /* find crashkernel and use the last one if there are more */
1355 p = strstr(p, "crashkernel=");
1356 while (p) {
1357 ck_cmdline = p;
1358 p = strstr(p+1, "crashkernel=");
1359 }
1360
1361 if (!ck_cmdline)
1362 return -EINVAL;
1363
1364 ck_cmdline += 12; /* strlen("crashkernel=") */
1365
1366 /*
1367 * if the commandline contains a ':', then that's the extended
1368 * syntax -- if not, it must be the classic syntax
1369 */
1370 first_colon = strchr(ck_cmdline, ':');
1371 first_space = strchr(ck_cmdline, ' ');
1372 if (first_colon && (!first_space || first_colon < first_space))
1373 return parse_crashkernel_mem(ck_cmdline, system_ram,
1374 crash_size, crash_base);
1375 else
1376 return parse_crashkernel_simple(ck_cmdline, crash_size,
1377 crash_base);
1378
1379 return 0;
1380}
1381
1382
1383
1384void crash_save_vmcoreinfo(void)
1385{
1386 u32 *buf;
1387
1388 if (!vmcoreinfo_size)
1389 return;
1390
1391 vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
1392
1393 buf = (u32 *)vmcoreinfo_note;
1394
1395 buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1396 vmcoreinfo_size);
1397
1398 final_note(buf);
1399}
1400
1401void vmcoreinfo_append_str(const char *fmt, ...)
1402{
1403 va_list args;
1404 char buf[0x50];
1405 int r;
1406
1407 va_start(args, fmt);
1408 r = vsnprintf(buf, sizeof(buf), fmt, args);
1409 va_end(args);
1410
1411 if (r + vmcoreinfo_size > vmcoreinfo_max_size)
1412 r = vmcoreinfo_max_size - vmcoreinfo_size;
1413
1414 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1415
1416 vmcoreinfo_size += r;
1417}
1418
1419/*
1420 * provide an empty default implementation here -- architecture
1421 * code may override this
1422 */
1423void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
1424{}
1425
1426unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
1427{
1428 return __pa((unsigned long)(char *)&vmcoreinfo_note);
1429}
1430
1431static int __init crash_save_vmcoreinfo_init(void)
1432{
1433 VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1434 VMCOREINFO_PAGESIZE(PAGE_SIZE);
1435
1436 VMCOREINFO_SYMBOL(init_uts_ns);
1437 VMCOREINFO_SYMBOL(node_online_map);
1438 VMCOREINFO_SYMBOL(swapper_pg_dir);
1439 VMCOREINFO_SYMBOL(_stext);
1440 VMCOREINFO_SYMBOL(vmlist);
1441
1442#ifndef CONFIG_NEED_MULTIPLE_NODES
1443 VMCOREINFO_SYMBOL(mem_map);
1444 VMCOREINFO_SYMBOL(contig_page_data);
1445#endif
1446#ifdef CONFIG_SPARSEMEM
1447 VMCOREINFO_SYMBOL(mem_section);
1448 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1449 VMCOREINFO_STRUCT_SIZE(mem_section);
1450 VMCOREINFO_OFFSET(mem_section, section_mem_map);
1451#endif
1452 VMCOREINFO_STRUCT_SIZE(page);
1453 VMCOREINFO_STRUCT_SIZE(pglist_data);
1454 VMCOREINFO_STRUCT_SIZE(zone);
1455 VMCOREINFO_STRUCT_SIZE(free_area);
1456 VMCOREINFO_STRUCT_SIZE(list_head);
1457 VMCOREINFO_SIZE(nodemask_t);
1458 VMCOREINFO_OFFSET(page, flags);
1459 VMCOREINFO_OFFSET(page, _count);
1460 VMCOREINFO_OFFSET(page, mapping);
1461 VMCOREINFO_OFFSET(page, lru);
1462 VMCOREINFO_OFFSET(pglist_data, node_zones);
1463 VMCOREINFO_OFFSET(pglist_data, nr_zones);
1464#ifdef CONFIG_FLAT_NODE_MEM_MAP
1465 VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1466#endif
1467 VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1468 VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1469 VMCOREINFO_OFFSET(pglist_data, node_id);
1470 VMCOREINFO_OFFSET(zone, free_area);
1471 VMCOREINFO_OFFSET(zone, vm_stat);
1472 VMCOREINFO_OFFSET(zone, spanned_pages);
1473 VMCOREINFO_OFFSET(free_area, free_list);
1474 VMCOREINFO_OFFSET(list_head, next);
1475 VMCOREINFO_OFFSET(list_head, prev);
1476 VMCOREINFO_OFFSET(vm_struct, addr);
1477 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1478 log_buf_kexec_setup();
1479 VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1480 VMCOREINFO_NUMBER(NR_FREE_PAGES);
1481 VMCOREINFO_NUMBER(PG_lru);
1482 VMCOREINFO_NUMBER(PG_private);
1483 VMCOREINFO_NUMBER(PG_swapcache);
1484
1485 arch_crash_save_vmcoreinfo();
1486
1487 return 0;
1488}
1489
1490module_init(crash_save_vmcoreinfo_init)
1491
1492/*
1493 * Move into place and start executing a preloaded standalone
1494 * executable. If nothing was preloaded return an error.
1495 */
1496int kernel_kexec(void)
1497{
1498 int error = 0;
1499
1500 if (!mutex_trylock(&kexec_mutex))
1501 return -EBUSY;
1502 if (!kexec_image) {
1503 error = -EINVAL;
1504 goto Unlock;
1505 }
1506
1507#ifdef CONFIG_KEXEC_JUMP
1508 if (kexec_image->preserve_context) {
1509 mutex_lock(&pm_mutex);
1510 pm_prepare_console();
1511 error = freeze_processes();
1512 if (error) {
1513 error = -EBUSY;
1514 goto Restore_console;
1515 }
1516 suspend_console();
1517 error = dpm_suspend_start(PMSG_FREEZE);
1518 if (error)
1519 goto Resume_console;
1520 /* At this point, dpm_suspend_start() has been called,
1521 * but *not* dpm_suspend_noirq(). We *must* call
1522 * dpm_suspend_noirq() now. Otherwise, drivers for
1523 * some devices (e.g. interrupt controllers) become
1524 * desynchronized with the actual state of the
1525 * hardware at resume time, and evil weirdness ensues.
1526 */
1527 error = dpm_suspend_noirq(PMSG_FREEZE);
1528 if (error)
1529 goto Resume_devices;
1530 error = disable_nonboot_cpus();
1531 if (error)
1532 goto Enable_cpus;
1533 local_irq_disable();
1534 error = syscore_suspend();
1535 if (error)
1536 goto Enable_irqs;
1537 } else
1538#endif
1539 {
1540 kernel_restart_prepare(NULL);
1541 printk(KERN_EMERG "Starting new kernel\n");
1542 machine_shutdown();
1543 }
1544
1545 machine_kexec(kexec_image);
1546
1547#ifdef CONFIG_KEXEC_JUMP
1548 if (kexec_image->preserve_context) {
1549 syscore_resume();
1550 Enable_irqs:
1551 local_irq_enable();
1552 Enable_cpus:
1553 enable_nonboot_cpus();
1554 dpm_resume_noirq(PMSG_RESTORE);
1555 Resume_devices:
1556 dpm_resume_end(PMSG_RESTORE);
1557 Resume_console:
1558 resume_console();
1559 thaw_processes();
1560 Restore_console:
1561 pm_restore_console();
1562 mutex_unlock(&pm_mutex);
1563 }
1564#endif
1565
1566 Unlock:
1567 mutex_unlock(&kexec_mutex);
1568 return error;
1569}
1/*
2 * kexec.c - kexec system call
3 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
4 *
5 * This source code is licensed under the GNU General Public License,
6 * Version 2. See the file COPYING for more details.
7 */
8
9#include <linux/capability.h>
10#include <linux/mm.h>
11#include <linux/file.h>
12#include <linux/slab.h>
13#include <linux/fs.h>
14#include <linux/kexec.h>
15#include <linux/mutex.h>
16#include <linux/list.h>
17#include <linux/highmem.h>
18#include <linux/syscalls.h>
19#include <linux/reboot.h>
20#include <linux/ioport.h>
21#include <linux/hardirq.h>
22#include <linux/elf.h>
23#include <linux/elfcore.h>
24#include <generated/utsrelease.h>
25#include <linux/utsname.h>
26#include <linux/numa.h>
27#include <linux/suspend.h>
28#include <linux/device.h>
29#include <linux/freezer.h>
30#include <linux/pm.h>
31#include <linux/cpu.h>
32#include <linux/console.h>
33#include <linux/vmalloc.h>
34#include <linux/swap.h>
35#include <linux/syscore_ops.h>
36
37#include <asm/page.h>
38#include <asm/uaccess.h>
39#include <asm/io.h>
40#include <asm/sections.h>
41
42/* Per cpu memory for storing cpu states in case of system crash. */
43note_buf_t __percpu *crash_notes;
44
45/* vmcoreinfo stuff */
46static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
47u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
48size_t vmcoreinfo_size;
49size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
50
51/* Location of the reserved area for the crash kernel */
52struct resource crashk_res = {
53 .name = "Crash kernel",
54 .start = 0,
55 .end = 0,
56 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
57};
58
59int kexec_should_crash(struct task_struct *p)
60{
61 if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
62 return 1;
63 return 0;
64}
65
66/*
67 * When kexec transitions to the new kernel there is a one-to-one
68 * mapping between physical and virtual addresses. On processors
69 * where you can disable the MMU this is trivial, and easy. For
70 * others it is still a simple predictable page table to setup.
71 *
72 * In that environment kexec copies the new kernel to its final
73 * resting place. This means I can only support memory whose
74 * physical address can fit in an unsigned long. In particular
75 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
76 * If the assembly stub has more restrictive requirements
77 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
78 * defined more restrictively in <asm/kexec.h>.
79 *
80 * The code for the transition from the current kernel to the
81 * the new kernel is placed in the control_code_buffer, whose size
82 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
83 * page of memory is necessary, but some architectures require more.
84 * Because this memory must be identity mapped in the transition from
85 * virtual to physical addresses it must live in the range
86 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
87 * modifiable.
88 *
89 * The assembly stub in the control code buffer is passed a linked list
90 * of descriptor pages detailing the source pages of the new kernel,
91 * and the destination addresses of those source pages. As this data
92 * structure is not used in the context of the current OS, it must
93 * be self-contained.
94 *
95 * The code has been made to work with highmem pages and will use a
96 * destination page in its final resting place (if it happens
97 * to allocate it). The end product of this is that most of the
98 * physical address space, and most of RAM can be used.
99 *
100 * Future directions include:
101 * - allocating a page table with the control code buffer identity
102 * mapped, to simplify machine_kexec and make kexec_on_panic more
103 * reliable.
104 */
105
106/*
107 * KIMAGE_NO_DEST is an impossible destination address..., for
108 * allocating pages whose destination address we do not care about.
109 */
110#define KIMAGE_NO_DEST (-1UL)
111
112static int kimage_is_destination_range(struct kimage *image,
113 unsigned long start, unsigned long end);
114static struct page *kimage_alloc_page(struct kimage *image,
115 gfp_t gfp_mask,
116 unsigned long dest);
117
118static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
119 unsigned long nr_segments,
120 struct kexec_segment __user *segments)
121{
122 size_t segment_bytes;
123 struct kimage *image;
124 unsigned long i;
125 int result;
126
127 /* Allocate a controlling structure */
128 result = -ENOMEM;
129 image = kzalloc(sizeof(*image), GFP_KERNEL);
130 if (!image)
131 goto out;
132
133 image->head = 0;
134 image->entry = &image->head;
135 image->last_entry = &image->head;
136 image->control_page = ~0; /* By default this does not apply */
137 image->start = entry;
138 image->type = KEXEC_TYPE_DEFAULT;
139
140 /* Initialize the list of control pages */
141 INIT_LIST_HEAD(&image->control_pages);
142
143 /* Initialize the list of destination pages */
144 INIT_LIST_HEAD(&image->dest_pages);
145
146 /* Initialize the list of unusable pages */
147 INIT_LIST_HEAD(&image->unuseable_pages);
148
149 /* Read in the segments */
150 image->nr_segments = nr_segments;
151 segment_bytes = nr_segments * sizeof(*segments);
152 result = copy_from_user(image->segment, segments, segment_bytes);
153 if (result) {
154 result = -EFAULT;
155 goto out;
156 }
157
158 /*
159 * Verify we have good destination addresses. The caller is
160 * responsible for making certain we don't attempt to load
161 * the new image into invalid or reserved areas of RAM. This
162 * just verifies it is an address we can use.
163 *
164 * Since the kernel does everything in page size chunks ensure
165 * the destination addresses are page aligned. Too many
166 * special cases crop of when we don't do this. The most
167 * insidious is getting overlapping destination addresses
168 * simply because addresses are changed to page size
169 * granularity.
170 */
171 result = -EADDRNOTAVAIL;
172 for (i = 0; i < nr_segments; i++) {
173 unsigned long mstart, mend;
174
175 mstart = image->segment[i].mem;
176 mend = mstart + image->segment[i].memsz;
177 if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
178 goto out;
179 if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
180 goto out;
181 }
182
183 /* Verify our destination addresses do not overlap.
184 * If we alloed overlapping destination addresses
185 * through very weird things can happen with no
186 * easy explanation as one segment stops on another.
187 */
188 result = -EINVAL;
189 for (i = 0; i < nr_segments; i++) {
190 unsigned long mstart, mend;
191 unsigned long j;
192
193 mstart = image->segment[i].mem;
194 mend = mstart + image->segment[i].memsz;
195 for (j = 0; j < i; j++) {
196 unsigned long pstart, pend;
197 pstart = image->segment[j].mem;
198 pend = pstart + image->segment[j].memsz;
199 /* Do the segments overlap ? */
200 if ((mend > pstart) && (mstart < pend))
201 goto out;
202 }
203 }
204
205 /* Ensure our buffer sizes are strictly less than
206 * our memory sizes. This should always be the case,
207 * and it is easier to check up front than to be surprised
208 * later on.
209 */
210 result = -EINVAL;
211 for (i = 0; i < nr_segments; i++) {
212 if (image->segment[i].bufsz > image->segment[i].memsz)
213 goto out;
214 }
215
216 result = 0;
217out:
218 if (result == 0)
219 *rimage = image;
220 else
221 kfree(image);
222
223 return result;
224
225}
226
227static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
228 unsigned long nr_segments,
229 struct kexec_segment __user *segments)
230{
231 int result;
232 struct kimage *image;
233
234 /* Allocate and initialize a controlling structure */
235 image = NULL;
236 result = do_kimage_alloc(&image, entry, nr_segments, segments);
237 if (result)
238 goto out;
239
240 *rimage = image;
241
242 /*
243 * Find a location for the control code buffer, and add it
244 * the vector of segments so that it's pages will also be
245 * counted as destination pages.
246 */
247 result = -ENOMEM;
248 image->control_code_page = kimage_alloc_control_pages(image,
249 get_order(KEXEC_CONTROL_PAGE_SIZE));
250 if (!image->control_code_page) {
251 printk(KERN_ERR "Could not allocate control_code_buffer\n");
252 goto out;
253 }
254
255 image->swap_page = kimage_alloc_control_pages(image, 0);
256 if (!image->swap_page) {
257 printk(KERN_ERR "Could not allocate swap buffer\n");
258 goto out;
259 }
260
261 result = 0;
262 out:
263 if (result == 0)
264 *rimage = image;
265 else
266 kfree(image);
267
268 return result;
269}
270
271static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
272 unsigned long nr_segments,
273 struct kexec_segment __user *segments)
274{
275 int result;
276 struct kimage *image;
277 unsigned long i;
278
279 image = NULL;
280 /* Verify we have a valid entry point */
281 if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
282 result = -EADDRNOTAVAIL;
283 goto out;
284 }
285
286 /* Allocate and initialize a controlling structure */
287 result = do_kimage_alloc(&image, entry, nr_segments, segments);
288 if (result)
289 goto out;
290
291 /* Enable the special crash kernel control page
292 * allocation policy.
293 */
294 image->control_page = crashk_res.start;
295 image->type = KEXEC_TYPE_CRASH;
296
297 /*
298 * Verify we have good destination addresses. Normally
299 * the caller is responsible for making certain we don't
300 * attempt to load the new image into invalid or reserved
301 * areas of RAM. But crash kernels are preloaded into a
302 * reserved area of ram. We must ensure the addresses
303 * are in the reserved area otherwise preloading the
304 * kernel could corrupt things.
305 */
306 result = -EADDRNOTAVAIL;
307 for (i = 0; i < nr_segments; i++) {
308 unsigned long mstart, mend;
309
310 mstart = image->segment[i].mem;
311 mend = mstart + image->segment[i].memsz - 1;
312 /* Ensure we are within the crash kernel limits */
313 if ((mstart < crashk_res.start) || (mend > crashk_res.end))
314 goto out;
315 }
316
317 /*
318 * Find a location for the control code buffer, and add
319 * the vector of segments so that it's pages will also be
320 * counted as destination pages.
321 */
322 result = -ENOMEM;
323 image->control_code_page = kimage_alloc_control_pages(image,
324 get_order(KEXEC_CONTROL_PAGE_SIZE));
325 if (!image->control_code_page) {
326 printk(KERN_ERR "Could not allocate control_code_buffer\n");
327 goto out;
328 }
329
330 result = 0;
331out:
332 if (result == 0)
333 *rimage = image;
334 else
335 kfree(image);
336
337 return result;
338}
339
340static int kimage_is_destination_range(struct kimage *image,
341 unsigned long start,
342 unsigned long end)
343{
344 unsigned long i;
345
346 for (i = 0; i < image->nr_segments; i++) {
347 unsigned long mstart, mend;
348
349 mstart = image->segment[i].mem;
350 mend = mstart + image->segment[i].memsz;
351 if ((end > mstart) && (start < mend))
352 return 1;
353 }
354
355 return 0;
356}
357
358static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
359{
360 struct page *pages;
361
362 pages = alloc_pages(gfp_mask, order);
363 if (pages) {
364 unsigned int count, i;
365 pages->mapping = NULL;
366 set_page_private(pages, order);
367 count = 1 << order;
368 for (i = 0; i < count; i++)
369 SetPageReserved(pages + i);
370 }
371
372 return pages;
373}
374
375static void kimage_free_pages(struct page *page)
376{
377 unsigned int order, count, i;
378
379 order = page_private(page);
380 count = 1 << order;
381 for (i = 0; i < count; i++)
382 ClearPageReserved(page + i);
383 __free_pages(page, order);
384}
385
386static void kimage_free_page_list(struct list_head *list)
387{
388 struct list_head *pos, *next;
389
390 list_for_each_safe(pos, next, list) {
391 struct page *page;
392
393 page = list_entry(pos, struct page, lru);
394 list_del(&page->lru);
395 kimage_free_pages(page);
396 }
397}
398
399static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
400 unsigned int order)
401{
402 /* Control pages are special, they are the intermediaries
403 * that are needed while we copy the rest of the pages
404 * to their final resting place. As such they must
405 * not conflict with either the destination addresses
406 * or memory the kernel is already using.
407 *
408 * The only case where we really need more than one of
409 * these are for architectures where we cannot disable
410 * the MMU and must instead generate an identity mapped
411 * page table for all of the memory.
412 *
413 * At worst this runs in O(N) of the image size.
414 */
415 struct list_head extra_pages;
416 struct page *pages;
417 unsigned int count;
418
419 count = 1 << order;
420 INIT_LIST_HEAD(&extra_pages);
421
422 /* Loop while I can allocate a page and the page allocated
423 * is a destination page.
424 */
425 do {
426 unsigned long pfn, epfn, addr, eaddr;
427
428 pages = kimage_alloc_pages(GFP_KERNEL, order);
429 if (!pages)
430 break;
431 pfn = page_to_pfn(pages);
432 epfn = pfn + count;
433 addr = pfn << PAGE_SHIFT;
434 eaddr = epfn << PAGE_SHIFT;
435 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
436 kimage_is_destination_range(image, addr, eaddr)) {
437 list_add(&pages->lru, &extra_pages);
438 pages = NULL;
439 }
440 } while (!pages);
441
442 if (pages) {
443 /* Remember the allocated page... */
444 list_add(&pages->lru, &image->control_pages);
445
 446		/* Because the page is already in its destination
447 * location we will never allocate another page at
448 * that address. Therefore kimage_alloc_pages
449 * will not return it (again) and we don't need
450 * to give it an entry in image->segment[].
451 */
452 }
453 /* Deal with the destination pages I have inadvertently allocated.
454 *
455 * Ideally I would convert multi-page allocations into single
456 * page allocations, and add everything to image->dest_pages.
457 *
458 * For now it is simpler to just free the pages.
459 */
460 kimage_free_page_list(&extra_pages);
461
462 return pages;
463}
464
465static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
466 unsigned int order)
467{
468 /* Control pages are special, they are the intermediaries
469 * that are needed while we copy the rest of the pages
470 * to their final resting place. As such they must
471 * not conflict with either the destination addresses
472 * or memory the kernel is already using.
473 *
 474	 * Control pages are also the only pages we must allocate
475 * when loading a crash kernel. All of the other pages
476 * are specified by the segments and we just memcpy
477 * into them directly.
478 *
479 * The only case where we really need more than one of
 480	 * these is for architectures where we cannot disable
481 * the MMU and must instead generate an identity mapped
482 * page table for all of the memory.
483 *
484 * Given the low demand this implements a very simple
485 * allocator that finds the first hole of the appropriate
486 * size in the reserved memory region, and allocates all
487 * of the memory up to and including the hole.
488 */
489 unsigned long hole_start, hole_end, size;
490 struct page *pages;
491
492 pages = NULL;
493 size = (1 << order) << PAGE_SHIFT;
494 hole_start = (image->control_page + (size - 1)) & ~(size - 1);
495 hole_end = hole_start + size - 1;
496 while (hole_end <= crashk_res.end) {
497 unsigned long i;
498
499 if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
500 break;
501 if (hole_end > crashk_res.end)
502 break;
503 /* See if I overlap any of the segments */
504 for (i = 0; i < image->nr_segments; i++) {
505 unsigned long mstart, mend;
506
507 mstart = image->segment[i].mem;
508 mend = mstart + image->segment[i].memsz - 1;
509 if ((hole_end >= mstart) && (hole_start <= mend)) {
510 /* Advance the hole to the end of the segment */
511 hole_start = (mend + (size - 1)) & ~(size - 1);
512 hole_end = hole_start + size - 1;
513 break;
514 }
515 }
516 /* If I don't overlap any segments I have found my hole! */
517 if (i == image->nr_segments) {
518 pages = pfn_to_page(hole_start >> PAGE_SHIFT);
519 break;
520 }
521 }
522 if (pages)
523 image->control_page = hole_end;
524
525 return pages;
526}
527
528
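/*
 * Dispatch to the normal or crash variant of the control page
 * allocator depending on the image type.
 */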
529struct page *kimage_alloc_control_pages(struct kimage *image,
530 unsigned int order)
531{
532 struct page *pages = NULL;
533
534 switch (image->type) {
535 case KEXEC_TYPE_DEFAULT:
536 pages = kimage_alloc_normal_control_pages(image, order);
537 break;
538 case KEXEC_TYPE_CRASH:
539 pages = kimage_alloc_crash_control_pages(image, order);
540 break;
541 }
542
543 return pages;
544}
545
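/*
 * Append an entry to the kimage entry list.  When the current
 * indirection page is full, allocate a fresh page, chain it in with an
 * IND_INDIRECTION entry and continue writing entries there.
 */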
546static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
547{
548 if (*image->entry != 0)
549 image->entry++;
550
551 if (image->entry == image->last_entry) {
552 kimage_entry_t *ind_page;
553 struct page *page;
554
555 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
556 if (!page)
557 return -ENOMEM;
558
559 ind_page = page_address(page);
560 *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
561 image->entry = ind_page;
562 image->last_entry = ind_page +
563 ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
564 }
565 *image->entry = entry;
566 image->entry++;
567 *image->entry = 0;
568
569 return 0;
570}
571
572static int kimage_set_destination(struct kimage *image,
573 unsigned long destination)
574{
575 int result;
576
577 destination &= PAGE_MASK;
578 result = kimage_add_entry(image, destination | IND_DESTINATION);
579 if (result == 0)
580 image->destination = destination;
581
582 return result;
583}
584
585
586static int kimage_add_page(struct kimage *image, unsigned long page)
587{
588 int result;
589
590 page &= PAGE_MASK;
591 result = kimage_add_entry(image, page | IND_SOURCE);
592 if (result == 0)
593 image->destination += PAGE_SIZE;
594
595 return result;
596}
597
598
599static void kimage_free_extra_pages(struct kimage *image)
600{
601 /* Walk through and free any extra destination pages I may have */
602 kimage_free_page_list(&image->dest_pages);
603
604 /* Walk through and free any unusable pages I have cached */
605 kimage_free_page_list(&image->unuseable_pages);
606
607}
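
/* Terminate the entry list with an IND_DONE entry. */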
608static void kimage_terminate(struct kimage *image)
609{
610 if (*image->entry != 0)
611 image->entry++;
612
613 *image->entry = IND_DONE;
614}
615
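/*
 * Walk every entry in the image, transparently following
 * IND_INDIRECTION links and stopping at the IND_DONE terminator.
 */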
616#define for_each_kimage_entry(image, ptr, entry) \
617 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
618 ptr = (entry & IND_INDIRECTION)? \
619 phys_to_virt((entry & PAGE_MASK)): ptr +1)
620
621static void kimage_free_entry(kimage_entry_t entry)
622{
623 struct page *page;
624
625 page = pfn_to_page(entry >> PAGE_SHIFT);
626 kimage_free_pages(page);
627}
628
629static void kimage_free(struct kimage *image)
630{
631 kimage_entry_t *ptr, entry;
632 kimage_entry_t ind = 0;
633
634 if (!image)
635 return;
636
637 kimage_free_extra_pages(image);
638 for_each_kimage_entry(image, ptr, entry) {
639 if (entry & IND_INDIRECTION) {
640 /* Free the previous indirection page */
641 if (ind & IND_INDIRECTION)
642 kimage_free_entry(ind);
643 /* Save this indirection page until we are
644 * done with it.
645 */
646 ind = entry;
647 }
648 else if (entry & IND_SOURCE)
649 kimage_free_entry(entry);
650 }
651 /* Free the final indirection page */
652 if (ind & IND_INDIRECTION)
653 kimage_free_entry(ind);
654
655 /* Handle any machine specific cleanup */
656 machine_kexec_cleanup(image);
657
658 /* Free the kexec control pages... */
659 kimage_free_page_list(&image->control_pages);
660 kfree(image);
661}
662
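/*
 * Return a pointer to the IND_SOURCE entry whose destination address
 * is 'page', or NULL if no source page has been assigned to that
 * destination yet.
 */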
663static kimage_entry_t *kimage_dst_used(struct kimage *image,
664 unsigned long page)
665{
666 kimage_entry_t *ptr, entry;
667 unsigned long destination = 0;
668
669 for_each_kimage_entry(image, ptr, entry) {
670 if (entry & IND_DESTINATION)
671 destination = entry & PAGE_MASK;
672 else if (entry & IND_SOURCE) {
673 if (page == destination)
674 return ptr;
675 destination += PAGE_SIZE;
676 }
677 }
678
679 return NULL;
680}
681
682static struct page *kimage_alloc_page(struct kimage *image,
683 gfp_t gfp_mask,
684 unsigned long destination)
685{
686 /*
687 * Here we implement safeguards to ensure that a source page
688 * is not copied to its destination page before the data on
689 * the destination page is no longer useful.
690 *
691 * To do this we maintain the invariant that a source page is
692 * either its own destination page, or it is not a
693 * destination page at all.
694 *
695 * That is slightly stronger than required, but the proof
 696	 * that no problems will occur is trivial, and the
 697	 * implementation is simple to verify.
698 *
699 * When allocating all pages normally this algorithm will run
700 * in O(N) time, but in the worst case it will run in O(N^2)
701 * time. If the runtime is a problem the data structures can
702 * be fixed.
703 */
704 struct page *page;
705 unsigned long addr;
706
707 /*
708 * Walk through the list of destination pages, and see if I
709 * have a match.
710 */
711 list_for_each_entry(page, &image->dest_pages, lru) {
712 addr = page_to_pfn(page) << PAGE_SHIFT;
713 if (addr == destination) {
714 list_del(&page->lru);
715 return page;
716 }
717 }
718 page = NULL;
719 while (1) {
720 kimage_entry_t *old;
721
722 /* Allocate a page, if we run out of memory give up */
723 page = kimage_alloc_pages(gfp_mask, 0);
724 if (!page)
725 return NULL;
 726		/* If the page cannot be used, file it away */
727 if (page_to_pfn(page) >
728 (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
729 list_add(&page->lru, &image->unuseable_pages);
730 continue;
731 }
732 addr = page_to_pfn(page) << PAGE_SHIFT;
733
 734		/* If it is the destination page we want, use it */
735 if (addr == destination)
736 break;
737
 738		/* If the page is not a destination page, use it */
739 if (!kimage_is_destination_range(image, addr,
740 addr + PAGE_SIZE))
741 break;
742
743 /*
 744		 * I know that the page is someone's destination page.
 745		 * See if there is already a source page for this
 746		 * destination page, and if so swap the source pages.
747 */
748 old = kimage_dst_used(image, addr);
749 if (old) {
750 /* If so move it */
751 unsigned long old_addr;
752 struct page *old_page;
753
754 old_addr = *old & PAGE_MASK;
755 old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
756 copy_highpage(page, old_page);
757 *old = addr | (*old & ~PAGE_MASK);
758
759 /* The old page I have found cannot be a
 760			 * destination page, so return it if its
761 * gfp_flags honor the ones passed in.
762 */
763 if (!(gfp_mask & __GFP_HIGHMEM) &&
764 PageHighMem(old_page)) {
765 kimage_free_pages(old_page);
766 continue;
767 }
768 addr = old_addr;
769 page = old_page;
770 break;
771 }
772 else {
 773			/* Place the page on the destination list; I
 774			 * will use it later.
775 */
776 list_add(&page->lru, &image->dest_pages);
777 }
778 }
779
780 return page;
781}
782
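/*
 * Copy a segment from user space into freshly allocated image pages;
 * any tail beyond bufsz is left zeroed up to memsz.
 */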
783static int kimage_load_normal_segment(struct kimage *image,
784 struct kexec_segment *segment)
785{
786 unsigned long maddr;
787 unsigned long ubytes, mbytes;
788 int result;
789 unsigned char __user *buf;
790
791 result = 0;
792 buf = segment->buf;
793 ubytes = segment->bufsz;
794 mbytes = segment->memsz;
795 maddr = segment->mem;
796
797 result = kimage_set_destination(image, maddr);
798 if (result < 0)
799 goto out;
800
801 while (mbytes) {
802 struct page *page;
803 char *ptr;
804 size_t uchunk, mchunk;
805
806 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
807 if (!page) {
808 result = -ENOMEM;
809 goto out;
810 }
811 result = kimage_add_page(image, page_to_pfn(page)
812 << PAGE_SHIFT);
813 if (result < 0)
814 goto out;
815
816 ptr = kmap(page);
817 /* Start with a clear page */
818 clear_page(ptr);
819 ptr += maddr & ~PAGE_MASK;
820 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
821 if (mchunk > mbytes)
822 mchunk = mbytes;
823
824 uchunk = mchunk;
825 if (uchunk > ubytes)
826 uchunk = ubytes;
827
828 result = copy_from_user(ptr, buf, uchunk);
829 kunmap(page);
830 if (result) {
831 result = -EFAULT;
832 goto out;
833 }
834 ubytes -= uchunk;
835 maddr += mchunk;
836 buf += mchunk;
837 mbytes -= mchunk;
838 }
839out:
840 return result;
841}
842
843static int kimage_load_crash_segment(struct kimage *image,
844 struct kexec_segment *segment)
845{
 846	/* For crash dump kernels we simply copy the data from
 847	 * user space to its destination.
848 * We do things a page at a time for the sake of kmap.
849 */
850 unsigned long maddr;
851 unsigned long ubytes, mbytes;
852 int result;
853 unsigned char __user *buf;
854
855 result = 0;
856 buf = segment->buf;
857 ubytes = segment->bufsz;
858 mbytes = segment->memsz;
859 maddr = segment->mem;
860 while (mbytes) {
861 struct page *page;
862 char *ptr;
863 size_t uchunk, mchunk;
864
865 page = pfn_to_page(maddr >> PAGE_SHIFT);
866 if (!page) {
867 result = -ENOMEM;
868 goto out;
869 }
870 ptr = kmap(page);
871 ptr += maddr & ~PAGE_MASK;
872 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
873 if (mchunk > mbytes)
874 mchunk = mbytes;
875
876 uchunk = mchunk;
877 if (uchunk > ubytes) {
878 uchunk = ubytes;
879 /* Zero the trailing part of the page */
880 memset(ptr + uchunk, 0, mchunk - uchunk);
881 }
882 result = copy_from_user(ptr, buf, uchunk);
883 kexec_flush_icache_page(page);
884 kunmap(page);
885 if (result) {
886 result = -EFAULT;
887 goto out;
888 }
889 ubytes -= uchunk;
890 maddr += mchunk;
891 buf += mchunk;
892 mbytes -= mchunk;
893 }
894out:
895 return result;
896}
897
898static int kimage_load_segment(struct kimage *image,
899 struct kexec_segment *segment)
900{
901 int result = -ENOMEM;
902
903 switch (image->type) {
904 case KEXEC_TYPE_DEFAULT:
905 result = kimage_load_normal_segment(image, segment);
906 break;
907 case KEXEC_TYPE_CRASH:
908 result = kimage_load_crash_segment(image, segment);
909 break;
910 }
911
912 return result;
913}
914
915/*
916 * Exec Kernel system call: for obvious reasons only root may call it.
917 *
918 * This call breaks up into three pieces.
919 * - A generic part which loads the new kernel from the current
920 * address space, and very carefully places the data in the
921 * allocated pages.
922 *
923 * - A generic part that interacts with the kernel and tells all of
 924 *   the devices to shut down, preventing ongoing DMAs and placing
 925 *   the devices in a consistent state so a later kernel can
 926 *   reinitialize them.
 927 *
 928 * - A machine specific part that includes the syscall number,
 929 *   copies the image to its final destination, and jumps into the
 930 *   image at entry.
 931 *
 932 * kexec does not sync or unmount filesystems, so if you need
 933 * that to happen you need to do it yourself.
934 */
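
/*
 * Typical usage is via the userspace kexec(8) tool rather than the raw
 * syscall (illustrative example, paths are placeholders):
 *
 *   kexec -l /boot/vmlinuz --initrd=/boot/initrd.img \
 *         --append="root=/dev/sda1"
 *   kexec -e
 */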
935struct kimage *kexec_image;
936struct kimage *kexec_crash_image;
937
938static DEFINE_MUTEX(kexec_mutex);
939
940SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
941 struct kexec_segment __user *, segments, unsigned long, flags)
942{
943 struct kimage **dest_image, *image;
944 int result;
945
946 /* We only trust the superuser with rebooting the system. */
947 if (!capable(CAP_SYS_BOOT))
948 return -EPERM;
949
950 /*
951 * Verify we have a legal set of flags
952 * This leaves us room for future extensions.
953 */
954 if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
955 return -EINVAL;
956
957 /* Verify we are on the appropriate architecture */
958 if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
959 ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
960 return -EINVAL;
961
962 /* Put an artificial cap on the number
963 * of segments passed to kexec_load.
964 */
965 if (nr_segments > KEXEC_SEGMENT_MAX)
966 return -EINVAL;
967
968 image = NULL;
969 result = 0;
970
971 /* Because we write directly to the reserved memory
972 * region when loading crash kernels we need a mutex here to
973 * prevent multiple crash kernels from attempting to load
974 * simultaneously, and to prevent a crash kernel from loading
 975	 * over the top of an in-use crash kernel.
976 *
977 * KISS: always take the mutex.
978 */
979 if (!mutex_trylock(&kexec_mutex))
980 return -EBUSY;
981
982 dest_image = &kexec_image;
983 if (flags & KEXEC_ON_CRASH)
984 dest_image = &kexec_crash_image;
985 if (nr_segments > 0) {
986 unsigned long i;
987
988 /* Loading another kernel to reboot into */
989 if ((flags & KEXEC_ON_CRASH) == 0)
990 result = kimage_normal_alloc(&image, entry,
991 nr_segments, segments);
992 /* Loading another kernel to switch to if this one crashes */
993 else if (flags & KEXEC_ON_CRASH) {
994 /* Free any current crash dump kernel before
995 * we corrupt it.
996 */
997 kimage_free(xchg(&kexec_crash_image, NULL));
998 result = kimage_crash_alloc(&image, entry,
999 nr_segments, segments);
1000 crash_map_reserved_pages();
1001 }
1002 if (result)
1003 goto out;
1004
1005 if (flags & KEXEC_PRESERVE_CONTEXT)
1006 image->preserve_context = 1;
1007 result = machine_kexec_prepare(image);
1008 if (result)
1009 goto out;
1010
1011 for (i = 0; i < nr_segments; i++) {
1012 result = kimage_load_segment(image, &image->segment[i]);
1013 if (result)
1014 goto out;
1015 }
1016 kimage_terminate(image);
1017 if (flags & KEXEC_ON_CRASH)
1018 crash_unmap_reserved_pages();
1019 }
 1020	/* Install the new kernel and uninstall the old one */
1021 image = xchg(dest_image, image);
1022
1023out:
1024 mutex_unlock(&kexec_mutex);
1025 kimage_free(image);
1026
1027 return result;
1028}
1029
1030/*
1031 * Add and remove page tables for crashkernel memory
1032 *
1033 * Provide an empty default implementation here -- architecture
1034 * code may override this
1035 */
1036void __weak crash_map_reserved_pages(void)
1037{}
1038
1039void __weak crash_unmap_reserved_pages(void)
1040{}
1041
1042#ifdef CONFIG_COMPAT
1043asmlinkage long compat_sys_kexec_load(unsigned long entry,
1044 unsigned long nr_segments,
1045 struct compat_kexec_segment __user *segments,
1046 unsigned long flags)
1047{
1048 struct compat_kexec_segment in;
1049 struct kexec_segment out, __user *ksegments;
1050 unsigned long i, result;
1051
1052 /* Don't allow clients that don't understand the native
1053 * architecture to do anything.
1054 */
1055 if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
1056 return -EINVAL;
1057
1058 if (nr_segments > KEXEC_SEGMENT_MAX)
1059 return -EINVAL;
1060
1061 ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
1062 for (i=0; i < nr_segments; i++) {
1063 result = copy_from_user(&in, &segments[i], sizeof(in));
1064 if (result)
1065 return -EFAULT;
1066
1067 out.buf = compat_ptr(in.buf);
1068 out.bufsz = in.bufsz;
1069 out.mem = in.mem;
1070 out.memsz = in.memsz;
1071
1072 result = copy_to_user(&ksegments[i], &out, sizeof(out));
1073 if (result)
1074 return -EFAULT;
1075 }
1076
1077 return sys_kexec_load(entry, nr_segments, ksegments, flags);
1078}
1079#endif
1080
1081void crash_kexec(struct pt_regs *regs)
1082{
1083 /* Take the kexec_mutex here to prevent sys_kexec_load
1084 * running on one cpu from replacing the crash kernel
1085 * we are using after a panic on a different cpu.
1086 *
1087 * If the crash kernel was not located in a fixed area
1088 * of memory the xchg(&kexec_crash_image) would be
1089 * sufficient. But since I reuse the memory...
1090 */
1091 if (mutex_trylock(&kexec_mutex)) {
1092 if (kexec_crash_image) {
1093 struct pt_regs fixed_regs;
1094
1095 crash_setup_regs(&fixed_regs, regs);
1096 crash_save_vmcoreinfo();
1097 machine_crash_shutdown(&fixed_regs);
1098 machine_kexec(kexec_crash_image);
1099 }
1100 mutex_unlock(&kexec_mutex);
1101 }
1102}
1103
1104size_t crash_get_memory_size(void)
1105{
1106 size_t size = 0;
1107 mutex_lock(&kexec_mutex);
1108 if (crashk_res.end != crashk_res.start)
1109 size = resource_size(&crashk_res);
1110 mutex_unlock(&kexec_mutex);
1111 return size;
1112}
1113
1114void __weak crash_free_reserved_phys_range(unsigned long begin,
1115 unsigned long end)
1116{
1117 unsigned long addr;
1118
1119 for (addr = begin; addr < end; addr += PAGE_SIZE) {
1120 ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
1121 init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
1122 free_page((unsigned long)__va(addr));
1123 totalram_pages++;
1124 }
1125}
1126
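/*
 * Shrink the reserved crash kernel region to new_size bytes and hand
 * the freed tail back to the iomem tree as "System RAM".
 */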
1127int crash_shrink_memory(unsigned long new_size)
1128{
1129 int ret = 0;
1130 unsigned long start, end;
1131 unsigned long old_size;
1132 struct resource *ram_res;
1133
1134 mutex_lock(&kexec_mutex);
1135
1136 if (kexec_crash_image) {
1137 ret = -ENOENT;
1138 goto unlock;
1139 }
1140 start = crashk_res.start;
1141 end = crashk_res.end;
1142 old_size = (end == 0) ? 0 : end - start + 1;
1143 if (new_size >= old_size) {
1144 ret = (new_size == old_size) ? 0 : -EINVAL;
1145 goto unlock;
1146 }
1147
1148 ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1149 if (!ram_res) {
1150 ret = -ENOMEM;
1151 goto unlock;
1152 }
1153
1154 start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1155 end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1156
1157 crash_map_reserved_pages();
1158 crash_free_reserved_phys_range(end, crashk_res.end);
1159
1160 if ((start == end) && (crashk_res.parent != NULL))
1161 release_resource(&crashk_res);
1162
1163 ram_res->start = end;
1164 ram_res->end = crashk_res.end;
1165 ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1166 ram_res->name = "System RAM";
1167
1168 crashk_res.end = end - 1;
1169
1170 insert_resource(&iomem_resource, ram_res);
1171 crash_unmap_reserved_pages();
1172
1173unlock:
1174 mutex_unlock(&kexec_mutex);
1175 return ret;
1176}
1177
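/*
 * Append a single ELF note (header, name and data, each padded to a
 * 4-byte boundary) at 'buf' and return a pointer just past it.
 */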
1178static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1179 size_t data_len)
1180{
1181 struct elf_note note;
1182
1183 note.n_namesz = strlen(name) + 1;
1184 note.n_descsz = data_len;
1185 note.n_type = type;
 1186	memcpy(buf, &note, sizeof(note));
1187 buf += (sizeof(note) + 3)/4;
1188 memcpy(buf, name, note.n_namesz);
1189 buf += (note.n_namesz + 3)/4;
1190 memcpy(buf, data, note.n_descsz);
1191 buf += (note.n_descsz + 3)/4;
1192
1193 return buf;
1194}
1195
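/* Terminate the note buffer with an empty note. */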
1196static void final_note(u32 *buf)
1197{
1198 struct elf_note note;
1199
1200 note.n_namesz = 0;
1201 note.n_descsz = 0;
1202 note.n_type = 0;
 1203	memcpy(buf, &note, sizeof(note));
1204}
1205
1206void crash_save_cpu(struct pt_regs *regs, int cpu)
1207{
1208 struct elf_prstatus prstatus;
1209 u32 *buf;
1210
1211 if ((cpu < 0) || (cpu >= nr_cpu_ids))
1212 return;
1213
1214 /* Using ELF notes here is opportunistic.
1215 * I need a well defined structure format
1216 * for the data I pass, and I need tags
1217 * on the data to indicate what information I have
1218 * squirrelled away. ELF notes happen to provide
1219 * all of that, so there is no need to invent something new.
1220 */
1221 buf = (u32*)per_cpu_ptr(crash_notes, cpu);
1222 if (!buf)
1223 return;
1224 memset(&prstatus, 0, sizeof(prstatus));
1225 prstatus.pr_pid = current->pid;
1226 elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1227 buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1228 &prstatus, sizeof(prstatus));
1229 final_note(buf);
1230}
1231
1232static int __init crash_notes_memory_init(void)
1233{
1234 /* Allocate memory for saving cpu registers. */
1235 crash_notes = alloc_percpu(note_buf_t);
1236 if (!crash_notes) {
1237 printk("Kexec: Memory allocation for saving cpu register"
1238 " states failed\n");
1239 return -ENOMEM;
1240 }
1241 return 0;
1242}
1243module_init(crash_notes_memory_init)
1244
1245
1246/*
1247 * parsing the "crashkernel" commandline
1248 *
1249 * this code is intended to be called from architecture specific code
1250 */
1251
1252
1253/*
1254 * This function parses command lines in the format
1255 *
1256 * crashkernel=ramsize-range:size[,...][@offset]
1257 *
1258 * The function returns 0 on success and -EINVAL on failure.
1259 */
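/*
 * For example (illustrative values only):
 *
 *   crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * reserves 64M when the system has between 512M and 2G of RAM, 128M
 * when it has 2G or more, and places the reservation at offset 16M.
 */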
1260static int __init parse_crashkernel_mem(char *cmdline,
1261 unsigned long long system_ram,
1262 unsigned long long *crash_size,
1263 unsigned long long *crash_base)
1264{
1265 char *cur = cmdline, *tmp;
1266
1267 /* for each entry of the comma-separated list */
1268 do {
1269 unsigned long long start, end = ULLONG_MAX, size;
1270
1271 /* get the start of the range */
1272 start = memparse(cur, &tmp);
1273 if (cur == tmp) {
1274 pr_warning("crashkernel: Memory value expected\n");
1275 return -EINVAL;
1276 }
1277 cur = tmp;
1278 if (*cur != '-') {
1279 pr_warning("crashkernel: '-' expected\n");
1280 return -EINVAL;
1281 }
1282 cur++;
1283
 1284		/* if no ':' is here, then we read the end */
1285 if (*cur != ':') {
1286 end = memparse(cur, &tmp);
1287 if (cur == tmp) {
1288 pr_warning("crashkernel: Memory "
1289 "value expected\n");
1290 return -EINVAL;
1291 }
1292 cur = tmp;
1293 if (end <= start) {
1294 pr_warning("crashkernel: end <= start\n");
1295 return -EINVAL;
1296 }
1297 }
1298
1299 if (*cur != ':') {
1300 pr_warning("crashkernel: ':' expected\n");
1301 return -EINVAL;
1302 }
1303 cur++;
1304
1305 size = memparse(cur, &tmp);
1306 if (cur == tmp) {
1307 pr_warning("Memory value expected\n");
1308 return -EINVAL;
1309 }
1310 cur = tmp;
1311 if (size >= system_ram) {
1312 pr_warning("crashkernel: invalid size\n");
1313 return -EINVAL;
1314 }
1315
 1316		/* does the system's RAM size fall into this range? */
1317 if (system_ram >= start && system_ram < end) {
1318 *crash_size = size;
1319 break;
1320 }
1321 } while (*cur++ == ',');
1322
1323 if (*crash_size > 0) {
1324 while (*cur && *cur != ' ' && *cur != '@')
1325 cur++;
1326 if (*cur == '@') {
1327 cur++;
1328 *crash_base = memparse(cur, &tmp);
1329 if (cur == tmp) {
1330 pr_warning("Memory value expected "
1331 "after '@'\n");
1332 return -EINVAL;
1333 }
1334 }
1335 }
1336
1337 return 0;
1338}
1339
1340/*
 1341 * This function parses "simple" (old) crashkernel command lines like
1342 *
1343 * crashkernel=size[@offset]
1344 *
1345 * It returns 0 on success and -EINVAL on failure.
1346 */
1347static int __init parse_crashkernel_simple(char *cmdline,
1348 unsigned long long *crash_size,
1349 unsigned long long *crash_base)
1350{
1351 char *cur = cmdline;
1352
1353 *crash_size = memparse(cmdline, &cur);
1354 if (cmdline == cur) {
1355 pr_warning("crashkernel: memory value expected\n");
1356 return -EINVAL;
1357 }
1358
1359 if (*cur == '@')
1360 *crash_base = memparse(cur+1, &cur);
1361 else if (*cur != ' ' && *cur != '\0') {
1362 pr_warning("crashkernel: unrecognized char\n");
1363 return -EINVAL;
1364 }
1365
1366 return 0;
1367}
1368
1369/*
 1370 * This function is the entry point for command line parsing and should be
1371 * called from the arch-specific code.
1372 */
1373int __init parse_crashkernel(char *cmdline,
1374 unsigned long long system_ram,
1375 unsigned long long *crash_size,
1376 unsigned long long *crash_base)
1377{
1378 char *p = cmdline, *ck_cmdline = NULL;
1379 char *first_colon, *first_space;
1380
1381 BUG_ON(!crash_size || !crash_base);
1382 *crash_size = 0;
1383 *crash_base = 0;
1384
1385 /* find crashkernel and use the last one if there are more */
1386 p = strstr(p, "crashkernel=");
1387 while (p) {
1388 ck_cmdline = p;
1389 p = strstr(p+1, "crashkernel=");
1390 }
1391
1392 if (!ck_cmdline)
1393 return -EINVAL;
1394
1395 ck_cmdline += 12; /* strlen("crashkernel=") */
1396
1397 /*
1398 * if the commandline contains a ':', then that's the extended
1399 * syntax -- if not, it must be the classic syntax
1400 */
1401 first_colon = strchr(ck_cmdline, ':');
1402 first_space = strchr(ck_cmdline, ' ');
1403 if (first_colon && (!first_space || first_colon < first_space))
1404 return parse_crashkernel_mem(ck_cmdline, system_ram,
1405 crash_size, crash_base);
1406 else
1407 return parse_crashkernel_simple(ck_cmdline, crash_size,
1408 crash_base);
1409
1410 return 0;
1411}
1412
1413
1414static void update_vmcoreinfo_note(void)
1415{
1416 u32 *buf = vmcoreinfo_note;
1417
1418 if (!vmcoreinfo_size)
1419 return;
1420 buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1421 vmcoreinfo_size);
1422 final_note(buf);
1423}
1424
1425void crash_save_vmcoreinfo(void)
1426{
1427 vmcoreinfo_append_str("CRASHTIME=%ld", get_seconds());
1428 update_vmcoreinfo_note();
1429}
1430
1431void vmcoreinfo_append_str(const char *fmt, ...)
1432{
1433 va_list args;
1434 char buf[0x50];
1435 int r;
1436
1437 va_start(args, fmt);
1438 r = vsnprintf(buf, sizeof(buf), fmt, args);
1439 va_end(args);
1440
1441 if (r + vmcoreinfo_size > vmcoreinfo_max_size)
1442 r = vmcoreinfo_max_size - vmcoreinfo_size;
1443
1444 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1445
1446 vmcoreinfo_size += r;
1447}
1448
1449/*
1450 * provide an empty default implementation here -- architecture
1451 * code may override this
1452 */
1453void __attribute__ ((weak)) arch_crash_save_vmcoreinfo(void)
1454{}
1455
1456unsigned long __attribute__ ((weak)) paddr_vmcoreinfo_note(void)
1457{
1458 return __pa((unsigned long)(char *)&vmcoreinfo_note);
1459}
1460
1461static int __init crash_save_vmcoreinfo_init(void)
1462{
1463 VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1464 VMCOREINFO_PAGESIZE(PAGE_SIZE);
1465
1466 VMCOREINFO_SYMBOL(init_uts_ns);
1467 VMCOREINFO_SYMBOL(node_online_map);
1468#ifdef CONFIG_MMU
1469 VMCOREINFO_SYMBOL(swapper_pg_dir);
1470#endif
1471 VMCOREINFO_SYMBOL(_stext);
1472 VMCOREINFO_SYMBOL(vmlist);
1473
1474#ifndef CONFIG_NEED_MULTIPLE_NODES
1475 VMCOREINFO_SYMBOL(mem_map);
1476 VMCOREINFO_SYMBOL(contig_page_data);
1477#endif
1478#ifdef CONFIG_SPARSEMEM
1479 VMCOREINFO_SYMBOL(mem_section);
1480 VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1481 VMCOREINFO_STRUCT_SIZE(mem_section);
1482 VMCOREINFO_OFFSET(mem_section, section_mem_map);
1483#endif
1484 VMCOREINFO_STRUCT_SIZE(page);
1485 VMCOREINFO_STRUCT_SIZE(pglist_data);
1486 VMCOREINFO_STRUCT_SIZE(zone);
1487 VMCOREINFO_STRUCT_SIZE(free_area);
1488 VMCOREINFO_STRUCT_SIZE(list_head);
1489 VMCOREINFO_SIZE(nodemask_t);
1490 VMCOREINFO_OFFSET(page, flags);
1491 VMCOREINFO_OFFSET(page, _count);
1492 VMCOREINFO_OFFSET(page, mapping);
1493 VMCOREINFO_OFFSET(page, lru);
1494 VMCOREINFO_OFFSET(pglist_data, node_zones);
1495 VMCOREINFO_OFFSET(pglist_data, nr_zones);
1496#ifdef CONFIG_FLAT_NODE_MEM_MAP
1497 VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1498#endif
1499 VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1500 VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1501 VMCOREINFO_OFFSET(pglist_data, node_id);
1502 VMCOREINFO_OFFSET(zone, free_area);
1503 VMCOREINFO_OFFSET(zone, vm_stat);
1504 VMCOREINFO_OFFSET(zone, spanned_pages);
1505 VMCOREINFO_OFFSET(free_area, free_list);
1506 VMCOREINFO_OFFSET(list_head, next);
1507 VMCOREINFO_OFFSET(list_head, prev);
1508 VMCOREINFO_OFFSET(vm_struct, addr);
1509 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1510 log_buf_kexec_setup();
1511 VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1512 VMCOREINFO_NUMBER(NR_FREE_PAGES);
1513 VMCOREINFO_NUMBER(PG_lru);
1514 VMCOREINFO_NUMBER(PG_private);
1515 VMCOREINFO_NUMBER(PG_swapcache);
1516
1517 arch_crash_save_vmcoreinfo();
1518 update_vmcoreinfo_note();
1519
1520 return 0;
1521}
1522
1523module_init(crash_save_vmcoreinfo_init)
1524
1525/*
1526 * Move into place and start executing a preloaded standalone
1527 * executable. If nothing was preloaded return an error.
1528 */
1529int kernel_kexec(void)
1530{
1531 int error = 0;
1532
1533 if (!mutex_trylock(&kexec_mutex))
1534 return -EBUSY;
1535 if (!kexec_image) {
1536 error = -EINVAL;
1537 goto Unlock;
1538 }
1539
1540#ifdef CONFIG_KEXEC_JUMP
1541 if (kexec_image->preserve_context) {
1542 lock_system_sleep();
1543 pm_prepare_console();
1544 error = freeze_processes();
1545 if (error) {
1546 error = -EBUSY;
1547 goto Restore_console;
1548 }
1549 suspend_console();
1550 error = dpm_suspend_start(PMSG_FREEZE);
1551 if (error)
1552 goto Resume_console;
1553 /* At this point, dpm_suspend_start() has been called,
1554 * but *not* dpm_suspend_end(). We *must* call
1555 * dpm_suspend_end() now. Otherwise, drivers for
1556 * some devices (e.g. interrupt controllers) become
1557 * desynchronized with the actual state of the
1558 * hardware at resume time, and evil weirdness ensues.
1559 */
1560 error = dpm_suspend_end(PMSG_FREEZE);
1561 if (error)
1562 goto Resume_devices;
1563 error = disable_nonboot_cpus();
1564 if (error)
1565 goto Enable_cpus;
1566 local_irq_disable();
1567 error = syscore_suspend();
1568 if (error)
1569 goto Enable_irqs;
1570 } else
1571#endif
1572 {
1573 kernel_restart_prepare(NULL);
1574 printk(KERN_EMERG "Starting new kernel\n");
1575 machine_shutdown();
1576 }
1577
1578 machine_kexec(kexec_image);
1579
1580#ifdef CONFIG_KEXEC_JUMP
1581 if (kexec_image->preserve_context) {
1582 syscore_resume();
1583 Enable_irqs:
1584 local_irq_enable();
1585 Enable_cpus:
1586 enable_nonboot_cpus();
1587 dpm_resume_start(PMSG_RESTORE);
1588 Resume_devices:
1589 dpm_resume_end(PMSG_RESTORE);
1590 Resume_console:
1591 resume_console();
1592 thaw_processes();
1593 Restore_console:
1594 pm_restore_console();
1595 unlock_system_sleep();
1596 }
1597#endif
1598
1599 Unlock:
1600 mutex_unlock(&kexec_mutex);
1601 return error;
1602}