/*
 * machine_kexec.c for kexec
 * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/compiler.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/cacheflush.h>
#include <asm/page.h>

extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;

extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;

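/*
 * Platform hooks: board code may assign these pointers to override the
 * default kexec prepare/shutdown and crash shutdown behaviour, e.g. from
 * a (hypothetical) platform setup routine:
 *
 *	_machine_kexec_shutdown = my_board_kexec_shutdown;
 */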
int (*_machine_kexec_prepare)(struct kimage *) = NULL;
void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
#ifdef CONFIG_SMP
void (*relocated_kexec_smp_wait) (void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
void (*_crash_smp_send_stop)(void) = NULL;
#endif

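/* Dump the layout of the kexec image at pr_debug level. */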
static void kexec_image_info(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("kexec kimage info:\n");
	pr_debug("  type:        %d\n", kimage->type);
	pr_debug("  start:       %lx\n", kimage->start);
	pr_debug("  head:        %lx\n", kimage->head);
	pr_debug("  nr_segments: %lu\n", kimage->nr_segments);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("  segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			(unsigned long)kimage->segment[i].memsz,
			(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
	}
}

int
machine_kexec_prepare(struct kimage *kimage)
{
	kexec_image_info(kimage);

	if (_machine_kexec_prepare)
		return _machine_kexec_prepare(kimage);
	return 0;
}

void
machine_kexec_cleanup(struct kimage *kimage)
{
}

void
machine_shutdown(void)
{
	if (_machine_kexec_shutdown)
		_machine_kexec_shutdown();
}

void
machine_crash_shutdown(struct pt_regs *regs)
{
	if (_machine_crash_shutdown)
		_machine_crash_shutdown(regs);
	else
		default_machine_crash_shutdown(regs);
}

typedef void (*noretfun_t)(void) __noreturn;

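/*
 * Switch to the new kernel: copy the relocation stub into the control
 * page, convert the kimage entry list to KSEG0 virtual addresses, and
 * jump to the stub with interrupts disabled.
 */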
void
machine_kexec(struct kimage *image)
{
	unsigned long reboot_code_buffer;
	unsigned long entry;
	unsigned long *ptr;

	reboot_code_buffer =
		(unsigned long)page_address(image->control_code_page);

	kexec_start_address =
		(unsigned long) phys_to_virt(image->start);

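	/*
	 * For a normal kexec the head word holds the physical address of
	 * the first indirection page (its low bits carry IND_* flags,
	 * hence the PAGE_MASK).  A crash image has its segments loaded in
	 * place already, so head itself (IND_DONE) is handed to the stub.
	 */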
	if (image->type == KEXEC_TYPE_DEFAULT) {
		kexec_indirection_page =
			(unsigned long) phys_to_virt(image->head & PAGE_MASK);
	} else {
		kexec_indirection_page = (unsigned long)&image->head;
	}

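	/*
	 * Copy the relocation stub into the control page so that it can
	 * keep running while the rest of memory is overwritten.
	 */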
	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);

	/*
	 * The generic kexec code builds a page list with physical
	 * addresses. They are directly accessible through KSEG0 (or
	 * CKSEG0 or XKPHYS on a 64-bit system), hence the
	 * phys_to_virt() call.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
		phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}

	/*
	 * We do not want to be interrupted from here on.
	 */
	local_irq_disable();

	printk("Will call new kernel at %08lx\n", image->start);
	printk("Bye ...\n");
	__flush_cache_all();
#ifdef CONFIG_SMP
	/* All secondary CPUs may now jump to the kexec_smp_wait loop. */
	relocated_kexec_smp_wait = reboot_code_buffer +
		(void *)(kexec_smp_wait - relocate_new_kernel);
	smp_wmb();
	atomic_set(&kexec_ready_to_reboot, 1);
#endif
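	/* Jump to the relocated stub; it does not return. */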
	((noretfun_t) reboot_code_buffer)();
}