/*
 * machine_kexec.c for kexec
 * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/compiler.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/cacheflush.h>
#include <asm/page.h>

extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;

extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
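/*
 * Note: relocate_new_kernel and the two kexec_* variables above are
 * assumed to be provided by the assembly relocation stub
 * (relocate_kernel.S); the stub picks up kexec_start_address and
 * kexec_indirection_page when it runs.
 */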

int (*_machine_kexec_prepare)(struct kimage *) = NULL;
void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
#ifdef CONFIG_SMP
void (*relocated_kexec_smp_wait)(void *);
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
void (*_crash_smp_send_stop)(void) = NULL;
#endif
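/*
 * Platform code may install its own handlers through the hooks above;
 * when a hook is left NULL the generic fallback below is used.
 */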

static void kexec_image_info(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("kexec kimage info:\n");
	pr_debug("  type:        %d\n", kimage->type);
	pr_debug("  start:       %lx\n", kimage->start);
	pr_debug("  head:        %lx\n", kimage->head);
	pr_debug("  nr_segments: %lu\n", kimage->nr_segments);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("  segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			 i,
			 kimage->segment[i].mem,
			 kimage->segment[i].mem + kimage->segment[i].memsz,
			 (unsigned long)kimage->segment[i].memsz,
			 (unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
	}
}
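
/*
 * kexec_image_info() uses pr_debug(), so it only produces output when DEBUG
 * or dynamic debug is enabled for this file.
 */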

int
machine_kexec_prepare(struct kimage *kimage)
{
	kexec_image_info(kimage);

	if (_machine_kexec_prepare)
		return _machine_kexec_prepare(kimage);
	return 0;
}

void
machine_kexec_cleanup(struct kimage *kimage)
{
}

void
machine_shutdown(void)
{
	if (_machine_kexec_shutdown)
		_machine_kexec_shutdown();
}

void
machine_crash_shutdown(struct pt_regs *regs)
{
	if (_machine_crash_shutdown)
		_machine_crash_shutdown(regs);
	else
		default_machine_crash_shutdown(regs);
}

typedef void (*noretfun_t)(void) __noreturn;

void
machine_kexec(struct kimage *image)
{
	unsigned long reboot_code_buffer;
	unsigned long entry;
	unsigned long *ptr;

	reboot_code_buffer =
		(unsigned long)page_address(image->control_code_page);
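	/*
	 * control_code_page was reserved by the generic kexec core so that
	 * the architecture can place its relocation stub there; the stub is
	 * copied into it further down.
	 */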

	kexec_start_address =
		(unsigned long) phys_to_virt(image->start);
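	/*
	 * image->start is the physical entry point of the new kernel;
	 * phys_to_virt() turns it into the (C)KSEG0 address the relocation
	 * stub jumps to.
	 */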

	if (image->type == KEXEC_TYPE_DEFAULT) {
		kexec_indirection_page =
			(unsigned long) phys_to_virt(image->head & PAGE_MASK);
	} else {
		kexec_indirection_page = (unsigned long)&image->head;
	}
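	/*
	 * A crash (KEXEC_TYPE_CRASH) image is loaded directly at its final
	 * location, so image->head is expected to hold a single IND_DONE
	 * entry and can be used as the "page list" in place.
	 */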

	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);
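	/*
	 * Running the stub from the control page lets it keep executing
	 * while the kernel that is currently running gets overwritten.
	 */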

	/*
	 * The generic kexec code builds a page list with physical
	 * addresses.  They are directly accessible through KSEG0 (or
	 * CKSEG0 or XKPHYS on a 64-bit system), hence the
	 * phys_to_virt() call.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
	       phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}
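	/*
	 * phys_to_virt() is a plain offset on MIPS, so the IND_* flag bits
	 * in the low bits of every entry survive the conversion; the
	 * relocation stub is expected to mask them off again when it walks
	 * the list.
	 */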

	/*
	 * We do not want to be bothered from here on.
	 */
	local_irq_disable();

	printk("Will call new kernel at %08lx\n", image->start);
	printk("Bye ...\n");
	__flush_cache_all();
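	/*
	 * The flush above writes the copied stub and the converted page
	 * list back to memory, since the relocation stub runs with caching
	 * in an unknown state.
	 */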
#ifdef CONFIG_SMP
	/* All secondary CPUs may now jump to the relocated kexec_smp_wait loop. */
	relocated_kexec_smp_wait = reboot_code_buffer +
		(void *)(kexec_smp_wait - relocate_new_kernel);
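	/*
	 * Publish the relocated wait-loop address before announcing
	 * readiness: secondary CPUs are expected to spin on
	 * kexec_ready_to_reboot and then call relocated_kexec_smp_wait,
	 * so the pointer must be visible first.
	 */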
	smp_wmb();
	atomic_set(&kexec_ready_to_reboot, 1);
#endif
	((noretfun_t) reboot_code_buffer)();
}