/*
 * arch/ia64/kernel/relocate_kernel.S
 *
 * Relocate kexec'able kernel and start it
 *
 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
 * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
 * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mca_asm.h>
18
19 /* Must be relocatable PIC code callable as a C function
20 */
21GLOBAL_ENTRY(relocate_new_kernel)
22 .prologue
23 alloc r31=ar.pfs,4,0,0,0
24 .body
25.reloc_entry:
26{
27 rsm psr.i| psr.ic
28 mov r2=ip
29}
30 ;;
31{
32 flushrs // must be first insn in group
33 srlz.i
34}
35 ;;
36 dep r2=0,r2,61,3 //to physical address
37 ;;
38 //first switch to physical mode
39 add r3=1f-.reloc_entry, r2
40 movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC
41 mov ar.rsc=0 // put RSE in enforced lazy mode
42 ;;
43 add sp=(memory_stack_end - 16 - .reloc_entry),r2
44 add r8=(register_stack - .reloc_entry),r2
45 ;;
46 mov r18=ar.rnat
47 mov ar.bspstore=r8
48 ;;
49 mov cr.ipsr=r16
50 mov cr.iip=r3
51 mov cr.ifs=r0
52 srlz.i
53 ;;
54 mov ar.rnat=r18
55 rfi // note: this unmask MCA/INIT (psr.mc)
56 ;;
571:
58 //physical mode code begin
59 mov b6=in1
60 dep r28=0,in2,61,3 //to physical address
61
62 // purge all TC entries
63#define O(member) IA64_CPUINFO_##member##_OFFSET
64 GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
65 ;;
66 addl r17=O(PTCE_STRIDE),r2
67 addl r2=O(PTCE_BASE),r2
68 ;;
69 ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
70 ld4 r19=[r2],4 // r19=ptce_count[0]
71 ld4 r21=[r17],4 // r21=ptce_stride[0]
72 ;;
73 ld4 r20=[r2] // r20=ptce_count[1]
74 ld4 r22=[r17] // r22=ptce_stride[1]
75 mov r24=r0
76 ;;
77 adds r20=-1,r20
78 ;;
79#undef O
802:
81 cmp.ltu p6,p7=r24,r19
82(p7) br.cond.dpnt.few 4f
83 mov ar.lc=r20
843:
85 ptc.e r18
86 ;;
87 add r18=r22,r18
88 br.cloop.sptk.few 3b
89 ;;
90 add r18=r21,r18
91 add r24=1,r24
92 ;;
93 br.sptk.few 2b
944:
95 srlz.i
96 ;;
97 // purge TR entry for kernel text and data
98 movl r16=KERNEL_START
99 mov r18=KERNEL_TR_PAGE_SHIFT<<2
100 ;;
101 ptr.i r16, r18
102 ptr.d r16, r18
103 ;;
104 srlz.i
105 ;;
106
107 // purge TR entry for pal code
108 mov r16=in3
109 mov r18=IA64_GRANULE_SHIFT<<2
110 ;;
111 ptr.i r16,r18
112 ;;
113 srlz.i
114 ;;
115
116 // purge TR entry for stack
117 mov r16=IA64_KR(CURRENT_STACK)
118 ;;
119 shl r16=r16,IA64_GRANULE_SHIFT
120 movl r19=PAGE_OFFSET
121 ;;
122 add r16=r19,r16
123 mov r18=IA64_GRANULE_SHIFT<<2
124 ;;
125 ptr.d r16,r18
126 ;;
127 srlz.i
128 ;;
129
130 //copy segments
131 movl r16=PAGE_MASK
132 mov r30=in0 // in0 is page_list
133 br.sptk.few .dest_page
134 ;;
135.loop:
136 ld8 r30=[in0], 8;;
137.dest_page:
138 tbit.z p0, p6=r30, 0;; // 0x1 dest page
139(p6) and r17=r30, r16
140(p6) br.cond.sptk.few .loop;;
141
142 tbit.z p0, p6=r30, 1;; // 0x2 indirect page
143(p6) and in0=r30, r16
144(p6) br.cond.sptk.few .loop;;
145
146 tbit.z p0, p6=r30, 2;; // 0x4 end flag
147(p6) br.cond.sptk.few .end_loop;;
148
149 tbit.z p6, p0=r30, 3;; // 0x8 source page
150(p6) br.cond.sptk.few .loop
151
152 and r18=r30, r16
153
154 // simple copy page, may optimize later
155 movl r14=PAGE_SIZE/8 - 1;;
156 mov ar.lc=r14;;
1571:
158 ld8 r14=[r18], 8;;
159 st8 [r17]=r14;;
160 fc.i r17
161 add r17=8, r17
162 br.ctop.sptk.few 1b
163 br.sptk.few .loop
164 ;;
165
166.end_loop:
167 sync.i // for fc.i
168 ;;
169 srlz.i
170 ;;
171 srlz.d
172 ;;
173 br.call.sptk.many b0=b6;;
174
175.align 32
176memory_stack:
177 .fill 8192, 1, 0
178memory_stack_end:
179register_stack:
180 .fill 8192, 1, 0
181register_stack_end:
182relocate_new_kernel_end:
183END(relocate_new_kernel)
184
185.global relocate_new_kernel_size
186relocate_new_kernel_size:
187 data8 relocate_new_kernel_end - relocate_new_kernel
188
189GLOBAL_ENTRY(ia64_dump_cpu_regs)
190 .prologue
191 alloc loc0=ar.pfs,1,2,0,0
192 .body
193 mov ar.rsc=0 // put RSE in enforced lazy mode
194 add loc1=4*8, in0 // save r4 and r5 first
195 ;;
196{
197 flushrs // flush dirty regs to backing store
198 srlz.i
199}
200 st8 [loc1]=r4, 8
201 ;;
202 st8 [loc1]=r5, 8
203 ;;
204 add loc1=32*8, in0
205 mov r4=ar.rnat
206 ;;
207 st8 [in0]=r0, 8 // r0
208 st8 [loc1]=r4, 8 // rnat
209 mov r5=pr
210 ;;
211 st8 [in0]=r1, 8 // r1
212 st8 [loc1]=r5, 8 // pr
213 mov r4=b0
214 ;;
215 st8 [in0]=r2, 8 // r2
216 st8 [loc1]=r4, 8 // b0
217 mov r5=b1;
218 ;;
219 st8 [in0]=r3, 24 // r3
220 st8 [loc1]=r5, 8 // b1
221 mov r4=b2
222 ;;
223 st8 [in0]=r6, 8 // r6
224 st8 [loc1]=r4, 8 // b2
225 mov r5=b3
226 ;;
227 st8 [in0]=r7, 8 // r7
228 st8 [loc1]=r5, 8 // b3
229 mov r4=b4
230 ;;
231 st8 [in0]=r8, 8 // r8
232 st8 [loc1]=r4, 8 // b4
233 mov r5=b5
234 ;;
235 st8 [in0]=r9, 8 // r9
236 st8 [loc1]=r5, 8 // b5
237 mov r4=b6
238 ;;
239 st8 [in0]=r10, 8 // r10
240 st8 [loc1]=r5, 8 // b6
241 mov r5=b7
242 ;;
243 st8 [in0]=r11, 8 // r11
244 st8 [loc1]=r5, 8 // b7
245 mov r4=b0
246 ;;
247 st8 [in0]=r12, 8 // r12
248 st8 [loc1]=r4, 8 // ip
249 mov r5=loc0
250 ;;
251 st8 [in0]=r13, 8 // r13
252 extr.u r5=r5, 0, 38 // ar.pfs.pfm
253 mov r4=r0 // user mask
254 ;;
255 st8 [in0]=r14, 8 // r14
256 st8 [loc1]=r5, 8 // cfm
257 ;;
258 st8 [in0]=r15, 8 // r15
259 st8 [loc1]=r4, 8 // user mask
260 mov r5=ar.rsc
261 ;;
262 st8 [in0]=r16, 8 // r16
263 st8 [loc1]=r5, 8 // ar.rsc
264 mov r4=ar.bsp
265 ;;
266 st8 [in0]=r17, 8 // r17
267 st8 [loc1]=r4, 8 // ar.bsp
268 mov r5=ar.bspstore
269 ;;
270 st8 [in0]=r18, 8 // r18
271 st8 [loc1]=r5, 8 // ar.bspstore
272 mov r4=ar.rnat
273 ;;
274 st8 [in0]=r19, 8 // r19
275 st8 [loc1]=r4, 8 // ar.rnat
276 mov r5=ar.ccv
277 ;;
278 st8 [in0]=r20, 8 // r20
279 st8 [loc1]=r5, 8 // ar.ccv
280 mov r4=ar.unat
281 ;;
282 st8 [in0]=r21, 8 // r21
283 st8 [loc1]=r4, 8 // ar.unat
284 mov r5 = ar.fpsr
285 ;;
286 st8 [in0]=r22, 8 // r22
287 st8 [loc1]=r5, 8 // ar.fpsr
288 mov r4 = ar.unat
289 ;;
290 st8 [in0]=r23, 8 // r23
291 st8 [loc1]=r4, 8 // unat
292 mov r5 = ar.fpsr
293 ;;
294 st8 [in0]=r24, 8 // r24
295 st8 [loc1]=r5, 8 // fpsr
296 mov r4 = ar.pfs
297 ;;
298 st8 [in0]=r25, 8 // r25
299 st8 [loc1]=r4, 8 // ar.pfs
300 mov r5 = ar.lc
301 ;;
302 st8 [in0]=r26, 8 // r26
303 st8 [loc1]=r5, 8 // ar.lc
304 mov r4 = ar.ec
305 ;;
306 st8 [in0]=r27, 8 // r27
307 st8 [loc1]=r4, 8 // ar.ec
308 mov r5 = ar.csd
309 ;;
310 st8 [in0]=r28, 8 // r28
311 st8 [loc1]=r5, 8 // ar.csd
312 mov r4 = ar.ssd
313 ;;
314 st8 [in0]=r29, 8 // r29
315 st8 [loc1]=r4, 8 // ar.ssd
316 ;;
317 st8 [in0]=r30, 8 // r30
318 ;;
319 st8 [in0]=r31, 8 // r31
320 mov ar.pfs=loc0
321 ;;
322 br.ret.sptk.many rp
323END(ia64_dump_cpu_regs)
324
325
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/ia64/kernel/relocate_kernel.S
 *
 * Relocate kexec'able kernel and start it
 *
 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
 * Copyright (C) 2005 Khalid Aziz <khalid.aziz@hp.com>
 * Copyright (C) 2005 Intel Corp, Zou Nan hai <nanhai.zou@intel.com>
 */
#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/kregs.h>
#include <asm/page.h>
#include <asm/mca_asm.h>
16
17 /* Must be relocatable PIC code callable as a C function
18 */
19GLOBAL_ENTRY(relocate_new_kernel)
20 .prologue
21 alloc r31=ar.pfs,4,0,0,0
22 .body
23.reloc_entry:
24{
25 rsm psr.i| psr.ic
26 mov r2=ip
27}
28 ;;
29{
30 flushrs // must be first insn in group
31 srlz.i
32}
33 ;;
34 dep r2=0,r2,61,3 //to physical address
35 ;;
36 //first switch to physical mode
37 add r3=1f-.reloc_entry, r2
38 movl r16 = IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_IC
39 mov ar.rsc=0 // put RSE in enforced lazy mode
40 ;;
41 add sp=(memory_stack_end - 16 - .reloc_entry),r2
42 add r8=(register_stack - .reloc_entry),r2
43 ;;
44 mov r18=ar.rnat
45 mov ar.bspstore=r8
46 ;;
47 mov cr.ipsr=r16
48 mov cr.iip=r3
49 mov cr.ifs=r0
50 srlz.i
51 ;;
52 mov ar.rnat=r18
53 rfi // note: this unmask MCA/INIT (psr.mc)
54 ;;
551:
56 //physical mode code begin
57 mov b6=in1
58 dep r28=0,in2,61,3 //to physical address
59
60 // purge all TC entries
61#define O(member) IA64_CPUINFO_##member##_OFFSET
62 GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
63 ;;
64 addl r17=O(PTCE_STRIDE),r2
65 addl r2=O(PTCE_BASE),r2
66 ;;
67 ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
68 ld4 r19=[r2],4 // r19=ptce_count[0]
69 ld4 r21=[r17],4 // r21=ptce_stride[0]
70 ;;
71 ld4 r20=[r2] // r20=ptce_count[1]
72 ld4 r22=[r17] // r22=ptce_stride[1]
73 mov r24=r0
74 ;;
75 adds r20=-1,r20
76 ;;
77#undef O
782:
79 cmp.ltu p6,p7=r24,r19
80(p7) br.cond.dpnt.few 4f
81 mov ar.lc=r20
823:
83 ptc.e r18
84 ;;
85 add r18=r22,r18
86 br.cloop.sptk.few 3b
87 ;;
88 add r18=r21,r18
89 add r24=1,r24
90 ;;
91 br.sptk.few 2b
924:
93 srlz.i
94 ;;
95 // purge TR entry for kernel text and data
96 movl r16=KERNEL_START
97 mov r18=KERNEL_TR_PAGE_SHIFT<<2
98 ;;
99 ptr.i r16, r18
100 ptr.d r16, r18
101 ;;
102 srlz.i
103 ;;
104
105 // purge TR entry for pal code
106 mov r16=in3
107 mov r18=IA64_GRANULE_SHIFT<<2
108 ;;
109 ptr.i r16,r18
110 ;;
111 srlz.i
112 ;;
113
114 // purge TR entry for stack
115 mov r16=IA64_KR(CURRENT_STACK)
116 ;;
117 shl r16=r16,IA64_GRANULE_SHIFT
118 movl r19=PAGE_OFFSET
119 ;;
120 add r16=r19,r16
121 mov r18=IA64_GRANULE_SHIFT<<2
122 ;;
123 ptr.d r16,r18
124 ;;
125 srlz.i
126 ;;
127
128 //copy segments
129 movl r16=PAGE_MASK
130 mov r30=in0 // in0 is page_list
131 br.sptk.few .dest_page
132 ;;
133.loop:
134 ld8 r30=[in0], 8;;
135.dest_page:
136 tbit.z p0, p6=r30, 0;; // 0x1 dest page
137(p6) and r17=r30, r16
138(p6) br.cond.sptk.few .loop;;
139
140 tbit.z p0, p6=r30, 1;; // 0x2 indirect page
141(p6) and in0=r30, r16
142(p6) br.cond.sptk.few .loop;;
143
144 tbit.z p0, p6=r30, 2;; // 0x4 end flag
145(p6) br.cond.sptk.few .end_loop;;
146
147 tbit.z p6, p0=r30, 3;; // 0x8 source page
148(p6) br.cond.sptk.few .loop
149
150 and r18=r30, r16
151
152 // simple copy page, may optimize later
153 movl r14=PAGE_SIZE/8 - 1;;
154 mov ar.lc=r14;;
1551:
156 ld8 r14=[r18], 8;;
157 st8 [r17]=r14;;
158 fc.i r17
159 add r17=8, r17
160 br.ctop.sptk.few 1b
161 br.sptk.few .loop
162 ;;
163
164.end_loop:
165 sync.i // for fc.i
166 ;;
167 srlz.i
168 ;;
169 srlz.d
170 ;;
171 br.call.sptk.many b0=b6;;
172
173.align 32
174memory_stack:
175 .fill 8192, 1, 0
176memory_stack_end:
177register_stack:
178 .fill 8192, 1, 0
179register_stack_end:
180relocate_new_kernel_end:
181END(relocate_new_kernel)
182
183.global relocate_new_kernel_size
184relocate_new_kernel_size:
185 data8 relocate_new_kernel_end - relocate_new_kernel
186
187GLOBAL_ENTRY(ia64_dump_cpu_regs)
188 .prologue
189 alloc loc0=ar.pfs,1,2,0,0
190 .body
191 mov ar.rsc=0 // put RSE in enforced lazy mode
192 add loc1=4*8, in0 // save r4 and r5 first
193 ;;
194{
195 flushrs // flush dirty regs to backing store
196 srlz.i
197}
198 st8 [loc1]=r4, 8
199 ;;
200 st8 [loc1]=r5, 8
201 ;;
202 add loc1=32*8, in0
203 mov r4=ar.rnat
204 ;;
205 st8 [in0]=r0, 8 // r0
206 st8 [loc1]=r4, 8 // rnat
207 mov r5=pr
208 ;;
209 st8 [in0]=r1, 8 // r1
210 st8 [loc1]=r5, 8 // pr
211 mov r4=b0
212 ;;
213 st8 [in0]=r2, 8 // r2
214 st8 [loc1]=r4, 8 // b0
215 mov r5=b1;
216 ;;
217 st8 [in0]=r3, 24 // r3
218 st8 [loc1]=r5, 8 // b1
219 mov r4=b2
220 ;;
221 st8 [in0]=r6, 8 // r6
222 st8 [loc1]=r4, 8 // b2
223 mov r5=b3
224 ;;
225 st8 [in0]=r7, 8 // r7
226 st8 [loc1]=r5, 8 // b3
227 mov r4=b4
228 ;;
229 st8 [in0]=r8, 8 // r8
230 st8 [loc1]=r4, 8 // b4
231 mov r5=b5
232 ;;
233 st8 [in0]=r9, 8 // r9
234 st8 [loc1]=r5, 8 // b5
235 mov r4=b6
236 ;;
237 st8 [in0]=r10, 8 // r10
238 st8 [loc1]=r5, 8 // b6
239 mov r5=b7
240 ;;
241 st8 [in0]=r11, 8 // r11
242 st8 [loc1]=r5, 8 // b7
243 mov r4=b0
244 ;;
245 st8 [in0]=r12, 8 // r12
246 st8 [loc1]=r4, 8 // ip
247 mov r5=loc0
248 ;;
249 st8 [in0]=r13, 8 // r13
250 extr.u r5=r5, 0, 38 // ar.pfs.pfm
251 mov r4=r0 // user mask
252 ;;
253 st8 [in0]=r14, 8 // r14
254 st8 [loc1]=r5, 8 // cfm
255 ;;
256 st8 [in0]=r15, 8 // r15
257 st8 [loc1]=r4, 8 // user mask
258 mov r5=ar.rsc
259 ;;
260 st8 [in0]=r16, 8 // r16
261 st8 [loc1]=r5, 8 // ar.rsc
262 mov r4=ar.bsp
263 ;;
264 st8 [in0]=r17, 8 // r17
265 st8 [loc1]=r4, 8 // ar.bsp
266 mov r5=ar.bspstore
267 ;;
268 st8 [in0]=r18, 8 // r18
269 st8 [loc1]=r5, 8 // ar.bspstore
270 mov r4=ar.rnat
271 ;;
272 st8 [in0]=r19, 8 // r19
273 st8 [loc1]=r4, 8 // ar.rnat
274 mov r5=ar.ccv
275 ;;
276 st8 [in0]=r20, 8 // r20
277 st8 [loc1]=r5, 8 // ar.ccv
278 mov r4=ar.unat
279 ;;
280 st8 [in0]=r21, 8 // r21
281 st8 [loc1]=r4, 8 // ar.unat
282 mov r5 = ar.fpsr
283 ;;
284 st8 [in0]=r22, 8 // r22
285 st8 [loc1]=r5, 8 // ar.fpsr
286 mov r4 = ar.unat
287 ;;
288 st8 [in0]=r23, 8 // r23
289 st8 [loc1]=r4, 8 // unat
290 mov r5 = ar.fpsr
291 ;;
292 st8 [in0]=r24, 8 // r24
293 st8 [loc1]=r5, 8 // fpsr
294 mov r4 = ar.pfs
295 ;;
296 st8 [in0]=r25, 8 // r25
297 st8 [loc1]=r4, 8 // ar.pfs
298 mov r5 = ar.lc
299 ;;
300 st8 [in0]=r26, 8 // r26
301 st8 [loc1]=r5, 8 // ar.lc
302 mov r4 = ar.ec
303 ;;
304 st8 [in0]=r27, 8 // r27
305 st8 [loc1]=r4, 8 // ar.ec
306 mov r5 = ar.csd
307 ;;
308 st8 [in0]=r28, 8 // r28
309 st8 [loc1]=r5, 8 // ar.csd
310 mov r4 = ar.ssd
311 ;;
312 st8 [in0]=r29, 8 // r29
313 st8 [loc1]=r4, 8 // ar.ssd
314 ;;
315 st8 [in0]=r30, 8 // r30
316 ;;
317 st8 [in0]=r31, 8 // r31
318 mov ar.pfs=loc0
319 ;;
320 br.ret.sptk.many rp
321END(ia64_dump_cpu_regs)