/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-compat.h>

/*
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(load_vr_state)
	li	r4,VRSTATE_VSCR
	lvx	v0,r4,r3
	mtvscr	v0
	REST_32VRS(0,r4,r3)
	blr
EXPORT_SYMBOL(load_vr_state)
_ASM_NOKPROBE_SYMBOL(load_vr_state); /* used by restore_math */
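
/*
 * Rough C view of this pair of routines (a sketch; the actual prototypes
 * are declared in the kernel's C headers):
 *
 *	void load_vr_state(struct thread_vr_state *vr);
 *	void store_vr_state(struct thread_vr_state *vr);
 *
 * lvx/stvx use indexed addressing, so "lvx v0,r4,r3" moves the 16 bytes
 * at r3 + r4, i.e. the VSCR image of *vr when r4 == VRSTATE_VSCR.
 */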

/*
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(store_vr_state)
	SAVE_32VRS(0, r4, r3)
	mfvscr	v0
	li	r4, VRSTATE_VSCR
	stvx	v0, r4, r3
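	/*
	 * mfvscr clobbered v0 above; reload the caller's v0 from the slot
	 * SAVE_32VRS just wrote at offset 0, so a live user value in v0
	 * survives this call.
	 */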
	lvx	v0, 0, r3
	blr
EXPORT_SYMBOL(store_vr_state)

/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (i.e., no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3S_64
	/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
	ori	r5,r5,MSR_RI
#endif
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

	/*
	 * While userspace in general ignores VRSAVE, glibc uses it as a boolean
	 * to optimise userspace context save/restore. Whenever we take an
	 * altivec unavailable exception we must set VRSAVE to something
	 * non-zero. Set it to all 1s. See also the programming note in the ISA.
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	addi	r5,r2,THREAD
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S_64
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
#endif
	li	r4,1
	stb	r4,THREAD_LOAD_VEC(r5)
	addi	r6,r5,THREAD_VRSTATE
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	v0,r10,r6
	mtvscr	v0
	REST_32VRS(0,r4,r6)
	/* restore registers and return */
	blr
_ASM_NOKPROBE_SYMBOL(load_up_altivec)
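
/*
 * In rough C-like pseudocode, the 64-bit path above amounts to (a sketch
 * of the flow, not the real exception entry/exit):
 *
 *	msr |= MSR_RI | MSR_VEC;		// kernel may touch VMX now
 *	if (mfspr(SPRN_VRSAVE) == 0)
 *		mtspr(SPRN_VRSAVE, -1);
 *	regs->msr |= MSR_VEC;			// task keeps VMX after return
 *	current->thread.load_vec = 1;
 *	current->thread.used_vr = 1;
 *	load_vr_state(&current->thread.vr_state);
 */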

/*
 * save_altivec(tsk)
 * Save tsk's vector registers to its thread_struct.
 */
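/*
 * If tsk->thread.vr_save_area is non-NULL, the registers are saved there
 * instead of to tsk->thread.vr_state. As in store_vr_state above, v0 is
 * reloaded at the end because mfvscr clobbers it.
 */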
_GLOBAL(save_altivec)
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r7,0
	bne	2f
	addi	r7,r3,THREAD_VRSTATE
2:	SAVE_32VRS(0,r4,r7)
	mfvscr	v0
	li	r4,VRSTATE_VSCR
	stvx	v0,r4,r7
	lvx	v0,0,r7
	blr

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and altivec loads, but first check to see if they have
 * been done already.
 */
_GLOBAL(load_up_vsx)
/* Load the FP and AltiVec registers if that hasn't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

#ifdef CONFIG_PPC_BOOK3S_64
	/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
	li	r5,MSR_RI
	mtmsrd	r5,1
#endif

	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
	b	fast_interrupt_return_srr
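
/*
 * Note the beql idiom above: "branch and link if equal" calls load_up_fpu
 * or load_up_altivec only when the corresponding MSR bit is still clear,
 * and the callee returns here via LR. Only then is MSR_VSX set in the
 * saved MSR, so the task owns FP, VMX and VSX after the interrupt
 * returns.
 */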

#endif /* CONFIG_VSX */


/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers. These routines must be called
 * with preempt disabled.
 */
	.data
#ifdef CONFIG_PPC32
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

fpzero:
	.quad	0
fpone:
	.quad	0x3ff0000000000000	/* 1.0 */
fphalf:
	.quad	0x3fe0000000000000	/* 0.5 */

#ifdef CONFIG_PPC_KERNEL_PCREL
#define LDCONST(fr, name)		\
	pla	r11,name@pcrel;		\
	lfd	fr,0(r11)
#else
#define LDCONST(fr, name)		\
	addis	r11,r2,name@toc@ha;	\
	lfd	fr,name@toc@l(r11)
#endif
#endif
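
/*
 * Three addressing modes, one per build type: 32-bit kernels use an
 * absolute lis/lfs pair (name@ha/name@l), 64-bit TOC builds form the
 * constant's offset from the TOC pointer in r2 (name@toc@ha/name@toc@l),
 * and PC-relative builds materialise the address directly with pla.
 * For example, LDCONST(fr1, fpone) on a TOC build expands to:
 *	addis	r11,r2,fpone@toc@ha
 *	lfd	fr1,fpone@toc@l(r11)
 */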
	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
SYM_FUNC_START_LOCAL(fpenable)
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31
	MTFSF_L(fr1)
	blr
SYM_FUNC_END(fpenable)
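
/*
 * 64-byte frame created by fpenable and popped by fpdisable:
 *	 8(r1)	caller's fr31 (fr31 holds the old FPSCR image meanwhile)
 *	16(r1)	caller's fr1
 *	24(r1)	caller's fr0
 *	32-56(r1)	scratch slots for fr2-fr5 in the routines that use them
 * fpdisable restores FPSCR from fr31, reloads the saved FPRs, restores
 * the original MSR from r10, then returns via the caller's LR, which
 * each entry point stashes in r12 before calling fpenable.
 */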

fpdisable:
	mtlr	r12
	MTFSF_L(fr31)
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
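
/*
 * The loop above (and vsubfp below) is the scalar equivalent of one
 * 128-bit VMX operation; in C terms:
 *
 *	for (i = 0; i < 4; i++)
 *		dst[i] = a[i] + b[i];	// four 32-bit lanes; r3=dst, r4/r5=srcs
 */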

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable
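
/*
 * fmadds frt,fra,frc,frb computes fra*frc + frb, so each element above is
 *	dst[i] = a[i] * c[i] + b[i];
 * matching vmaddfp's vA*vC + vB semantics, where the addend vB is the
 * second source argument (r5).
 */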

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable
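
/*
 * fnmsubs negates the multiply-subtract result, i.e. each element is
 *	dst[i] = -(a[i] * c[i] - b[i]);
 * which is the vnmsubfp definition.
 */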

/*
 * Vector reciprocal estimate. We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
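/*
 * Derivation of the update used below: for f(r) = 1/r^2 - s, whose root
 * is r = 1/sqrt(s), Newton's method gives
 *	r' = r - f(r)/f'(r) = r + 0.5 * r * (1 - s * r * r)
 * which is exactly the fmuls/fnmsubs/fmadds sequence in the loop. Each
 * iteration roughly doubles the number of good bits in the estimate.
 */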
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable