/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/asm-compat.h>

/*
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
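/*
 * As coded below: r3 = pointer to the thread_vr_state to load, with
 * VSCR at offset VRSTATE_VSCR inside it.  r4 is scratch; v0 gets its
 * final value from REST_32VRS.
 */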
_GLOBAL(load_vr_state)
	li	r4,VRSTATE_VSCR
	lvx	v0,r4,r3
	mtvscr	v0
	REST_32VRS(0,r4,r3)
	blr
EXPORT_SYMBOL(load_vr_state)
_ASM_NOKPROBE_SYMBOL(load_vr_state); /* used by restore_math */

/*
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
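/*
 * Mirror of load_vr_state: r3 = destination thread_vr_state.  v0 is
 * written out by SAVE_32VRS before being reused to stage the VSCR,
 * so the saved image stays consistent.
 */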
_GLOBAL(store_vr_state)
	SAVE_32VRS(0, r4, r3)
	mfvscr	v0
	li	r4, VRSTATE_VSCR
	stvx	v0, r4, r3
	blr
EXPORT_SYMBOL(store_vr_state)

/*
 * load_up_altivec:
 * Load the current task's VMX state from its thread_struct and set
 * MSR_VEC in the saved MSR image so VMX is enabled again on return
 * from the unavailable exception.
 * On SMP we know the VMX is free, since we give it up on every
 * context switch (i.e. there is no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
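/*
 * Entry state, as used below: on 64-bit, r13 is the PACA and r12 the
 * interrupted MSR image saved at _MSR(r1); on 32-bit, r2 is current
 * and r9 holds the MSR image that fast_exception_return will restore.
 */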
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3S_64
	/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
	ori	r5,r5,MSR_RI
#endif
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

	/*
	 * While userspace in general ignores VRSAVE, glibc uses it as a boolean
	 * to optimise userspace context save/restore. Whenever we take an
	 * altivec unavailable exception we must set VRSAVE to something
	 * non-zero. Set it to all 1s. See also the programming note in the ISA.
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	addi	r5,r2,THREAD
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S_64
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
#endif
	li	r4,1
	stb	r4,THREAD_LOAD_VEC(r5)
	addi	r6,r5,THREAD_VRSTATE
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	v0,r10,r6
	mtvscr	v0
	REST_32VRS(0,r4,r6)
	/* restore registers and return */
	blr
_ASM_NOKPROBE_SYMBOL(load_up_altivec)

/*
 * save_altivec(tsk)
 * Save tsk's vector registers to its thread_struct.
 */
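/*
 * As coded below: if the thread has a separate save area pointer set
 * (THREAD_VRSAVEAREA non-NULL) the registers are stored there,
 * otherwise into the thread's own VRSTATE.
 */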
_GLOBAL(save_altivec)
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r7,0
	bne	2f
	addi	r7,r3,THREAD_VRSTATE
2:	SAVE_32VRS(0,r4,r7)
	mfvscr	v0
	li	r4,VRSTATE_VSCR
	stvx	v0,r4,r7
	blr

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Load the current task's FP and VMX state (together, its VSX state)
 * and enable VSX for it on return from the unavailable exception.
 * Reuse the FP and VMX load paths (load_up_fpu/load_up_altivec), but
 * first check whether each piece of state has already been loaded.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
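/*
 * beql+ branches-and-links only when CR0[EQ] is set, i.e. only when
 * the andi./andis. below finds the MSR bit clear, so each helper runs
 * exactly when that piece of state still needs to be loaded.
 */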
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

#ifdef CONFIG_PPC_BOOK3S_64
	/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
	li	r5,MSR_RI
	mtmsrd	r5,1
#endif

	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
	b	fast_interrupt_return_srr

#endif /* CONFIG_VSX */


/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers. These routines must be called
 * with preempt disabled.
 */
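/*
 * Each routine below mirrors one VMX floating-point instruction
 * (vaddfp, vsubfp, vmaddfp, vnmsubfp, vrefp, vrsqrtefp), computed
 * with scalar FP on the 4 single-precision elements of a 128-bit
 * vector: r3 points at the destination vector, r4 (plus r5/r6 where
 * used) at the sources.
 */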
	.data
#ifdef CONFIG_PPC32
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

fpzero:
	.quad	0
fpone:
	.quad	0x3ff0000000000000	/* 1.0 */
fphalf:
	.quad	0x3fe0000000000000	/* 0.5 */

#define LDCONST(fr, name)		\
	addis	r11,r2,name@toc@ha;	\
	lfd	fr,name@toc@l(r11)
#endif
	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
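/*
 * Pairing convention, as used by every routine below: the caller does
 * "mflr r12" then "bl fpenable", and finishes with "b fpdisable",
 * which returns to the original caller through the r12 copy of LR.
 * Within the 64-byte frame: 8(r1) = old fr31, 16(r1) = old fr1,
 * 24(r1) = old fr0; 32(r1)..56(r1) are free for routines that also
 * save fr2-fr5.  r10 carries the old MSR and fr31 the old FPSCR
 * across the region.
 */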
SYM_FUNC_START_LOCAL(fpenable)
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31
	MTFSF_L(fr1)
	blr
SYM_FUNC_END(fpenable)

fpdisable:
	mtlr	r12
	MTFSF_L(fr31)
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector reciprocal estimate. We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
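/*
 * Note: fdivs yields the correctly rounded single-precision quotient,
 * which more than satisfies the estimate accuracy vrefp requires.
 */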
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
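/*
 * Worked step, from Newton-Raphson on f(r) = 1/r^2 - s:
 *   r' = r - f(r)/f'(r) = r + 0.5 * r * (1 - s * r * r)
 * which is exactly the fmuls/fnmsubs/fmadds sequence in the loop
 * below, applied twice.  Each step roughly doubles the number of
 * correct bits in the frsqrte estimate.
 */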
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable