v6.13.7
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-compat.h>

/*
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(load_vr_state)
	li	r4,VRSTATE_VSCR
	lvx	v0,r4,r3
	mtvscr	v0
	REST_32VRS(0,r4,r3)
	blr
EXPORT_SYMBOL(load_vr_state)
_ASM_NOKPROBE_SYMBOL(load_vr_state); /* used by restore_math */

/*
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(store_vr_state)
	SAVE_32VRS(0, r4, r3)
	mfvscr	v0
	li	r4, VRSTATE_VSCR
	stvx	v0, r4, r3
	lvx	v0, 0, r3
	blr
EXPORT_SYMBOL(store_vr_state)
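
For reference, the C-side view of this pair: struct thread_vr_state holds the 32 vector registers followed by the VSCR image, and the powerpc headers declare the two entry points as ordinary functions. The sync_vr() caller below is purely hypothetical, a minimal sketch assuming VMX is already enabled in the MSR:

	/* C-side declarations; sync_vr() is a hypothetical caller. */
	extern void load_vr_state(struct thread_vr_state *v);
	extern void store_vr_state(struct thread_vr_state *v);

	static void sync_vr(struct thread_vr_state *v)
	{
		store_vr_state(v);	/* write v0-v31 and VSCR out to *v */
		load_vr_state(v);	/* and read them back */
	}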

/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3S_64
	/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
	ori	r5,r5,MSR_RI
#endif
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

	/*
	 * While userspace in general ignores VRSAVE, glibc uses it as a boolean
	 * to optimise userspace context save/restore. Whenever we take an
	 * altivec unavailable exception we must set VRSAVE to something non
	 * zero. Set it to all 1s. See also the programming note in the ISA.
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	addi	r5,r2,THREAD
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S_64
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
#endif
	li	r4,1
	stb	r4,THREAD_LOAD_VEC(r5)
	addi	r6,r5,THREAD_VRSTATE
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	v0,r10,r6
	mtvscr	v0
	REST_32VRS(0,r4,r6)
	/* restore registers and return */
	blr
_ASM_NOKPROBE_SYMBOL(load_up_altivec)
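
The VRSAVE fixup at the top of load_up_altivec amounts to a one-line policy; a minimal C sketch (mfspr/mtspr as the usual SPR accessors, purely illustrative):

	/* Illustrative sketch of the VRSAVE policy above. */
	if (mfspr(SPRN_VRSAVE) == 0)
		mtspr(SPRN_VRSAVE, -1);	/* all 1s: glibc then saves/restores every VR */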

/*
 * save_altivec(tsk)
 * Save the vector registers to its thread_struct
 */
_GLOBAL(save_altivec)
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r7,0
	bne	2f
	addi	r7,r3,THREAD_VRSTATE
2:	SAVE_32VRS(0,r4,r7)
	mfvscr	v0
	li	r4,VRSTATE_VSCR
	stvx	v0,r4,r7
	lvx	v0,0,r7
	blr
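
The PPC_LCMPI/bne pair above selects the destination buffer. In C terms (field names correspond to the THREAD_VRSAVEAREA and THREAD_VRSTATE offsets; the sketch itself is an assumption, not kernel code):

	/* Hypothetical C equivalent of the save-area selection in save_altivec. */
	struct thread_vr_state *dst = tsk->thread.vr_save_area;

	if (!dst)
		dst = &tsk->thread.vr_state;
	/* the SAVE_32VRS plus mfvscr/stvx sequence then fills *dst */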

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

#ifdef CONFIG_PPC_BOOK3S_64
	/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
	li	r5,MSR_RI
	mtmsrd	r5,1
#endif

	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
	b	fast_interrupt_return_srr

#endif /* CONFIG_VSX */
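
Since VSX instructions operate on the combined FP and VMX register files, load_up_vsx first makes sure both halves are loaded before turning on MSR_VSX. A hedged C sketch of that control flow (regs->msr stands in for the saved MSR image in r12; the "calls" are the beql+ branches above):

	/* Illustrative control flow of load_up_vsx. */
	if (!(regs->msr & MSR_FP))
		load_up_fpu();		/* FP registers not loaded yet */
	if (!(regs->msr & MSR_VEC))
		load_up_altivec();	/* VMX registers not loaded yet */
	regs->msr |= MSR_VSX;		/* let userspace retry the VSX insn */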

/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
	.data
#ifdef CONFIG_PPC32
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

fpzero:
	.quad	0
fpone:
	.quad	0x3ff0000000000000	/* 1.0 */
fphalf:
	.quad	0x3fe0000000000000	/* 0.5 */

#ifdef CONFIG_PPC_KERNEL_PCREL
#define LDCONST(fr, name)		\
	pla	r11,name@pcrel;		\
	lfd	fr,0(r11)
#else
#define LDCONST(fr, name)		\
	addis	r11,r2,name@toc@ha;	\
	lfd	fr,name@toc@l(r11)
#endif
#endif
	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
SYM_FUNC_START_LOCAL(fpenable)
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31
	MTFSF_L(fr1)
	blr
SYM_FUNC_END(fpenable)

fpdisable:
	mtlr	r12
	MTFSF_L(fr31)
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
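
Each of these helpers emulates one AltiVec floating-point instruction with scalar FP, one 32-bit lane per loop iteration (CTR = 4). For example, vaddfp corresponds to the self-contained C loop below; the prototype shape (r3 = destination, r4/r5 = sources) is inferred from the register usage:

	/* Illustrative C equivalent of vaddfp. */
	void vaddfp_c(float *dst, const float *a, const float *b)
	{
		int i;

		for (i = 0; i < 4; i++)
			dst[i] = a[i] + b[i];	/* lfsx, lfsx, fadds, stfsx */
	}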

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable
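
The loop body is two unrolled Newton-Raphson steps for f(r) = 1/r^2 - s, i.e. r' = r + 0.5 * r * (1 - s * r * r), applied to frsqrte's initial estimate. A single-lane C rendering (sqrtf stands in for frsqrte's rough estimate purely for illustration):

	#include <math.h>

	/* Illustrative single-lane version of the vrsqrtefp refinement. */
	static float rsqrt_nr(float s)
	{
		float r = 1.0f / sqrtf(s);	/* stands in for frsqrte's estimate */
		int i;

		for (i = 0; i < 2; i++)		/* two Newton-Raphson iterations */
			r += 0.5f * r * (1.0f - s * r * r);
		return r;
	}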
v4.6
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* void do_load_up_transact_altivec(struct thread_struct *thread)
 *
 * This is similar to load_up_altivec but for the transactional version of the
 * vector regs.  It doesn't mess with the task MSR or valid flags.
 * Furthermore, VEC laziness is not supported with TM currently.
 */
_GLOBAL(do_load_up_transact_altivec)
	mfmsr	r6
	oris	r5,r6,MSR_VEC@h
	MTMSRD(r5)
	isync

	li	r4,1
	stw	r4,THREAD_USED_VR(r3)

	li	r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
	lvx	v0,r10,r3
	mtvscr	v0
	addi	r10,r3,THREAD_TRANSACT_VRSTATE
	REST_32VRS(0,r4,r10)

	blr
#endif
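
The block comment above gives the C prototype directly; a hypothetical caller, assuming (as that comment says) the MSR and valid-flag handling happens elsewhere:

	/* Prototype from the comment above; the caller is hypothetical. */
	extern void do_load_up_transact_altivec(struct thread_struct *thread);

	static void recheckpoint_vec(struct task_struct *tsk)
	{
		do_load_up_transact_altivec(&tsk->thread);	/* load checkpointed VRs */
	}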

/*
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(load_vr_state)
	li	r4,VRSTATE_VSCR
	lvx	v0,r4,r3
	mtvscr	v0
	REST_32VRS(0,r4,r3)
	blr

/*
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(store_vr_state)
	SAVE_32VRS(0, r4, r3)
	mfvscr	v0
	li	r4, VRSTATE_VSCR
	stvx	v0, r4, r3
	blr

/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD		/* current task's THREAD (phys) */
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#endif
	/* Don't care if r4 overflows, this is desired behaviour */
	lbz	r4,THREAD_LOAD_VEC(r5)
	addi	r4,r4,1
	stb	r4,THREAD_LOAD_VEC(r5)
	addi	r6,r5,THREAD_VRSTATE
	li	r4,1
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	v0,r10,r6
	mtvscr	v0
	REST_32VRS(0,r4,r6)
	/* restore registers and return */
	blr

/*
 * save_altivec(tsk)
 * Save the vector registers to its thread_struct
 */
_GLOBAL(save_altivec)
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r7,0
	bne	2f
	addi	r7,r3,THREAD_VRSTATE
2:	SAVE_32VRS(0,r4,r7)
	mfvscr	v0
	li	r4,VRSTATE_VSCR
	stvx	v0,r4,r7
	blr

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
	b	fast_exception_return

#endif /* CONFIG_VSX */

/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
#ifdef CONFIG_PPC32
	.data
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

	.section ".toc","aw"
fpzero:
	.tc	FD_0_0[TC],0
fpone:
	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
fphalf:
	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */

#define LDCONST(fr, name)	\
	lfd	fr,name@toc(r2)
#endif

	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
fpenable:
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31
	MTFSF_L(fr1)
	blr

fpdisable:
	mtlr	r12
	MTFSF_L(fr31)
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable