arch/powerpc/kernel/vector.S (v4.17)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#include <asm/processor.h>
  3#include <asm/ppc_asm.h>
  4#include <asm/reg.h>
  5#include <asm/asm-offsets.h>
  6#include <asm/cputable.h>
  7#include <asm/thread_info.h>
  8#include <asm/page.h>
  9#include <asm/ptrace.h>
 10#include <asm/export.h>
 11
 12/*
 13 * Load state from memory into VMX registers including VSCR.
 14 * Assumes the caller has enabled VMX in the MSR.
 15 */
 16_GLOBAL(load_vr_state)
 17	li	r4,VRSTATE_VSCR
 18	lvx	v0,r4,r3
 19	mtvscr	v0
 20	REST_32VRS(0,r4,r3)
 21	blr
 22EXPORT_SYMBOL(load_vr_state)
 23
 24/*
 25 * Store VMX state into memory, including VSCR.
 26 * Assumes the caller has enabled VMX in the MSR.
 27 */
 28_GLOBAL(store_vr_state)
 29	SAVE_32VRS(0, r4, r3)
 30	mfvscr	v0
 31	li	r4, VRSTATE_VSCR
 32	stvx	v0, r4, r3
 33	blr
 34EXPORT_SYMBOL(store_vr_state)
 35
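A note on the layout these two routines assume: r3 points at a VMX save area of 32 vector registers followed by a VSCR image, and VRSTATE_VSCR is the byte offset of that trailing slot. A minimal C sketch of that layout, using a hypothetical vr128 type rather than the kernel's own definitions:

	#include <stdint.h>

	/* hypothetical stand-in for a 16-byte VMX register image */
	typedef struct { uint32_t w[4]; } __attribute__((aligned(16))) vr128;

	/* assumed shape of the area addressed by r3 above */
	struct vr_state_sketch {
		vr128 vr[32];   /* walked by SAVE_32VRS / REST_32VRS             */
		vr128 vscr;     /* reached via the VRSTATE_VSCR offset (32 * 16) */
	};

The lvx/stvx pair indexes r3 with r4 = VRSTATE_VSCR, i.e. it reads and writes the vscr slot of this sketched structure.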
 36/*
 37 * Disable VMX for the task which had it previously,
 38 * and save its vector registers in its thread_struct.
 39 * Enables the VMX for use in the kernel on return.
 40 * On SMP we know the VMX is free, since we give it up every
 41 * switch (ie, no lazy save of the vector registers).
 42 *
 43 * Note that on 32-bit this can only use registers that will be
 44 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 45 */
 46_GLOBAL(load_up_altivec)
 47	mfmsr	r5			/* grab the current MSR */
 48	oris	r5,r5,MSR_VEC@h
 49	MTMSRD(r5)			/* enable use of AltiVec now */
 50	isync
 51
 52	/*
 53	 * While userspace in general ignores VRSAVE, glibc uses it as a boolean
 54	 * to optimise userspace context save/restore. Whenever we take an
 55	 * altivec unavailable exception we must set VRSAVE to something non
 56	 * zero. Set it to all 1s. See also the programming note in the ISA.
 57	 */
 58	mfspr	r4,SPRN_VRSAVE
 59	cmpwi	0,r4,0
 60	bne+	1f
 61	li	r4,-1
 62	mtspr	SPRN_VRSAVE,r4
 631:
 64	/* enable use of VMX after return */
 65#ifdef CONFIG_PPC32
 66	mfspr	r5,SPRN_SPRG_THREAD		/* current task's THREAD (phys) */
 67	oris	r9,r9,MSR_VEC@h
 68#else
 69	ld	r4,PACACURRENT(r13)
 70	addi	r5,r4,THREAD		/* Get THREAD */
 71	oris	r12,r12,MSR_VEC@h
 72	std	r12,_MSR(r1)
 73#endif
 74	/* Don't care if r4 overflows, this is desired behaviour */
 75	lbz	r4,THREAD_LOAD_VEC(r5)
 76	addi	r4,r4,1
 77	stb	r4,THREAD_LOAD_VEC(r5)
 78	addi	r6,r5,THREAD_VRSTATE
 79	li	r4,1
 80	li	r10,VRSTATE_VSCR
 81	stw	r4,THREAD_USED_VR(r5)
 82	lvx	v0,r10,r6
 83	mtvscr	v0
 84	REST_32VRS(0,r4,r6)
 85	/* restore registers and return */
 86	blr
 87
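In C terms, the VRSAVE fixup and the bookkeeping above reduce to roughly the following; the struct and field names are illustrative stand-ins for the fields behind THREAD_LOAD_VEC and THREAD_USED_VR, not the kernel's declarations:

	#include <stdint.h>

	struct vec_bookkeeping {          /* illustrative slice of thread_struct  */
		uint8_t  load_vec;        /* THREAD_LOAD_VEC: wrapping byte count */
		uint32_t used_vr;         /* THREAD_USED_VR: task has used VMX    */
	};

	static void load_up_altivec_bookkeeping(struct vec_bookkeeping *t,
						uint32_t *vrsave /* SPRN_VRSAVE */)
	{
		/* glibc treats VRSAVE as a boolean, so never leave it at zero */
		if (*vrsave == 0)
			*vrsave = ~0u;                     /* li r4,-1 ; mtspr        */

		t->load_vec = (uint8_t)(t->load_vec + 1);  /* lbz/addi/stb, may wrap  */
		t->used_vr = 1;                            /* stw r4,THREAD_USED_VR   */
	}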
 88/*
 89 * save_altivec(tsk)
 90 * Save the vector registers to its thread_struct
 91 */
 92_GLOBAL(save_altivec)
 93	addi	r3,r3,THREAD		/* want THREAD of task */
 94	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
 95	PPC_LL	r5,PT_REGS(r3)
 96	PPC_LCMPI	0,r7,0
 97	bne	2f
 98	addi	r7,r3,THREAD_VRSTATE
 992:	SAVE_32VRS(0,r4,r7)
100	mfvscr	v0
101	li	r4,VRSTATE_VSCR
102	stvx	v0,r4,r7
103	blr
104
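save_altivec picks its destination as follows: the per-thread save-area pointer if one is installed, otherwise the thread's own VMX state. A hedged C outline, with stand-in names for the fields behind THREAD_VRSAVEAREA and THREAD_VRSTATE and a stub for the actual register dump:

	struct vmx_area { unsigned char raw[33 * 16]; };  /* 32 VRs + VSCR, illustrative */

	struct thread_sketch {
		struct vmx_area *vr_save_area;   /* behind THREAD_VRSAVEAREA, may be NULL */
		struct vmx_area  vr_state;       /* behind THREAD_VRSTATE                 */
	};

	/* stand-in for SAVE_32VRS + mfvscr/stvx */
	static void save_vmx_to(struct vmx_area *dst) { (void)dst; }

	static void save_altivec_sketch(struct thread_sketch *t)
	{
		struct vmx_area *dst = t->vr_save_area;   /* PPC_LL r7,THREAD_VRSAVEAREA */

		if (!dst)                                 /* PPC_LCMPI 0,r7,0 ; bne 2f   */
			dst = &t->vr_state;               /* addi r7,r3,THREAD_VRSTATE   */
		save_vmx_to(dst);                         /* 2: SAVE_32VRS ... stvx VSCR */
	}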
105#ifdef CONFIG_VSX
106
107#ifdef CONFIG_PPC32
108#error This asm code isn't ready for 32-bit kernels
109#endif
110
111/*
112 * load_up_vsx(unused, unused, tsk)
113 * Disable VSX for the task which had it previously,
114 * and save its vector registers in its thread_struct.
115 * Reuse the fp and vsx saves, but first check to see if they have
116 * been saved already.
117 */
118_GLOBAL(load_up_vsx)
119/* Load FP and VSX registers if they haven't been done yet */
120	andi.	r5,r12,MSR_FP
121	beql+	load_up_fpu		/* skip if already loaded */
122	andis.	r5,r12,MSR_VEC@h
123	beql+	load_up_altivec		/* skip if already loaded */
124
125	ld	r4,PACACURRENT(r13)
126	addi	r4,r4,THREAD		/* Get THREAD */
127	li	r6,1
128	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
129	/* enable use of VSX after return */
130	oris	r12,r12,MSR_VSX@h
131	std	r12,_MSR(r1)
132	b	fast_exception_return
133
134#endif /* CONFIG_VSX */
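The two beql+ instructions at the top of load_up_vsx are conditional calls: branch-and-link is taken only when the preceding and-with-MSR-bit left CR0 equal to zero, i.e. the corresponding unit was not yet enabled in the interrupted MSR held in r12. A hedged C model of that prologue, with stubs standing in for the real entry points:

	/* stubs standing in for the real load_up_fpu / load_up_altivec */
	static void load_up_fpu_stub(void)     { }
	static void load_up_altivec_stub(void) { }

	static void load_up_vsx_prologue(unsigned long saved_msr,  /* r12 */
					 unsigned long msr_fp_bit,
					 unsigned long msr_vec_bit)
	{
		if (!(saved_msr & msr_fp_bit))    /* andi.  r5,r12,MSR_FP    ; beql+ */
			load_up_fpu_stub();
		if (!(saved_msr & msr_vec_bit))   /* andis. r5,r12,MSR_VEC@h ; beql+ */
			load_up_altivec_stub();
		/* then THREAD_USED_VSR is set and MSR_VSX is or-ed into the saved
		 * MSR so the task returns to userspace with VSX enabled */
	}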
135
136
137/*
138 * The routines below are in assembler so we can closely control the
139 * usage of floating-point registers.  These routines must be called
140 * with preempt disabled.
141 */
142#ifdef CONFIG_PPC32
143	.data
144fpzero:
145	.long	0
146fpone:
147	.long	0x3f800000	/* 1.0 in single-precision FP */
148fphalf:
149	.long	0x3f000000	/* 0.5 in single-precision FP */
150
151#define LDCONST(fr, name)	\
152	lis	r11,name@ha;	\
153	lfs	fr,name@l(r11)
154#else
155
156	.section ".toc","aw"
157fpzero:
158	.tc	FD_0_0[TC],0
159fpone:
160	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
161fphalf:
162	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */
163
164#define LDCONST(fr, name)	\
165	lfd	fr,name@toc(r2)
166#endif
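The constants differ only in width: the 32-bit build keeps single-precision images in .data, the 64-bit build keeps double-precision images in the TOC. A standalone C check of those IEEE-754 bit patterns (illustration only, not kernel code):

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		float    f1 = 1.0f, fh = 0.5f;
		double   d1 = 1.0,  dh = 0.5;
		uint32_t u32;
		uint64_t u64;

		memcpy(&u32, &f1, 4); assert(u32 == 0x3f800000u);           /* fpone,  PPC32 */
		memcpy(&u32, &fh, 4); assert(u32 == 0x3f000000u);           /* fphalf, PPC32 */
		memcpy(&u64, &d1, 8); assert(u64 == 0x3ff0000000000000ull); /* fpone,  PPC64 */
		memcpy(&u64, &dh, 8); assert(u64 == 0x3fe0000000000000ull); /* fphalf, PPC64 */
		return 0;
	}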
167
168	.text
169/*
170 * Internal routine to enable floating point and set FPSCR to 0.
171 * Don't call it from C; it doesn't use the normal calling convention.
172 */
173fpenable:
174#ifdef CONFIG_PPC32
175	stwu	r1,-64(r1)
176#else
177	stdu	r1,-64(r1)
178#endif
179	mfmsr	r10
180	ori	r11,r10,MSR_FP
181	mtmsr	r11
182	isync
183	stfd	fr0,24(r1)
184	stfd	fr1,16(r1)
185	stfd	fr31,8(r1)
186	LDCONST(fr1, fpzero)
187	mffs	fr31
188	MTFSF_L(fr1)
189	blr
190
191fpdisable:
192	mtlr	r12
193	MTFSF_L(fr31)
194	lfd	fr31,8(r1)
195	lfd	fr1,16(r1)
196	lfd	fr0,24(r1)
197	mtmsr	r10
198	isync
199	addi	r1,r1,64
200	blr
201
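For orientation, the 64-byte frame opened by fpenable is shared scratch space for the helpers below; reading the stores and loads above and further down, the slots are used as follows (r10 holds the saved MSR and r12 the caller's LR throughout):

	 8(r1)  caller's fr31  (the live fr31 then carries the saved FPSCR until fpdisable)
	16(r1)  caller's fr1
	24(r1)  caller's fr0
	32(r1)  caller's fr2   (vmaddfp, vnmsubfp, vrsqrtefp)
	40(r1)  caller's fr3   (vrsqrtefp)
	48(r1)  caller's fr4   (vrsqrtefp)
	56(r1)  caller's fr5   (vrsqrtefp)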
202/*
203 * Vector add, floating point.
204 */
205_GLOBAL(vaddfp)
206	mflr	r12
207	bl	fpenable
208	li	r0,4
209	mtctr	r0
210	li	r6,0
2111:	lfsx	fr0,r4,r6
212	lfsx	fr1,r5,r6
213	fadds	fr0,fr0,fr1
214	stfsx	fr0,r3,r6
215	addi	r6,r6,4
216	bdnz	1b
217	b	fpdisable
218
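Each helper below works on four 32-bit lanes with scalar FP instructions. A standalone C reference for the add; vsubfp is identical apart from the operator, and the real routines take pointers to 16-byte vector images in r3, r4 and r5:

	/* scalar reference for the 4-lane single-precision add above */
	static void vaddfp_ref(float dst[4], const float a[4], const float b[4])
	{
		for (int i = 0; i < 4; i++)         /* li r0,4 ; mtctr ; bdnz   */
			dst[i] = a[i] + b[i];       /* lfsx, lfsx, fadds, stfsx */
	}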
219/*
220 * Vector subtract, floating point.
221 */
222_GLOBAL(vsubfp)
223	mflr	r12
224	bl	fpenable
225	li	r0,4
226	mtctr	r0
227	li	r6,0
2281:	lfsx	fr0,r4,r6
229	lfsx	fr1,r5,r6
230	fsubs	fr0,fr0,fr1
231	stfsx	fr0,r3,r6
232	addi	r6,r6,4
233	bdnz	1b
234	b	fpdisable
235
236/*
237 * Vector multiply and add, floating point.
238 */
239_GLOBAL(vmaddfp)
240	mflr	r12
241	bl	fpenable
242	stfd	fr2,32(r1)
243	li	r0,4
244	mtctr	r0
245	li	r7,0
2461:	lfsx	fr0,r4,r7
247	lfsx	fr1,r5,r7
248	lfsx	fr2,r6,r7
249	fmadds	fr0,fr0,fr2,fr1
250	stfsx	fr0,r3,r7
251	addi	r7,r7,4
252	bdnz	1b
253	lfd	fr2,32(r1)
254	b	fpdisable
255
256/*
257 * Vector negative multiply and subtract, floating point.
258 */
259_GLOBAL(vnmsubfp)
260	mflr	r12
261	bl	fpenable
262	stfd	fr2,32(r1)
263	li	r0,4
264	mtctr	r0
265	li	r7,0
2661:	lfsx	fr0,r4,r7
267	lfsx	fr1,r5,r7
268	lfsx	fr2,r6,r7
269	fnmsubs	fr0,fr0,fr2,fr1
270	stfsx	fr0,r3,r7
271	addi	r7,r7,4
272	bdnz	1b
273	lfd	fr2,32(r1)
274	b	fpdisable
275
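Reading the operand order straight from the two loops above, with a loaded from r4, b from r5 and c from r6: vmaddfp stores a[i]*c[i] + b[i] and vnmsubfp stores -(a[i]*c[i] - b[i]). A scalar sketch (note the real fmadds/fnmsubs are fused, so rounding can differ from this two-step C form):

	static void vmaddfp_ref(float dst[4], const float a[4],
				const float b[4], const float c[4])
	{
		for (int i = 0; i < 4; i++)
			dst[i] = a[i] * c[i] + b[i];      /* fmadds fr0,fr0,fr2,fr1  */
	}

	static void vnmsubfp_ref(float dst[4], const float a[4],
				 const float b[4], const float c[4])
	{
		for (int i = 0; i < 4; i++)
			dst[i] = -(a[i] * c[i] - b[i]);   /* fnmsubs fr0,fr0,fr2,fr1 */
	}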
276/*
277 * Vector reciprocal estimate.  We just compute 1.0/x.
278 * r3 -> destination, r4 -> source.
279 */
280_GLOBAL(vrefp)
281	mflr	r12
282	bl	fpenable
283	li	r0,4
284	LDCONST(fr1, fpone)
285	mtctr	r0
286	li	r6,0
2871:	lfsx	fr0,r4,r6
288	fdivs	fr0,fr1,fr0
289	stfsx	fr0,r3,r6
290	addi	r6,r6,4
291	bdnz	1b
292	b	fpdisable
293
294/*
295 * Vector reciprocal square-root estimate, floating point.
296 * We use the frsqrte instruction for the initial estimate followed
297 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
298 * r3 -> destination, r4 -> source.
299 */
300_GLOBAL(vrsqrtefp)
301	mflr	r12
302	bl	fpenable
303	stfd	fr2,32(r1)
304	stfd	fr3,40(r1)
305	stfd	fr4,48(r1)
306	stfd	fr5,56(r1)
307	li	r0,4
308	LDCONST(fr4, fpone)
309	LDCONST(fr5, fphalf)
310	mtctr	r0
311	li	r6,0
3121:	lfsx	fr0,r4,r6
313	frsqrte	fr1,fr0		/* r = frsqrte(s) */
314	fmuls	fr3,fr1,fr0	/* r * s */
315	fmuls	fr2,fr1,fr5	/* r * 0.5 */
316	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
317	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
318	fmuls	fr3,fr1,fr0	/* r * s */
319	fmuls	fr2,fr1,fr5	/* r * 0.5 */
320	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
321	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
322	stfsx	fr1,r3,r6
323	addi	r6,r6,4
324	bdnz	1b
325	lfd	fr5,56(r1)
326	lfd	fr4,48(r1)
327	lfd	fr3,40(r1)
328	lfd	fr2,32(r1)
329	b	fpdisable
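The refinement used in vrsqrtefp is the standard Newton-Raphson step for f(r) = 1/r^2 - s, namely r' = r + 0.5 * r * (1 - s * r * r); each step roughly doubles the number of correct bits, and two steps are applied. A scalar C model of the loop (frsqrte is approximated here with 1/sqrtf, so this illustrates the iteration, not the hardware estimate):

	#include <math.h>

	static float rsqrte_model(float s)
	{
		float r = 1.0f / sqrtf(s);                      /* stand-in for frsqrte */

		for (int i = 0; i < 2; i++)                     /* two refinement steps */
			r = r + 0.5f * r * (1.0f - s * r * r);  /* fmuls/fnmsubs/fmadds */
		return r;
	}

	static void vrsqrtefp_ref(float dst[4], const float src[4])
	{
		for (int i = 0; i < 4; i++)
			dst[i] = rsqrte_model(src[i]);
	}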
arch/powerpc/kernel/vector.S (v3.5.6)
 
  1#include <asm/processor.h>
  2#include <asm/ppc_asm.h>
  3#include <asm/reg.h>
  4#include <asm/asm-offsets.h>
  5#include <asm/cputable.h>
  6#include <asm/thread_info.h>
  7#include <asm/page.h>
  8#include <asm/ptrace.h>
  9
 10/*
 11 * load_up_altivec(unused, unused, tsk)
 12 * Disable VMX for the task which had it previously,
 13 * and save its vector registers in its thread_struct.
 14 * Enables the VMX for use in the kernel on return.
 15 * On SMP we know the VMX is free, since we give it up every
 16 * switch (ie, no lazy save of the vector registers).
 17 */
 18_GLOBAL(load_up_altivec)
 19	mfmsr	r5			/* grab the current MSR */
 20	oris	r5,r5,MSR_VEC@h
 21	MTMSRD(r5)			/* enable use of AltiVec now */
 22	isync
 23
 24/*
 25 * For SMP, we don't do lazy VMX switching because it just gets too
 26 * horrendously complex, especially when a task switches from one CPU
 27 * to another.  Instead we call giveup_altvec in switch_to.
 28 * VRSAVE isn't dealt with here, that is done in the normal context
 29 * switch code. Note that we could rely on vrsave value to eventually
 30 * avoid saving all of the VREGs here...
 31 */
 32#ifndef CONFIG_SMP
 33	LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
 34	toreal(r3)
 35	PPC_LL	r4,ADDROFF(last_task_used_altivec)(r3)
 36	PPC_LCMPI	0,r4,0
 37	beq	1f
 38
 39	/* Save VMX state to last_task_used_altivec's THREAD struct */
 40	toreal(r4)
 41	addi	r4,r4,THREAD
 42	SAVE_32VRS(0,r5,r4)
 43	mfvscr	vr0
 44	li	r10,THREAD_VSCR
 45	stvx	vr0,r10,r4
 46	/* Disable VMX for last_task_used_altivec */
 47	PPC_LL	r5,PT_REGS(r4)
 48	toreal(r5)
 49	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 50	lis	r10,MSR_VEC@h
 51	andc	r4,r4,r10
 52	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 531:
 54#endif /* CONFIG_SMP */
 55
 56	/* Hack: if we get an altivec unavailable trap with VRSAVE
 57	 * set to all zeros, we assume this is a broken application
 58	 * that fails to set it properly, and thus we switch it to
 59	 * all 1's
 60	 */
 61	mfspr	r4,SPRN_VRSAVE
 62	cmpwi	0,r4,0
 63	bne+	1f
 64	li	r4,-1
 65	mtspr	SPRN_VRSAVE,r4
 661:
 67	/* enable use of VMX after return */
 68#ifdef CONFIG_PPC32
 69	mfspr	r5,SPRN_SPRG_THREAD		/* current task's THREAD (phys) */
 70	oris	r9,r9,MSR_VEC@h
 71#else
 72	ld	r4,PACACURRENT(r13)
 73	addi	r5,r4,THREAD		/* Get THREAD */
 74	oris	r12,r12,MSR_VEC@h
 75	std	r12,_MSR(r1)
 76#endif
 77	li	r4,1
 78	li	r10,THREAD_VSCR
 79	stw	r4,THREAD_USED_VR(r5)
 80	lvx	vr0,r10,r5
 81	mtvscr	vr0
 82	REST_32VRS(0,r4,r5)
 83#ifndef CONFIG_SMP
 84	/* Update last_task_used_altivec to 'current' */
 85	subi	r4,r5,THREAD		/* Back to 'current' */
 86	fromreal(r4)
 87	PPC_STL	r4,ADDROFF(last_task_used_altivec)(r3)
 88#endif /* CONFIG_SMP */
 89	/* restore registers and return */
 90	blr
 91
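This older kernel still does lazy VMX handoff on UP: if another task is recorded in last_task_used_altivec, its registers are written back to its thread_struct and MSR_VEC is cleared in its saved regs before the current task's state is loaded and ownership moves to it. A hedged C outline of the #ifndef CONFIG_SMP path (types and helpers are illustrative stand-ins for the asm offsets and macros above):

	struct vmx_image { unsigned char raw[33 * 16]; };    /* 32 VRs + VSCR */

	struct task_sketch {
		struct vmx_image vr_state;    /* save area reached via THREAD + offsets */
		unsigned long    saved_msr;   /* regs->msr reached via PT_REGS / _MSR   */
	};

	static struct task_sketch *last_task_used_altivec;   /* UP-only owner pointer */

	/* stand-ins for SAVE_32VRS + mfvscr/stvx and lvx/mtvscr + REST_32VRS */
	static void save_vmx(struct vmx_image *dst)       { (void)dst; }
	static void load_vmx(const struct vmx_image *src) { (void)src; }

	static void lazy_vmx_switch_in(struct task_sketch *cur, unsigned long msr_vec_bit)
	{
		struct task_sketch *old = last_task_used_altivec;

		if (old) {                               /* PPC_LCMPI ... beq 1f        */
			save_vmx(&old->vr_state);        /* dump old owner's VRs + VSCR */
			old->saved_msr &= ~msr_vec_bit;  /* old task will re-fault      */
		}
		load_vmx(&cur->vr_state);                /* lvx/mtvscr + REST_32VRS     */
		last_task_used_altivec = cur;            /* 'current' now owns the unit */
	}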
 92_GLOBAL(giveup_altivec_notask)
 93	mfmsr	r3
 94	andis.	r4,r3,MSR_VEC@h
 95	bnelr				/* Already enabled? */
 96	oris	r3,r3,MSR_VEC@h
 97	SYNC
 98	MTMSRD(r3)			/* enable use of VMX now */
 99	isync
100	blr
101
102/*
103 * giveup_altivec(tsk)
104 * Disable VMX for the task given as the argument,
105 * and save the vector registers in its thread_struct.
106 * Enables the VMX for use in the kernel on return.
107 */
108_GLOBAL(giveup_altivec)
109	mfmsr	r5
110	oris	r5,r5,MSR_VEC@h
111	SYNC
112	MTMSRD(r5)			/* enable use of VMX now */
113	isync
114	PPC_LCMPI	0,r3,0
115	beqlr				/* if no previous owner, done */
116	addi	r3,r3,THREAD		/* want THREAD of task */
117	PPC_LL	r5,PT_REGS(r3)
118	PPC_LCMPI	0,r5,0
119	SAVE_32VRS(0,r4,r3)
120	mfvscr	vr0
121	li	r4,THREAD_VSCR
122	stvx	vr0,r4,r3
123	beq	1f
124	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
125#ifdef CONFIG_VSX
126BEGIN_FTR_SECTION
127	lis	r3,(MSR_VEC|MSR_VSX)@h
128FTR_SECTION_ELSE
129	lis	r3,MSR_VEC@h
130ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
131#else
132	lis	r3,MSR_VEC@h
133#endif
134	andc	r4,r4,r3		/* disable FP for previous task */
135	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1361:
137#ifndef CONFIG_SMP
138	li	r5,0
139	LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
140	PPC_STL	r5,ADDROFF(last_task_used_altivec)(r4)
141#endif /* CONFIG_SMP */
142	blr
143
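giveup_altivec dumps the owner's registers and then clears MSR_VEC in that task's saved MSR, and on VSX-capable CPUs (the CPU_FTR_VSX feature section) it clears MSR_VSX as well; the inline comment above says "disable FP", but the mask actually cleared is the vector one. A small C sketch of just that MSR edit:

	/* hedged sketch of the saved-MSR edit; real bit values live in asm/reg.h */
	static void giveup_altivec_msr_edit(unsigned long *saved_msr,
					    unsigned long msr_vec_bit,
					    unsigned long msr_vsx_bit,
					    int cpu_has_vsx)
	{
		unsigned long clear = msr_vec_bit;

		if (cpu_has_vsx)               /* BEGIN_FTR_SECTION / CPU_FTR_VSX */
			clear |= msr_vsx_bit;
		*saved_msr &= ~clear;          /* andc r4,r4,r3 ; PPC_STL         */
	}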
144#ifdef CONFIG_VSX
145
146#ifdef CONFIG_PPC32
147#error This asm code isn't ready for 32-bit kernels
148#endif
149
150/*
151 * load_up_vsx(unused, unused, tsk)
152 * Disable VSX for the task which had it previously,
153 * and save its vector registers in its thread_struct.
154 * Reuse the fp and vsx saves, but first check to see if they have
155 * been saved already.
156 */
157_GLOBAL(load_up_vsx)
158/* Load FP and VSX registers if they haven't been done yet */
159	andi.	r5,r12,MSR_FP
160	beql+	load_up_fpu		/* skip if already loaded */
161	andis.	r5,r12,MSR_VEC@h
162	beql+	load_up_altivec		/* skip if already loaded */
163
164#ifndef CONFIG_SMP
165	ld	r3,last_task_used_vsx@got(r2)
166	ld	r4,0(r3)
167	cmpdi	0,r4,0
168	beq	1f
169	/* Disable VSX for last_task_used_vsx */
170	addi	r4,r4,THREAD
171	ld	r5,PT_REGS(r4)
172	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
173	lis	r6,MSR_VSX@h
174	andc	r6,r4,r6
175	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
1761:
177#endif /* CONFIG_SMP */
178	ld	r4,PACACURRENT(r13)
179	addi	r4,r4,THREAD		/* Get THREAD */
180	li	r6,1
181	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
182	/* enable use of VSX after return */
183	oris	r12,r12,MSR_VSX@h
184	std	r12,_MSR(r1)
185#ifndef CONFIG_SMP
186	/* Update last_task_used_vsx to 'current' */
187	ld	r4,PACACURRENT(r13)
188	std	r4,0(r3)
189#endif /* CONFIG_SMP */
190	b	fast_exception_return
191
192/*
193 * __giveup_vsx(tsk)
194 * Disable VSX for the task given as the argument.
195 * Does NOT save vsx registers.
196 * Enables the VSX for use in the kernel on return.
197 */
198_GLOBAL(__giveup_vsx)
199	mfmsr	r5
200	oris	r5,r5,MSR_VSX@h
201	mtmsrd	r5			/* enable use of VSX now */
202	isync
203
204	cmpdi	0,r3,0
205	beqlr-				/* if no previous owner, done */
206	addi	r3,r3,THREAD		/* want THREAD of task */
207	ld	r5,PT_REGS(r3)
208	cmpdi	0,r5,0
209	beq	1f
210	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
211	lis	r3,MSR_VSX@h
212	andc	r4,r4,r3		/* disable VSX for previous task */
213	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
2141:
215#ifndef CONFIG_SMP
216	li	r5,0
217	ld	r4,last_task_used_vsx@got(r2)
218	std	r5,0(r4)
219#endif /* CONFIG_SMP */
220	blr
221
222#endif /* CONFIG_VSX */
223
224
225/*
226 * The routines below are in assembler so we can closely control the
227 * usage of floating-point registers.  These routines must be called
228 * with preempt disabled.
229 */
230#ifdef CONFIG_PPC32
231	.data
232fpzero:
233	.long	0
234fpone:
235	.long	0x3f800000	/* 1.0 in single-precision FP */
236fphalf:
237	.long	0x3f000000	/* 0.5 in single-precision FP */
238
239#define LDCONST(fr, name)	\
240	lis	r11,name@ha;	\
241	lfs	fr,name@l(r11)
242#else
243
244	.section ".toc","aw"
245fpzero:
246	.tc	FD_0_0[TC],0
247fpone:
248	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
249fphalf:
250	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */
251
252#define LDCONST(fr, name)	\
253	lfd	fr,name@toc(r2)
254#endif
255
256	.text
257/*
258 * Internal routine to enable floating point and set FPSCR to 0.
259 * Don't call it from C; it doesn't use the normal calling convention.
260 */
261fpenable:
262#ifdef CONFIG_PPC32
263	stwu	r1,-64(r1)
264#else
265	stdu	r1,-64(r1)
266#endif
267	mfmsr	r10
268	ori	r11,r10,MSR_FP
269	mtmsr	r11
270	isync
271	stfd	fr0,24(r1)
272	stfd	fr1,16(r1)
273	stfd	fr31,8(r1)
274	LDCONST(fr1, fpzero)
275	mffs	fr31
276	MTFSF_L(fr1)
277	blr
278
279fpdisable:
280	mtlr	r12
281	MTFSF_L(fr31)
282	lfd	fr31,8(r1)
283	lfd	fr1,16(r1)
284	lfd	fr0,24(r1)
285	mtmsr	r10
286	isync
287	addi	r1,r1,64
288	blr
289
290/*
291 * Vector add, floating point.
292 */
293_GLOBAL(vaddfp)
294	mflr	r12
295	bl	fpenable
296	li	r0,4
297	mtctr	r0
298	li	r6,0
2991:	lfsx	fr0,r4,r6
300	lfsx	fr1,r5,r6
301	fadds	fr0,fr0,fr1
302	stfsx	fr0,r3,r6
303	addi	r6,r6,4
304	bdnz	1b
305	b	fpdisable
306
307/*
308 * Vector subtract, floating point.
309 */
310_GLOBAL(vsubfp)
311	mflr	r12
312	bl	fpenable
313	li	r0,4
314	mtctr	r0
315	li	r6,0
3161:	lfsx	fr0,r4,r6
317	lfsx	fr1,r5,r6
318	fsubs	fr0,fr0,fr1
319	stfsx	fr0,r3,r6
320	addi	r6,r6,4
321	bdnz	1b
322	b	fpdisable
323
324/*
325 * Vector multiply and add, floating point.
326 */
327_GLOBAL(vmaddfp)
328	mflr	r12
329	bl	fpenable
330	stfd	fr2,32(r1)
331	li	r0,4
332	mtctr	r0
333	li	r7,0
3341:	lfsx	fr0,r4,r7
335	lfsx	fr1,r5,r7
336	lfsx	fr2,r6,r7
337	fmadds	fr0,fr0,fr2,fr1
338	stfsx	fr0,r3,r7
339	addi	r7,r7,4
340	bdnz	1b
341	lfd	fr2,32(r1)
342	b	fpdisable
343
344/*
345 * Vector negative multiply and subtract, floating point.
346 */
347_GLOBAL(vnmsubfp)
348	mflr	r12
349	bl	fpenable
350	stfd	fr2,32(r1)
351	li	r0,4
352	mtctr	r0
353	li	r7,0
3541:	lfsx	fr0,r4,r7
355	lfsx	fr1,r5,r7
356	lfsx	fr2,r6,r7
357	fnmsubs	fr0,fr0,fr2,fr1
358	stfsx	fr0,r3,r7
359	addi	r7,r7,4
360	bdnz	1b
361	lfd	fr2,32(r1)
362	b	fpdisable
363
364/*
365 * Vector reciprocal estimate.  We just compute 1.0/x.
366 * r3 -> destination, r4 -> source.
367 */
368_GLOBAL(vrefp)
369	mflr	r12
370	bl	fpenable
371	li	r0,4
372	LDCONST(fr1, fpone)
373	mtctr	r0
374	li	r6,0
3751:	lfsx	fr0,r4,r6
376	fdivs	fr0,fr1,fr0
377	stfsx	fr0,r3,r6
378	addi	r6,r6,4
379	bdnz	1b
380	b	fpdisable
381
382/*
383 * Vector reciprocal square-root estimate, floating point.
384 * We use the frsqrte instruction for the initial estimate followed
385 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
386 * r3 -> destination, r4 -> source.
387 */
388_GLOBAL(vrsqrtefp)
389	mflr	r12
390	bl	fpenable
391	stfd	fr2,32(r1)
392	stfd	fr3,40(r1)
393	stfd	fr4,48(r1)
394	stfd	fr5,56(r1)
395	li	r0,4
396	LDCONST(fr4, fpone)
397	LDCONST(fr5, fphalf)
398	mtctr	r0
399	li	r6,0
4001:	lfsx	fr0,r4,r6
401	frsqrte	fr1,fr0		/* r = frsqrte(s) */
402	fmuls	fr3,fr1,fr0	/* r * s */
403	fmuls	fr2,fr1,fr5	/* r * 0.5 */
404	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
405	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
406	fmuls	fr3,fr1,fr0	/* r * s */
407	fmuls	fr2,fr1,fr5	/* r * 0.5 */
408	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
409	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
410	stfsx	fr1,r3,r6
411	addi	r6,r6,4
412	bdnz	1b
413	lfd	fr5,56(r1)
414	lfd	fr4,48(r1)
415	lfd	fr3,40(r1)
416	lfd	fr2,32(r1)
417	b	fpdisable