arch/powerpc/kernel/vector.S (v6.8)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-compat.h>

/*
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(load_vr_state)
	li	r4,VRSTATE_VSCR
	lvx	v0,r4,r3
	mtvscr	v0
	REST_32VRS(0,r4,r3)
	blr
EXPORT_SYMBOL(load_vr_state)
_ASM_NOKPROBE_SYMBOL(load_vr_state); /* used by restore_math */

/*
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(store_vr_state)
	SAVE_32VRS(0, r4, r3)
	mfvscr	v0
	li	r4, VRSTATE_VSCR
	stvx	v0, r4, r3
	lvx	v0, 0, r3
	blr
EXPORT_SYMBOL(store_vr_state)
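
Both helpers take the save area in r3 and assume a layout in which the 32
vector registers come first and the VSCR image sits at byte offset
VRSTATE_VSCR. A minimal C sketch of that assumed layout (the struct name and
types here are illustrative, not copied from the kernel headers):

	#include <stddef.h>
	#include <stdint.h>
	#include <assert.h>

	typedef struct { uint32_t u[4]; } vector128;	/* one 16-byte VMX register */

	struct vr_state_sketch {
		vector128 vr[32];	/* v0..v31, saved/restored by SAVE_32VRS/REST_32VRS */
		vector128 vscr;		/* VSCR image, accessed via lvx/stvx at VRSTATE_VSCR */
	};

	int main(void)
	{
		/* with this layout, VRSTATE_VSCR would be 32 * 16 = 512 */
		assert(offsetof(struct vr_state_sketch, vscr) == 512);
		return 0;
	}

Note also the trailing lvx in store_vr_state: v0 was clobbered by mfvscr, so
it is reloaded from the save area to leave the live registers unchanged.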

/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3S_64
	/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
	ori	r5,r5,MSR_RI
#endif
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

	/*
	 * While userspace in general ignores VRSAVE, glibc uses it as a boolean
	 * to optimise userspace context save/restore. Whenever we take an
	 * altivec unavailable exception we must set VRSAVE to something
	 * non-zero. Set it to all 1s. See also the programming note in the ISA.
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	addi	r5,r2,THREAD
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S_64
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
#endif
	li	r4,1
	stb	r4,THREAD_LOAD_VEC(r5)
	addi	r6,r5,THREAD_VRSTATE
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	v0,r10,r6
	mtvscr	v0
	REST_32VRS(0,r4,r6)
	/* restore registers and return */
	blr
_ASM_NOKPROBE_SYMBOL(load_up_altivec)
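
As the comment notes, glibc treats a zero VRSAVE as "no vector state worth
saving", so the kernel seeds it with all 1s on first use. A hedged user-space
probe of that behaviour (PowerPC only; SPR 256 is the user-readable VRSAVE
register, the file name is made up, and the build may need gcc -maltivec for
the assembler to accept the VMX mnemonic):

	/* vrsave_probe.c - print VRSAVE before and after touching AltiVec */
	#include <stdio.h>

	static unsigned long read_vrsave(void)
	{
		unsigned long v;
		__asm__ volatile("mfspr %0, 256" : "=r"(v));	/* SPR 256 = VRSAVE */
		return v;
	}

	int main(void)
	{
		printf("VRSAVE before: %#lx\n", read_vrsave());
		__asm__ volatile("vor 0, 0, 0");	/* first VMX use traps to the kernel */
		printf("VRSAVE after:  %#lx\n", read_vrsave());
		return 0;
	}

If VRSAVE was still zero, the altivec-unavailable path above runs on the vor
and the second read should report 0xffffffff; glibc or the application may of
course have set VRSAVE already.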

/*
 * save_altivec(tsk)
 * Save the vector registers to its thread_struct
 */
_GLOBAL(save_altivec)
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r7,0
	bne	2f
	addi	r7,r3,THREAD_VRSTATE
2:	SAVE_32VRS(0,r4,r7)
	mfvscr	v0
	li	r4,VRSTATE_VSCR
	stvx	v0,r4,r7
	lvx	v0,0,r7
	blr

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

#ifdef CONFIG_PPC_BOOK3S_64
	/* interrupt doesn't set MSR[RI] and HPT can fault on current access */
	li	r5,MSR_RI
	mtmsrd	r5,1
#endif

	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
	b	fast_interrupt_return_srr

#endif /* CONFIG_VSX */
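
The dispatch at the top of load_up_vsx only loads each facility whose MSR bit
is still clear, then turns on MSR_VSX for the return. A hedged stand-alone C
model of that logic (the _stub helpers are made up; the bit values match the
usual asm/reg.h definitions but are restated here as assumptions):

	#include <stdio.h>

	#define MSR_FP	0x0000000000002000UL	/* floating point available */
	#define MSR_VEC	0x0000000002000000UL	/* AltiVec/VMX available */
	#define MSR_VSX	0x0000000000800000UL	/* VSX available */

	static void load_up_fpu_stub(void)     { puts("would load FP state");  }
	static void load_up_altivec_stub(void) { puts("would load VMX state"); }

	/* mirrors the andi./andis. + beql+ dispatch in load_up_vsx */
	static unsigned long load_up_vsx_logic(unsigned long msr)
	{
		if (!(msr & MSR_FP))		/* beql+ load_up_fpu */
			load_up_fpu_stub();
		if (!(msr & MSR_VEC))		/* beql+ load_up_altivec */
			load_up_altivec_stub();
		return msr | MSR_VSX;		/* oris r12,r12,MSR_VSX@h */
	}

	int main(void)
	{
		printf("returned MSR: %#lx\n", load_up_vsx_logic(MSR_FP));
		return 0;
	}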


/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
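
In practice that means callers bracket these routines with preemption off,
since they borrow the FP unit behind the scheduler's back. A hedged
kernel-side usage sketch (the prototype is an assumption inferred from the
register usage below, r3 = destination and r4/r5 = sources, not taken from a
kernel header):

	#include <linux/preempt.h>

	extern void vaddfp(float *dst, const float *a, const float *b);

	static void add_quad(float dst[4], const float a[4], const float b[4])
	{
		preempt_disable();	/* FP register borrow must not be preempted */
		vaddfp(dst, a, b);
		preempt_enable();
	}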
	.data
#ifdef CONFIG_PPC32
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

fpzero:
	.quad	0
fpone:
	.quad	0x3ff0000000000000	/* 1.0 */
fphalf:
	.quad	0x3fe0000000000000	/* 0.5 */

#ifdef CONFIG_PPC_KERNEL_PCREL
#define LDCONST(fr, name)		\
	pla	r11,name@pcrel;		\
	lfd	fr,0(r11)
#else
#define LDCONST(fr, name)		\
	addis	r11,r2,name@toc@ha;	\
	lfd	fr,name@toc@l(r11)
#endif
#endif
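
These constants are raw IEEE 754 bit patterns: single precision on 32-bit
kernels (loaded with lfs) and double precision on 64-bit (loaded with lfd).
A quick stand-alone C check of the 32-bit encodings:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	int main(void)
	{
		const uint32_t one_bits = 0x3f800000, half_bits = 0x3f000000;
		float one, half;

		memcpy(&one, &one_bits, sizeof one);	/* reinterpret the bit patterns */
		memcpy(&half, &half_bits, sizeof half);
		printf("%.1f %.1f\n", one, half);	/* prints: 1.0 0.5 */
		return 0;
	}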
 
	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
SYM_FUNC_START_LOCAL(fpenable)
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31
	MTFSF_L(fr1)
	blr
SYM_FUNC_END(fpenable)

fpdisable:
	mtlr	r12
	MTFSF_L(fr31)
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
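
For reference, the loop above is just a scalar walk over the four
single-precision lanes of one vector per operand. A C model of the same
semantics (the function name is made up):

	/* C model of vaddfp: four lanes, one lfsx/fadds/stfsx step per lane */
	void vaddfp_model(float dst[4], const float a[4], const float b[4])
	{
		for (int i = 0; i < 4; i++)	/* ctr = 4, r6 steps by 4 bytes */
			dst[i] = a[i] + b[i];
	}

The remaining routines below follow the same pattern with a different
arithmetic operation in the loop body.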

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable
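
The refinement step r' = r + 0.5 * r * (1 - s * r * r) is the standard
Newton-Raphson iteration for f(r) = 1/r^2 - s, which converges to 1/sqrt(s).
A stand-alone C sketch of the same two iterations; since frsqrte only exists
on PowerPC, the well-known integer bit trick stands in for the initial
estimate here, purely so the refinement has work to do:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	/* crude 1/sqrt(s) guess, a stand-in for the frsqrte estimate */
	static float rsqrt_estimate(float s)
	{
		uint32_t i;
		float r;

		memcpy(&i, &s, sizeof i);
		i = 0x5f3759df - (i >> 1);
		memcpy(&r, &i, sizeof r);
		return r;
	}

	static float rsqrt(float s)
	{
		float r = rsqrt_estimate(s);

		/* two Newton-Raphson steps, exactly as in the loop above */
		r = r + 0.5f * r * (1.0f - s * r * r);
		r = r + 0.5f * r * (1.0f - s * r * r);
		return r;
	}

	int main(void)
	{
		printf("rsqrt(2.0f) ~= %.4f\n", rsqrt(2.0f));	/* ~0.7071 */
		return 0;
	}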
arch/powerpc/kernel/vector.S (v3.1)
 
 
 
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>

/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
	LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
	toreal(r3)
	PPC_LL	r4,ADDROFF(last_task_used_altivec)(r3)
	PPC_LCMPI	0,r4,0
	beq	1f

	/* Save VMX state to last_task_used_altivec's THREAD struct */
	toreal(r4)
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	PPC_LL	r5,PT_REGS(r4)
	toreal(r5)
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_VEC@h
	andc	r4,r4,r10
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */

	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD		/* current task's THREAD (phys) */
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#endif
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	fromreal(r4)
	PPC_STL	r4,ADDROFF(last_task_used_altivec)(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	blr
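
Unlike v6.8 above, this version still switches the vector unit lazily on UP
kernels: the state of the previous owner is evicted only when a new task
traps, and last_task_used_altivec tracks the owner. A hedged stand-alone C
model of that bookkeeping (struct and names are simplified stand-ins, and the
MSR_VEC value is an assumption):

	#include <stdio.h>

	#define MSR_VEC	0x02000000UL	/* assumed value of the MSR AltiVec bit */

	/* simplified stand-in for a task: just the saved MSR from its pt_regs */
	struct task_sketch {
		unsigned long saved_msr;
	};

	static struct task_sketch *last_task_used_altivec;

	/* models the #ifndef CONFIG_SMP path of load_up_altivec above */
	static void lazy_load_altivec(struct task_sketch *current_task)
	{
		struct task_sketch *last = last_task_used_altivec;

		if (last) {
			/* SAVE_32VRS + mfvscr/stvx would dump last's VMX state here */
			last->saved_msr &= ~MSR_VEC;	/* it traps again on next use */
		}
		/* lvx/mtvscr + REST_32VRS would reload current_task's state here */
		last_task_used_altivec = current_task;
	}

	int main(void)
	{
		struct task_sketch a = { MSR_VEC }, b = { MSR_VEC };

		lazy_load_altivec(&a);
		lazy_load_altivec(&b);	/* evicts a */
		printf("a MSR_VEC still set? %s\n",
		       (a.saved_msr & MSR_VEC) ? "yes" : "no");	/* prints: no */
		return 0;
	}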
 

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of VMX now */
	isync
	PPC_LCMPI	0,r3,0
	beqlr				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r5,0
	SAVE_32VRS(0,r4,r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	lis	r3,(MSR_VEC|MSR_VSX)@h
FTR_SECTION_ELSE
	lis	r3,MSR_VEC@h
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#else
	lis	r3,MSR_VEC@h
#endif
	andc	r4,r4,r3		/* disable VMX for previous task */
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
	PPC_STL	r5,ADDROFF(last_task_used_altivec)(r4)
#endif /* CONFIG_SMP */
	blr

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

#ifndef CONFIG_SMP
	ld	r3,last_task_used_vsx@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Disable VSX for last_task_used_vsx */
	addi	r4,r4,THREAD
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VSX@h
	andc	r6,r4,r6
	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
#ifndef CONFIG_SMP
	/* Update last_task_used_vsx to 'current' */
	ld	r4,PACACURRENT(r13)
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	b	fast_exception_return

/*
 * __giveup_vsx(tsk)
 * Disable VSX for the task given as the argument.
 * Does NOT save vsx registers.
 * Enables the VSX for use in the kernel on return.
 */
_GLOBAL(__giveup_vsx)
	mfmsr	r5
	oris	r5,r5,MSR_VSX@h
	mtmsrd	r5			/* enable use of VSX now */
	isync

	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VSX@h
	andc	r4,r4,r3		/* disable VSX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_vsx@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_VSX */


/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
 
#ifdef CONFIG_PPC32
	.data
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

	.section ".toc","aw"
fpzero:
	.tc	FD_0_0[TC],0
fpone:
	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
fphalf:
	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */

#define LDCONST(fr, name)	\
	lfd	fr,name@toc(r2)
#endif

	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
fpenable:
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31
	MTFSF_L(fr1)
	blr

fpdisable:
	mtlr	r12
	MTFSF_L(fr31)
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable

/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable