v5.14.15
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/asm-compat.h>

/*
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(load_vr_state)
	li	r4,VRSTATE_VSCR
	lvx	v0,r4,r3
	mtvscr	v0
	REST_32VRS(0,r4,r3)
	blr
EXPORT_SYMBOL(load_vr_state)
_ASM_NOKPROBE_SYMBOL(load_vr_state); /* used by restore_math */

/*
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(store_vr_state)
	SAVE_32VRS(0, r4, r3)
	mfvscr	v0
	li	r4, VRSTATE_VSCR
	stvx	v0, r4, r3
	blr
EXPORT_SYMBOL(store_vr_state)
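
/*
 * A rough sketch of how C callers use the two helpers above, assuming the
 * usual thread_struct layout (treat the exact field name as an assumption).
 * The caller must have MSR[VEC] enabled around the call:
 *
 *	load_vr_state(&current->thread.vr_state);	// registers <- memory
 *	store_vr_state(&current->thread.vr_state);	// memory <- registers
 */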

/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

	/*
	 * While userspace in general ignores VRSAVE, glibc uses it as a boolean
	 * to optimise userspace context save/restore. Whenever we take an
	 * altivec unavailable exception we must set VRSAVE to something non
	 * zero. Set it to all 1s. See also the programming note in the ISA.
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD		/* current task's THREAD (phys) */
	oris	r9,r9,MSR_VEC@h
	tovirt(r5, r5)
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S_64
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
#endif
#endif
	li	r4,1
	stb	r4,THREAD_LOAD_VEC(r5)
	addi	r6,r5,THREAD_VRSTATE
	li	r4,1
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	v0,r10,r6
	mtvscr	v0
	REST_32VRS(0,r4,r6)
	/* restore registers and return */
	blr
_ASM_NOKPROBE_SYMBOL(load_up_altivec)
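
/*
 * Roughly what the routine above does, as C pseudo-code (field names follow
 * the asm-offsets used above and are an approximation):
 *
 *	msr |= MSR_VEC;			// live MSR and the MSR image restored on return
 *	if (mfspr(SPRN_VRSAVE) == 0)
 *		mtspr(SPRN_VRSAVE, -1);
 *	current->thread.load_vec = 1;
 *	current->thread.used_vr = 1;
 *	// then the equivalent of load_vr_state(&current->thread.vr_state)
 */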

/*
 * save_altivec(tsk)
 * Save the vector registers to its thread_struct
 */
_GLOBAL(save_altivec)
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r7,0
	bne	2f
	addi	r7,r3,THREAD_VRSTATE
2:	SAVE_32VRS(0,r4,r7)
	mfvscr	v0
	li	r4,VRSTATE_VSCR
	stvx	v0,r4,r7
	blr
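
/*
 * The destination selection above, as a C sketch (vr_save_area / vr_state
 * are the thread_struct fields the offsets refer to; treat the exact names
 * as an assumption):
 *
 *	dst = tsk->thread.vr_save_area;
 *	if (!dst)
 *		dst = &tsk->thread.vr_state;
 *	// then store the 32 VRs plus VSCR to *dst
 */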

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
	li	r4,0
	stb	r4,PACASRR_VALID(r13)
	b	fast_interrupt_return_srr
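
/*
 * The flow above as C pseudo-code.  r12 holds the interrupted MSR, so the
 * two conditional calls load whichever of the FP and VMX states is not
 * already live:
 *
 *	if (!(regs->msr & MSR_FP))
 *		load_up_fpu();
 *	if (!(regs->msr & MSR_VEC))
 *		load_up_altivec();
 *	current->thread.used_vsr = 1;
 *	regs->msr |= MSR_VSX;
 *	// invalidate the cached SRRs and return via fast_interrupt_return_srr
 */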

#endif /* CONFIG_VSX */


/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
#ifdef CONFIG_PPC32
	.data
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

	.section ".toc","aw"
fpzero:
	.tc	FD_0_0[TC],0
fpone:
	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
fphalf:
	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */

#define LDCONST(fr, name)	\
	lfd	fr,name@toc(r2)
#endif
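
/*
 * LDCONST hides the 32-bit vs 64-bit difference in how an FP constant is
 * reached.  For example, LDCONST(fr1, fpone) expands to
 *
 *	lis	r11,fpone@ha
 *	lfs	fr1,fpone@l(r11)	on 32-bit (constants in .data), or
 *
 *	lfd	fr1,fpone@toc(r2)	on 64-bit (constants in the TOC).
 */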

	.text
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
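
/*
 * Calling convention used by the emulation helpers below (derived from the
 * code, not a documented ABI): the caller saves LR in r12 and does
 * "bl fpenable".  fpenable opens a 64-byte frame, keeps the old MSR in r10,
 * saves fr0/fr1/fr31 on the stack and the old FPSCR in fr31, enables
 * MSR[FP] and clears the FPSCR.  Each helper ends with "b fpdisable", which
 * undoes all of the above and returns to the original caller through the LR
 * value kept in r12.
 */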
fpenable:
#ifdef CONFIG_PPC32
	stwu	r1,-64(r1)
#else
	stdu	r1,-64(r1)
#endif
	mfmsr	r10
	ori	r11,r10,MSR_FP
	mtmsr	r11
	isync
	stfd	fr0,24(r1)
	stfd	fr1,16(r1)
	stfd	fr31,8(r1)
	LDCONST(fr1, fpzero)
	mffs	fr31
	MTFSF_L(fr1)
	blr

fpdisable:
	mtlr	r12
	MTFSF_L(fr31)
	lfd	fr31,8(r1)
	lfd	fr1,16(r1)
	lfd	fr0,24(r1)
	mtmsr	r10
	isync
	addi	r1,r1,64
	blr

/*
 * Vector add, floating point.
 */
_GLOBAL(vaddfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fadds	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
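
/*
 * Each of these emulation routines walks the four 32-bit lanes of a 128-bit
 * vector with scalar FP instructions.  vaddfp above is, in rough C terms
 * (r3 = dst, r4/r5 = sources):
 *
 *	for (i = 0; i < 4; i++)
 *		dst[i] = a[i] + b[i];
 */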

/*
 * Vector subtract, floating point.
 */
_GLOBAL(vsubfp)
	mflr	r12
	bl	fpenable
	li	r0,4
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	lfsx	fr1,r5,r6
	fsubs	fr0,fr0,fr1
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable

/*
 * Vector multiply and add, floating point.
 */
_GLOBAL(vmaddfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fmadds	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable
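
/*
 * Note the operand order above: with r3 = dst and r4/r5/r6 = a/b/c, the
 * fmadds computes dst[i] = a[i] * c[i] + b[i], matching the vA * vC + vB
 * form of the AltiVec vmaddfp instruction.
 */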

/*
 * Vector negative multiply and subtract, floating point.
 */
_GLOBAL(vnmsubfp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	li	r0,4
	mtctr	r0
	li	r7,0
1:	lfsx	fr0,r4,r7
	lfsx	fr1,r5,r7
	lfsx	fr2,r6,r7
	fnmsubs	fr0,fr0,fr2,fr1
	stfsx	fr0,r3,r7
	addi	r7,r7,4
	bdnz	1b
	lfd	fr2,32(r1)
	b	fpdisable
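
/*
 * With the same operand order, the fnmsubs above yields
 * dst[i] = -(a[i] * c[i] - b[i]), i.e. the negated multiply-subtract that
 * vnmsubfp defines.
 */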

/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
_GLOBAL(vrefp)
	mflr	r12
	bl	fpenable
	li	r0,4
	LDCONST(fr1, fpone)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	fdivs	fr0,fr1,fr0
	stfsx	fr0,r3,r6
	addi	r6,r6,4
	bdnz	1b
	b	fpdisable
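
/*
 * Unlike the hardware estimate instruction, the loop above simply performs
 * a full single-precision divide per lane:
 *
 *	for (i = 0; i < 4; i++)
 *		dst[i] = 1.0f / src[i];
 */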

/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
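
/*
 * One Newton-Raphson step for y = 1/sqrt(s) refines an estimate r as
 *
 *	r' = r + 0.5 * r * (1 - s * r * r)
 *
 * which is what each fmuls/fnmsubs/fmadds triple below computes; the step
 * is applied twice to sharpen the frsqrte seed.
 */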
_GLOBAL(vrsqrtefp)
	mflr	r12
	bl	fpenable
	stfd	fr2,32(r1)
	stfd	fr3,40(r1)
	stfd	fr4,48(r1)
	stfd	fr5,56(r1)
	li	r0,4
	LDCONST(fr4, fpone)
	LDCONST(fr5, fphalf)
	mtctr	r0
	li	r6,0
1:	lfsx	fr0,r4,r6
	frsqrte	fr1,fr0		/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0	/* r * s */
	fmuls	fr2,fr1,fr5	/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4	/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1	/* r = r + 0.5 * r * (1 - s * r * r) */
	stfsx	fr1,r3,r6
	addi	r6,r6,4
	bdnz	1b
	lfd	fr5,56(r1)
	lfd	fr4,48(r1)
	lfd	fr3,40(r1)
	lfd	fr2,32(r1)
	b	fpdisable