arch/arm/vfp/vfphw.S — capture from a kernel source browser.
Two revisions of the same file follow: v4.6 first, then v3.5.6 (after
the "v3.5.6" marker further down).  The decimal numbers at the start
of each line are the browser's original line numbers, not part of the
assembly source.
v4.6
  1/*
  2 *  linux/arch/arm/vfp/vfphw.S
  3 *
  4 *  Copyright (C) 2004 ARM Limited.
  5 *  Written by Deep Blue Solutions Limited.
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 *
 11 * This code is called from the kernel's undefined instruction trap.
 12 * r9 holds the return address for successful handling.
 13 * lr holds the return address for unrecognised instructions.
 14 * r10 points at the start of the private FP workspace in the thread structure
 15 * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
 16 */
 17#include <linux/init.h>
 18#include <linux/linkage.h>
 19#include <asm/thread_info.h>
 20#include <asm/vfpmacros.h>
 21#include <linux/kern_levels.h>
 22#include <asm/assembler.h>
 23#include <asm/asm-offsets.h>
 24
@ Debug print helpers: each DBGSTRn macro calls printk() with a
@ KERN_DEBUG "VFP: ..." format string placed in .rodata (address taken
@ from the literal pool via "ldr r0, =1f"), saving and restoring the
@ caller-clobbered registers around the call.  All three expand to
@ nothing unless DEBUG is defined.
  25	.macro	DBGSTR, str
  26#ifdef DEBUG
  27	stmfd	sp!, {r0-r3, ip, lr}
  28	ldr	r0, =1f
  29	bl	printk
  30	ldmfd	sp!, {r0-r3, ip, lr}
  31
  32	.pushsection .rodata, "a"
  331:	.ascii	KERN_DEBUG "VFP: \str\n"
  34	.byte	0
  35	.previous
  36#endif
  37	.endm
  38
@ As DBGSTR, but additionally passes one printk argument in r1.
  39	.macro  DBGSTR1, str, arg
  40#ifdef DEBUG
  41	stmfd	sp!, {r0-r3, ip, lr}
  42	mov	r1, \arg
  43	ldr	r0, =1f
  44	bl	printk
  45	ldmfd	sp!, {r0-r3, ip, lr}
  46
  47	.pushsection .rodata, "a"
  481:	.ascii	KERN_DEBUG "VFP: \str\n"
  49	.byte	0
  50	.previous
  51#endif
  52	.endm
  53
@ As DBGSTR, but passes three printk arguments in r1-r3.  The copies
@ are made r3-first; call sites must not pass an argument in a register
@ that an earlier copy overwrites (e.g. \arg1 in r2 or r3).
  54	.macro  DBGSTR3, str, arg1, arg2, arg3
  55#ifdef DEBUG
  56	stmfd	sp!, {r0-r3, ip, lr}
  57	mov	r3, \arg3
  58	mov	r2, \arg2
  59	mov	r1, \arg1
  60	ldr	r0, =1f
  61	bl	printk
  62	ldmfd	sp!, {r0-r3, ip, lr}
  63
  64	.pushsection .rodata, "a"
  651:	.ascii	KERN_DEBUG "VFP: \str\n"
  66	.byte	0
  67	.previous
  68#endif
  69	.endm
 70
 71
  72@ VFP hardware support entry point.
  73@
  74@  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
  75@  r2  = PC value to resume execution after successful emulation
  76@  r9  = normal "successful" return address
  77@  r10 = vfp_state union
  78@  r11 = CPU number
  79@  lr  = unrecognised instruction return address
  80@  IRQs enabled.
@
@ NOTE(review): both non-bounce exit paths below drop the preempt count
@ (dec_preempt_count_ti), so the caller presumably entered with
@ preemption disabled — confirm against the undef-instruction handler.
  81ENTRY(vfp_support_entry)
  82	DBGSTR3	"instr %08x pc %08x state %p", r0, r2, r10
  83
  84	ldr	r3, [sp, #S_PSR]	@ Neither lazy restore nor FP exceptions
  85	and	r3, r3, #MODE_MASK	@ are supported in kernel mode
  86	teq	r3, #USR_MODE
  87	bne	vfp_kmode_exception	@ Returns through lr
  88
  89	VFPFMRX	r1, FPEXC		@ Is the VFP enabled?
  90	DBGSTR1	"fpexc %08x", r1
  91	tst	r1, #FPEXC_EN
  92	bne	look_for_VFP_exceptions	@ VFP is already enabled
  93
  94	DBGSTR1 "enable %x", r10
  95	ldr	r3, vfp_current_hw_state_address
  96	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
  97	ldr	r4, [r3, r11, lsl #2]	@ vfp_current_hw_state pointer
  98	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
  99	cmp	r4, r10			@ this thread owns the hw context?
 100#ifndef CONFIG_SMP
 101	@ For UP, checking that this thread owns the hw context is
 102	@ sufficient to determine that the hardware state is valid.
 103	beq	vfp_hw_state_valid
 104
 105	@ On UP, we lazily save the VFP context.  As a different
 106	@ thread wants ownership of the VFP hardware, save the old
 107	@ state if there was a previous (valid) owner.
 108
 109	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
 110					@ exceptions, so we can get at the
 111					@ rest of it
 112
 113	DBGSTR1	"save old state %p", r4
 114	cmp	r4, #0			@ if the vfp_current_hw_state is NULL
 115	beq	vfp_reload_hw		@ then the hw state needs reloading
 116	VFPFSTMIA r4, r5		@ save the working registers
 117	VFPFMRX	r5, FPSCR		@ current status
 118#ifndef CONFIG_CPU_FEROCEON
 119	tst	r1, #FPEXC_EX		@ is there additional state to save?
 120	beq	1f
 121	VFPFMRX	r6, FPINST		@ FPINST (only if FPEXC.EX is set)
 122	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
 123	beq	1f
 124	VFPFMRX	r8, FPINST2		@ FPINST2 if needed (and present)
 1251:
 126#endif
 127	stmia	r4, {r1, r5, r6, r8}	@ save FPEXC, FPSCR, FPINST, FPINST2
 128vfp_reload_hw:
 129
 130#else
 131	@ For SMP, if this thread does not own the hw context, then we
 132	@ need to reload it.  No need to save the old state as on SMP,
 133	@ we always save the state when we switch away from a thread.
 134	bne	vfp_reload_hw
 135
 136	@ This thread has ownership of the current hardware context.
 137	@ However, it may have been migrated to another CPU, in which
 138	@ case the saved state is newer than the hardware context.
 139	@ Check this by looking at the CPU number which the state was
 140	@ last loaded onto.
 141	ldr	ip, [r10, #VFP_CPU]
 142	teq	ip, r11
 143	beq	vfp_hw_state_valid
 144
 145vfp_reload_hw:
 146	@ We're loading this threads state into the VFP hardware. Update
 147	@ the CPU number which contains the most up to date VFP context.
 148	str	r11, [r10, #VFP_CPU]
 149
 150	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
 151					@ exceptions, so we can get at the
 152					@ rest of it
 153#endif
 154
 155	DBGSTR1	"load state %p", r10
 156	str	r10, [r3, r11, lsl #2]	@ update the vfp_current_hw_state pointer
 157					@ Load the saved state back into the VFP
 158	VFPFLDMIA r10, r5		@ reload the working registers while
 159					@ FPEXC is in a safe state
 160	ldmia	r10, {r1, r5, r6, r8}	@ load FPEXC, FPSCR, FPINST, FPINST2
 161#ifndef CONFIG_CPU_FEROCEON
 162	tst	r1, #FPEXC_EX		@ is there additional state to restore?
 163	beq	1f
 164	VFPFMXR	FPINST, r6		@ restore FPINST (only if FPEXC.EX is set)
 165	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to write?
 166	beq	1f
 167	VFPFMXR	FPINST2, r8		@ FPINST2 if needed (and present)
 1681:
 169#endif
 170	VFPFMXR	FPSCR, r5		@ restore status
 171
 172@ The context stored in the VFP hardware is up to date with this thread
 173vfp_hw_state_valid:
 174	tst	r1, #FPEXC_EX
 175	bne	process_exception	@ might as well handle the pending
 176					@ exception before retrying branch
 177					@ out before setting an FPEXC that
 178					@ stops us reading stuff
 179	VFPFMXR	FPEXC, r1		@ Restore FPEXC last
 180	sub	r2, r2, #4		@ Retry current instruction - if Thumb
 181	str	r2, [sp, #S_PC]		@ mode it's two 16-bit instructions,
 182					@ else it's one 32-bit instruction, so
 183					@ always subtract 4 from the following
 184					@ instruction address.
 185	dec_preempt_count_ti r10, r4
 186	ret	r9			@ we think we have handled things





 187
 188
 189look_for_VFP_exceptions:
 190	@ Check for synchronous or asynchronous exception
 191	tst	r1, #FPEXC_EX | FPEXC_DEX
 192	bne	process_exception
 193	@ On some implementations of the VFP subarch 1, setting FPSCR.IXE
 194	@ causes all the CDP instructions to be bounced synchronously without
 195	@ setting the FPEXC.EX bit
 196	VFPFMRX	r5, FPSCR
 197	tst	r5, #FPSCR_IXE
 198	bne	process_exception
 199
	@ A non-zero vector LEN field is also treated as a synchronous
	@ bounce: flag it by setting FPEXC_DEX in r1 before taking the
	@ exception path.
 200	tst	r5, #FPSCR_LENGTH_MASK
 201	beq	skip
 202	orr	r1, r1, #FPEXC_DEX
 203	b	process_exception
 204skip:
 205
 206	@ Fall into hand on to next handler - appropriate coproc instr
 207	@ not recognised by VFP
 208
 209	DBGSTR	"not VFP"
 210	dec_preempt_count_ti r10, r4
 211	ret	lr





 212
 213process_exception:
 214	DBGSTR	"bounce"
 215	mov	r2, sp			@ nothing stacked - regdump is at TOS
 216	mov	lr, r9			@ setup for a return to the user code.
 217
 218	@ Now call the C code to package up the bounce to the support code
 219	@   r0 holds the trigger instruction
 220	@   r1 holds the FPEXC value
 221	@   r2 pointer to register dump
 222	b	VFP_bounce		@ we have handled this - the support
 223					@ code will raise an exception if
 224					@ required. If not, the user code will
 225					@ retry the faulted instruction
 226ENDPROC(vfp_support_entry)
227
 228ENTRY(vfp_save_state)
 229	@ Save the current VFP state
 230	@ r0 - save location
 231	@ r1 - FPEXC
	@ Clobbers r2, r3, r12.  r1 (FPEXC) is written out unmodified as
	@ the first word of the save area; FPINST/FPINST2 slots are only
	@ meaningful when FPEXC.EX (and FPEXC.FP2V) are set.
 232	DBGSTR1	"save VFP state %p", r0
 233	VFPFSTMIA r0, r2		@ save the working registers
 234	VFPFMRX	r2, FPSCR		@ current status
 235	tst	r1, #FPEXC_EX		@ is there additional state to save?
 236	beq	1f
 237	VFPFMRX	r3, FPINST		@ FPINST (only if FPEXC.EX is set)
 238	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
 239	beq	1f
 240	VFPFMRX	r12, FPINST2		@ FPINST2 if needed (and present)
 2411:
 242	stmia	r0, {r1, r2, r3, r12}	@ save FPEXC, FPSCR, FPINST, FPINST2
 243	ret	lr
 244ENDPROC(vfp_save_state)
245
@ Literal pool word: address of the vfp_current_hw_state array, loaded
@ PC-relative by vfp_support_entry and indexed by CPU number there
@ ("ldr r4, [r3, r11, lsl #2]").
 246	.align
 247vfp_current_hw_state_address:
 248	.word	vfp_current_hw_state
 249
@ tbl_branch: computed branch into a table of fixed-size entries that
@ immediately follows the macro expansion.  \base = entry index,
@ \shift = log2 of the entry size in bytes, \tmp = scratch (Thumb-2
@ path only).
 250	.macro	tbl_branch, base, tmp, shift
 251#ifdef CONFIG_THUMB2_KERNEL
 252	adr	\tmp, 1f
 253	add	\tmp, \tmp, \base, lsl \shift
 254	ret	\tmp
 255#else
	@ In ARM state PC reads two instructions ahead, so the add below
	@ lands past this slot; the "mov r0, r0" is a nop filling it.
 256	add	pc, pc, \base, lsl \shift
 257	mov	r0, r0
 258#endif
 2591:
 260	.endm
261
@ Read single-precision register s<r0> into r0 (r0 = register number).
@ The .irp builds a 32-entry jump table: per dr, opcode2 0 reads the
@ even s register and opcode2 4 the odd one.  ".org 1b + 8" pads each
@ slot to exactly 8 bytes, matching tbl_branch's "lsl #3" indexing.
 262ENTRY(vfp_get_float)
 263	tbl_branch r0, r3, #3
 264	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
 2651:	mrc	p10, 0, r0, c\dr, c0, 0	@ fmrs	r0, s0
 266	ret	lr
 267	.org	1b + 8
 2681:	mrc	p10, 0, r0, c\dr, c0, 4	@ fmrs	r0, s1
 269	ret	lr
 270	.org	1b + 8
 271	.endr
 272ENDPROC(vfp_get_float)
273
@ Write r0 into single-precision register s<r1> (r1 = register number).
@ Jump-table layout mirrors vfp_get_float: 8-byte slots, even/odd s
@ registers selected by opcode2 0/4.
 274ENTRY(vfp_put_float)
 275	tbl_branch r1, r3, #3
 276	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
 2771:	mcr	p10, 0, r0, c\dr, c0, 0	@ fmsr	r0, s0
 278	ret	lr
 279	.org	1b + 8
 2801:	mcr	p10, 0, r0, c\dr, c0, 4	@ fmsr	r0, s1
 281	ret	lr
 282	.org	1b + 8
 283	.endr
 284ENDPROC(vfp_put_float)
285
@ Read double-precision register d<r0> into r0:r1 (r0 = register
@ number).  8-byte jump-table slots; d16-d31 need the mrrc encoding and
@ exist only on VFPv3.  An out-of-range index falls through to the
@ zero return below.
 286ENTRY(vfp_get_double)
 287	tbl_branch r0, r3, #3
 288	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
 2891:	fmrrd	r0, r1, d\dr
 290	ret	lr
 291	.org	1b + 8
 292	.endr
 293#ifdef CONFIG_VFPv3
 294	@ d16 - d31 registers
 295	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
 2961:	mrrc	p11, 3, r0, r1, c\dr	@ fmrrd	r0, r1, d\dr
 297	ret	lr
 298	.org	1b + 8
 299	.endr
 300#endif
 301
 302	@ virtual register 16 (or 32 if VFPv3) for compare with zero
 303	mov	r0, #0
 304	mov	r1, #0
 305	ret	lr
 306ENDPROC(vfp_get_double)
307
@ Write r0:r1 into double-precision register d<r2> (r2 = register
@ number).  8-byte jump-table slots; d16-d31 need the mcrr encoding and
@ exist only on VFPv3.
 308ENTRY(vfp_put_double)
 309	tbl_branch r2, r3, #3
 310	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
 3111:	fmdrr	d\dr, r0, r1
 312	ret	lr
 313	.org	1b + 8
 314	.endr
 315#ifdef CONFIG_VFPv3
 316	@ d16 - d31 registers
 317	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
 3181:	mcrr	p11, 3, r0, r1, c\dr	@ fmdrr	r0, r1, d\dr
 319	ret	lr
 320	.org	1b + 8
 321	.endr
 322#endif
 323ENDPROC(vfp_put_double)
v3.5.6 — older revision of the same file (arch/arm/vfp/vfphw.S) follows.
  1/*
  2 *  linux/arch/arm/vfp/vfphw.S
  3 *
  4 *  Copyright (C) 2004 ARM Limited.
  5 *  Written by Deep Blue Solutions Limited.
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 *
 11 * This code is called from the kernel's undefined instruction trap.
 12 * r9 holds the return address for successful handling.
 13 * lr holds the return address for unrecognised instructions.
 14 * r10 points at the start of the private FP workspace in the thread structure
 15 * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
 16 */
 
 
 17#include <asm/thread_info.h>
 18#include <asm/vfpmacros.h>
 19#include "../kernel/entry-header.S"
 
 
 20
@ Debug print helpers (v3.5.6 variant): the "<7>" (KERN_DEBUG) format
@ string is emitted inline in .text, branched over with "b 1f", and its
@ address formed with "add r0, pc, #4".  Caller-clobbered registers are
@ saved/restored around the printk call.  No-ops unless DEBUG is set.
  21	.macro	DBGSTR, str
  22#ifdef DEBUG
  23	stmfd	sp!, {r0-r3, ip, lr}
  24	add	r0, pc, #4
  25	bl	printk
  26	b	1f
  27	.asciz  "<7>VFP: \str\n"
  28	.balign 4
  291:	ldmfd	sp!, {r0-r3, ip, lr}


  30#endif
  31	.endm
  32
@ As DBGSTR, but additionally passes one printk argument in r1.
  33	.macro  DBGSTR1, str, arg
  34#ifdef DEBUG
  35	stmfd	sp!, {r0-r3, ip, lr}
  36	mov	r1, \arg
  37	add	r0, pc, #4
  38	bl	printk
  39	b	1f
  40	.asciz  "<7>VFP: \str\n"
  41	.balign 4
  421:	ldmfd	sp!, {r0-r3, ip, lr}


  43#endif
  44	.endm
  45
@ As DBGSTR, but passes three printk arguments in r1-r3 (copied
@ r3-first; call sites must not pass an argument in a register that an
@ earlier copy overwrites).
  46	.macro  DBGSTR3, str, arg1, arg2, arg3
  47#ifdef DEBUG
  48	stmfd	sp!, {r0-r3, ip, lr}
  49	mov	r3, \arg3
  50	mov	r2, \arg2
  51	mov	r1, \arg1
  52	add	r0, pc, #4
  53	bl	printk
  54	b	1f
  55	.asciz  "<7>VFP: \str\n"
  56	.balign 4
  571:	ldmfd	sp!, {r0-r3, ip, lr}


  58#endif
  59	.endm
 60
 61
  62@ VFP hardware support entry point.
  63@
  64@  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
  65@  r2  = PC value to resume execution after successful emulation
  66@  r9  = normal "successful" return address
  67@  r10 = vfp_state union
  68@  r11 = CPU number
  69@  lr  = unrecognised instruction return address
  70@  IRQs enabled.
@
@ NOTE(review): this older (v3.5.6) revision has no kernel-mode S_PSR
@ check and no FPSCR vector-LEN bounce (both present in the v4.6 copy
@ above); it open-codes the preempt-count decrement under
@ CONFIG_PREEMPT and returns with "mov pc, ..." rather than "ret".
  71ENTRY(vfp_support_entry)
  72	DBGSTR3	"instr %08x pc %08x state %p", r0, r2, r10
  73





  74	VFPFMRX	r1, FPEXC		@ Is the VFP enabled?
  75	DBGSTR1	"fpexc %08x", r1
  76	tst	r1, #FPEXC_EN
  77	bne	look_for_VFP_exceptions	@ VFP is already enabled
  78
  79	DBGSTR1 "enable %x", r10
  80	ldr	r3, vfp_current_hw_state_address
  81	orr	r1, r1, #FPEXC_EN	@ user FPEXC has the enable bit set
  82	ldr	r4, [r3, r11, lsl #2]	@ vfp_current_hw_state pointer
  83	bic	r5, r1, #FPEXC_EX	@ make sure exceptions are disabled
  84	cmp	r4, r10			@ this thread owns the hw context?
  85#ifndef CONFIG_SMP
  86	@ For UP, checking that this thread owns the hw context is
  87	@ sufficient to determine that the hardware state is valid.
  88	beq	vfp_hw_state_valid
  89
  90	@ On UP, we lazily save the VFP context.  As a different
  91	@ thread wants ownership of the VFP hardware, save the old
  92	@ state if there was a previous (valid) owner.
  93
  94	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
  95					@ exceptions, so we can get at the
  96					@ rest of it
  97
  98	DBGSTR1	"save old state %p", r4
  99	cmp	r4, #0			@ if the vfp_current_hw_state is NULL
 100	beq	vfp_reload_hw		@ then the hw state needs reloading
 101	VFPFSTMIA r4, r5		@ save the working registers
 102	VFPFMRX	r5, FPSCR		@ current status
 103#ifndef CONFIG_CPU_FEROCEON
 104	tst	r1, #FPEXC_EX		@ is there additional state to save?
 105	beq	1f
 106	VFPFMRX	r6, FPINST		@ FPINST (only if FPEXC.EX is set)
 107	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
 108	beq	1f
 109	VFPFMRX	r8, FPINST2		@ FPINST2 if needed (and present)
 1101:
 111#endif
 112	stmia	r4, {r1, r5, r6, r8}	@ save FPEXC, FPSCR, FPINST, FPINST2
 113vfp_reload_hw:
 114
 115#else
 116	@ For SMP, if this thread does not own the hw context, then we
 117	@ need to reload it.  No need to save the old state as on SMP,
 118	@ we always save the state when we switch away from a thread.
 119	bne	vfp_reload_hw
 120
 121	@ This thread has ownership of the current hardware context.
 122	@ However, it may have been migrated to another CPU, in which
 123	@ case the saved state is newer than the hardware context.
 124	@ Check this by looking at the CPU number which the state was
 125	@ last loaded onto.
 126	ldr	ip, [r10, #VFP_CPU]
 127	teq	ip, r11
 128	beq	vfp_hw_state_valid
 129
 130vfp_reload_hw:
 131	@ We're loading this threads state into the VFP hardware. Update
 132	@ the CPU number which contains the most up to date VFP context.
 133	str	r11, [r10, #VFP_CPU]
 134
 135	VFPFMXR	FPEXC, r5		@ enable VFP, disable any pending
 136					@ exceptions, so we can get at the
 137					@ rest of it
 138#endif
 139
 140	DBGSTR1	"load state %p", r10
 141	str	r10, [r3, r11, lsl #2]	@ update the vfp_current_hw_state pointer
 142					@ Load the saved state back into the VFP
 143	VFPFLDMIA r10, r5		@ reload the working registers while
 144					@ FPEXC is in a safe state
 145	ldmia	r10, {r1, r5, r6, r8}	@ load FPEXC, FPSCR, FPINST, FPINST2
 146#ifndef CONFIG_CPU_FEROCEON
 147	tst	r1, #FPEXC_EX		@ is there additional state to restore?
 148	beq	1f
 149	VFPFMXR	FPINST, r6		@ restore FPINST (only if FPEXC.EX is set)
 150	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to write?
 151	beq	1f
 152	VFPFMXR	FPINST2, r8		@ FPINST2 if needed (and present)
 1531:
 154#endif
 155	VFPFMXR	FPSCR, r5		@ restore status
 156
 157@ The context stored in the VFP hardware is up to date with this thread
 158vfp_hw_state_valid:
 159	tst	r1, #FPEXC_EX
 160	bne	process_exception	@ might as well handle the pending
 161					@ exception before retrying branch
 162					@ out before setting an FPEXC that
 163					@ stops us reading stuff
 164	VFPFMXR	FPEXC, r1		@ Restore FPEXC last
 165	sub	r2, r2, #4		@ Retry current instruction - if Thumb
 166	str	r2, [sp, #S_PC]		@ mode it's two 16-bit instructions,
 167					@ else it's one 32-bit instruction, so
 168					@ always subtract 4 from the following
 169					@ instruction address.
 170#ifdef CONFIG_PREEMPT
 171	get_thread_info	r10
 172	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 173	sub	r11, r4, #1		@ decrement it
 174	str	r11, [r10, #TI_PREEMPT]
 175#endif
 176	mov	pc, r9			@ we think we have handled things
 177
 178
 179look_for_VFP_exceptions:
 180	@ Check for synchronous or asynchronous exception
 181	tst	r1, #FPEXC_EX | FPEXC_DEX
 182	bne	process_exception
 183	@ On some implementations of the VFP subarch 1, setting FPSCR.IXE
 184	@ causes all the CDP instructions to be bounced synchronously without
 185	@ setting the FPEXC.EX bit
 186	VFPFMRX	r5, FPSCR
 187	tst	r5, #FPSCR_IXE
 188	bne	process_exception
 189






 190	@ Fall into hand on to next handler - appropriate coproc instr
 191	@ not recognised by VFP
 192
 193	DBGSTR	"not VFP"
 194#ifdef CONFIG_PREEMPT
 195	get_thread_info	r10
 196	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 197	sub	r11, r4, #1		@ decrement it
 198	str	r11, [r10, #TI_PREEMPT]
 199#endif
 200	mov	pc, lr
 201
 202process_exception:
 203	DBGSTR	"bounce"
 204	mov	r2, sp			@ nothing stacked - regdump is at TOS
 205	mov	lr, r9			@ setup for a return to the user code.
 206
 207	@ Now call the C code to package up the bounce to the support code
 208	@   r0 holds the trigger instruction
 209	@   r1 holds the FPEXC value
 210	@   r2 pointer to register dump
 211	b	VFP_bounce		@ we have handled this - the support
 212					@ code will raise an exception if
 213					@ required. If not, the user code will
 214					@ retry the faulted instruction
 215ENDPROC(vfp_support_entry)
216
 217ENTRY(vfp_save_state)
 218	@ Save the current VFP state
 219	@ r0 - save location
 220	@ r1 - FPEXC
	@ Clobbers r2, r3, r12.  r1 (FPEXC) is written out unmodified as
	@ the first word of the save area.
 221	DBGSTR1	"save VFP state %p", r0
 222	VFPFSTMIA r0, r2		@ save the working registers
 223	VFPFMRX	r2, FPSCR		@ current status
 224	tst	r1, #FPEXC_EX		@ is there additional state to save?
 225	beq	1f
 226	VFPFMRX	r3, FPINST		@ FPINST (only if FPEXC.EX is set)
 227	tst	r1, #FPEXC_FP2V		@ is there an FPINST2 to read?
 228	beq	1f
 229	VFPFMRX	r12, FPINST2		@ FPINST2 if needed (and present)
 2301:
 231	stmia	r0, {r1, r2, r3, r12}	@ save FPEXC, FPSCR, FPINST, FPINST2
 232	mov	pc, lr
 233ENDPROC(vfp_save_state)
234
@ Literal pool word: address of the vfp_current_hw_state array, loaded
@ PC-relative by vfp_support_entry and indexed by CPU number there.
 235	.align
 236vfp_current_hw_state_address:
 237	.word	vfp_current_hw_state
 238
@ tbl_branch: computed branch into a table of fixed-size entries that
@ immediately follows the macro expansion.  \base = entry index,
@ \shift = log2 of the entry size in bytes, \tmp = scratch (Thumb-2
@ path only).
 239	.macro	tbl_branch, base, tmp, shift
 240#ifdef CONFIG_THUMB2_KERNEL
 241	adr	\tmp, 1f
 242	add	\tmp, \tmp, \base, lsl \shift
 243	mov	pc, \tmp
 244#else
	@ In ARM state PC reads two instructions ahead, so the add below
	@ lands past this slot; the "mov r0, r0" is a nop filling it.
 245	add	pc, pc, \base, lsl \shift
 246	mov	r0, r0
 247#endif
 2481:
 249	.endm
250
@ Read single-precision register s<r0> into r0 (r0 = register number).
@ 32-entry jump table, 8-byte slots enforced by ".org 1b + 8"; opcode2
@ 0/4 selects the even/odd s register of each dr pair.
 251ENTRY(vfp_get_float)
 252	tbl_branch r0, r3, #3
 253	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
 2541:	mrc	p10, 0, r0, c\dr, c0, 0	@ fmrs	r0, s0
 255	mov	pc, lr
 256	.org	1b + 8
 2571:	mrc	p10, 0, r0, c\dr, c0, 4	@ fmrs	r0, s1
 258	mov	pc, lr
 259	.org	1b + 8
 260	.endr
 261ENDPROC(vfp_get_float)
262
@ Write r0 into single-precision register s<r1> (r1 = register number).
@ Jump-table layout mirrors vfp_get_float above.
 263ENTRY(vfp_put_float)
 264	tbl_branch r1, r3, #3
 265	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
 2661:	mcr	p10, 0, r0, c\dr, c0, 0	@ fmsr	r0, s0
 267	mov	pc, lr
 268	.org	1b + 8
 2691:	mcr	p10, 0, r0, c\dr, c0, 4	@ fmsr	r0, s1
 270	mov	pc, lr
 271	.org	1b + 8
 272	.endr
 273ENDPROC(vfp_put_float)
274
@ Read double-precision register d<r0> into r0:r1 (r0 = register
@ number).  d16-d31 (mrrc encoding) exist only on VFPv3; out-of-range
@ indices fall through to the zero return below.
 275ENTRY(vfp_get_double)
 276	tbl_branch r0, r3, #3
 277	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
 2781:	fmrrd	r0, r1, d\dr
 279	mov	pc, lr
 280	.org	1b + 8
 281	.endr
 282#ifdef CONFIG_VFPv3
 283	@ d16 - d31 registers
 284	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
 2851:	mrrc	p11, 3, r0, r1, c\dr	@ fmrrd	r0, r1, d\dr
 286	mov	pc, lr
 287	.org	1b + 8
 288	.endr
 289#endif
 290
 291	@ virtual register 16 (or 32 if VFPv3) for compare with zero
 292	mov	r0, #0
 293	mov	r1, #0
 294	mov	pc, lr
 295ENDPROC(vfp_get_double)
296
@ Write r0:r1 into double-precision register d<r2> (r2 = register
@ number).  d16-d31 (mcrr encoding) exist only on VFPv3.
 297ENTRY(vfp_put_double)
 298	tbl_branch r2, r3, #3
 299	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
 3001:	fmdrr	d\dr, r0, r1
 301	mov	pc, lr
 302	.org	1b + 8
 303	.endr
 304#ifdef CONFIG_VFPv3
 305	@ d16 - d31 registers
 306	.irp	dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
 3071:	mcrr	p11, 3, r0, r1, c\dr	@ fmdrr	r0, r1, d\dr
 308	mov	pc, lr
 309	.org	1b + 8
 310	.endr
 311#endif
 312ENDPROC(vfp_put_double)