Linux Audio

Check our new training course

Loading...
v5.4
 1/* SPDX-License-Identifier: GPL-2.0 */
 2/*
 3 * r2300_switch.S: R2300 specific task switching code.
 4 *
 5 * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
 6 * Copyright (C) 1994, 1995, 1996 by Andreas Busse
 7 *
 8 * Multi-cpu abstraction and macros for easier reading:
 9 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
10 *
11 * Further modifications to make this work:
12 * Copyright (c) 1998-2000 Harald Koerfgen
13 */
14#include <asm/asm.h>
15#include <asm/cachectl.h>
16#include <asm/export.h>
17#include <asm/fpregdef.h>
18#include <asm/mipsregs.h>
19#include <asm/asm-offsets.h>
20#include <asm/regdef.h>
21#include <asm/stackframe.h>
22#include <asm/thread_info.h>
23
24#include <asm/asmmacro.h>
25
26	.set	mips1
27	.align	5
28
29/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30 * task_struct *resume(task_struct *prev, task_struct *next,
31 *		       struct thread_info *next_ti)
32 */
 33LEAF(resume)
	# Save the outgoing task's (a0 = prev) context:
	# live CP0 status, callee-saved registers, and our return address.
 34	mfc0	t1, CP0_STATUS
 35	sw	t1, THREAD_STATUS(a0)		# prev->thread.cp0_status = status
 36	cpu_save_nonscratch a0			# save s0-s7/sp/fp into prev
 37	sw	ra, THREAD_REG31(a0)		# prev resumes at our caller's ra
 38
 39#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
















	# Publish next's (a1) stack canary into the global guard variable.
	# Compiled out on SMP (a single global guard cannot be per-task there).
 40	PTR_LA	t8, __stack_chk_guard
 41	LONG_L	t9, TASK_STACK_CANARY(a1)
 42	LONG_S	t9, 0(t8)
 43#endif
 44
 45	/*
 46	 * The order of restoring the registers takes care of the race
 47	 * updating $28, $29 and kernelsp without disabling ints.
 48	 */
 49	move	$28, a2				# $28 = next_ti (thread_info of next)
 50	cpu_restore_nonscratch a1		# restore s0-s7/sp/fp/ra from next
 51
	# kernelsp = top of next's kernel stack (topmost 32 bytes are unused pad)
 52	addiu	t1, $28, _THREAD_SIZE - 32
 53	sw	t1, kernelsp
 54
	# Build next's CP0 status: keep the live interrupt-enable/mask bits
	# (0xff01 = IM7..IM0 | IE) and take every other bit from next's saved
	# status, then install it.
 55	mfc0	t1, CP0_STATUS		/* Do we really need this? */
 56	li	a3, 0xff01
 57	and	t1, a3				# t1 = current IM/IE bits only
 58	lw	a2, THREAD_STATUS(a1)		# a2 = next->thread.cp0_status
 59	nor	a3, $0, a3			# a3 = ~0xff01
 60	and	a2, a3				# drop stale IM/IE from saved copy
 61	or	a2, t1				# merge in live IM/IE bits
 62	mtc0	a2, CP0_STATUS
 63	move	v0, a0				# return value: prev task_struct
 64	jr	ra
 65	END(resume)
v3.15
 
  1/*
  2 * r2300_switch.S: R2300 specific task switching code.
  3 *
  4 * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
  5 * Copyright (C) 1994, 1995, 1996 by Andreas Busse
  6 *
  7 * Multi-cpu abstraction and macros for easier reading:
  8 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  9 *
 10 * Further modifications to make this work:
 11 * Copyright (c) 1998-2000 Harald Koerfgen
 12 */
 13#include <asm/asm.h>
 14#include <asm/cachectl.h>
 
 15#include <asm/fpregdef.h>
 16#include <asm/mipsregs.h>
 17#include <asm/asm-offsets.h>
 18#include <asm/regdef.h>
 19#include <asm/stackframe.h>
 20#include <asm/thread_info.h>
 21
 22#include <asm/asmmacro.h>
 23
 24	.set	mips1
 25	.align	5
 26
 27/*
 28 * Offset to the current process status flags, the first 32 bytes of the
 29 * stack are not used.
 30 */
 31#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
 32
 33/*
 34 * FPU context is saved iff the process has used its FPU in the current
 35 * time slice as indicated by TIF_USEDFPU.  In any case, the CU1 bit for user
 36 * space STATUS register should be 0, so that a process *always* starts its
 37 * userland with FPU disabled after each context switch.
 38 *
 39 * FPU will be enabled as soon as the process accesses FPU again, through
 40 * do_cpu() trap.
 41 */
 42
 43/*
 44 * task_struct *resume(task_struct *prev, task_struct *next,
 45 *		       struct thread_info *next_ti, int usedfpu)
 46 */
 47LEAF(resume)
	# Save the outgoing task's (a0 = prev) context:
	# live CP0 status, callee-saved registers, and our return address.
 48	mfc0	t1, CP0_STATUS
 49	sw	t1, THREAD_STATUS(a0)		# prev->thread.cp0_status = status
 50	cpu_save_nonscratch a0			# save s0-s7/sp/fp into prev
 51	sw	ra, THREAD_REG31(a0)		# prev resumes at our caller's ra
 52
	# a3 = usedfpu flag (4th arg): skip the FPU save entirely unless prev
	# actually touched the FPU in this time slice (see header comment).
 53	beqz	a3, 1f
 54
 55	PTR_L	t3, TASK_THREAD_INFO(a0)
 56
 57	/*
 58	 * clear saved user stack CU1 bit
 59	 */
 60	lw	t0, ST_OFF(t3)			# user CP0 status saved on kstack
 61	li	t1, ~ST0_CU1
 62	and	t0, t0, t1			# userland restarts with FPU off
 63	sw	t0, ST_OFF(t3)
 64
 65	fpu_save_single a0, t0			# clobbers t0
 66
 671:
 68
 69#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	# Publish next's (a1) stack canary into the global guard variable.
	# Compiled out on SMP (a single global guard cannot be per-task there).
 70	PTR_LA	t8, __stack_chk_guard
 71	LONG_L	t9, TASK_STACK_CANARY(a1)
 72	LONG_S	t9, 0(t8)
 73#endif
 74
 75	/*
 76	 * The order of restoring the registers takes care of the race
 77	 * updating $28, $29 and kernelsp without disabling ints.
 78	 */
 79	move	$28, a2				# $28 = next_ti (thread_info of next)
 80	cpu_restore_nonscratch a1		# restore s0-s7/sp/fp/ra from next
 81
	# kernelsp = top of next's kernel stack (topmost 32 bytes are unused pad)
 82	addiu	t1, $28, _THREAD_SIZE - 32
 83	sw	t1, kernelsp
 84
	# Build next's CP0 status: keep the live interrupt-enable/mask bits
	# (0xff01 = IM7..IM0 | IE) and take every other bit from next's saved
	# status, then install it.
 85	mfc0	t1, CP0_STATUS		/* Do we really need this? */
 86	li	a3, 0xff01
 87	and	t1, a3				# t1 = current IM/IE bits only
 88	lw	a2, THREAD_STATUS(a1)		# a2 = next->thread.cp0_status
 89	nor	a3, $0, a3			# a3 = ~0xff01
 90	and	a2, a3				# drop stale IM/IE from saved copy
 91	or	a2, t1				# merge in live IM/IE bits
 92	mtc0	a2, CP0_STATUS
 93	move	v0, a0				# return value: prev task_struct
 94	jr	ra
 95	END(resume)
 96
 97/*
 98 * Save a thread's fp context.
 99 */
100LEAF(_save_fp)
	# a0 = task_struct whose FP context to save (single-precision regs,
	# per the fpu_save_single macro from asmmacro.h).
101	fpu_save_single a0, t1			# clobbers t1
102	jr	ra
103	END(_save_fp)
104
105/*
106 * Restore a thread's fp context.
107 */
108LEAF(_restore_fp)
	# a0 = task_struct whose FP context to restore (single-precision regs,
	# per the fpu_restore_single macro from asmmacro.h).
109	fpu_restore_single a0, t1		# clobbers t1
110	jr	ra
111	END(_restore_fp)
112
113/*
114 * Load the FPU with signalling NANS.  This bit pattern we're using has
115 * the property that no matter whether considered as single or as double
116 * precision represents signaling NANS.
117 *
118 * We initialize fcr31 to rounding to nearest, no exceptions.
119 */
120
121#define FPU_DEFAULT  0x00000000
122
123LEAF(_init_fpu)
	# Enable coprocessor 1 (CU1) so the FPU accesses below are legal.
124	mfc0	t0, CP0_STATUS
125	li	t1, ST0_CU1
126	or	t0, t1
127	mtc0	t0, CP0_STATUS
128
	# fcr31 = 0: round-to-nearest, all exception enables off
	# (see FPU_DEFAULT and the header comment above).
129	li	t1, FPU_DEFAULT
130	ctc1	t1, fcr31
131
	# Fill every FP register with all-ones; per the header comment this
	# bit pattern is a signalling NaN whether read as single or double.
132	li	t0, -1
133
134	mtc1	t0, $f0
135	mtc1	t0, $f1
136	mtc1	t0, $f2
137	mtc1	t0, $f3
138	mtc1	t0, $f4
139	mtc1	t0, $f5
140	mtc1	t0, $f6
141	mtc1	t0, $f7
142	mtc1	t0, $f8
143	mtc1	t0, $f9
144	mtc1	t0, $f10
145	mtc1	t0, $f11
146	mtc1	t0, $f12
147	mtc1	t0, $f13
148	mtc1	t0, $f14
149	mtc1	t0, $f15
150	mtc1	t0, $f16
151	mtc1	t0, $f17
152	mtc1	t0, $f18
153	mtc1	t0, $f19
154	mtc1	t0, $f20
155	mtc1	t0, $f21
156	mtc1	t0, $f22
157	mtc1	t0, $f23
158	mtc1	t0, $f24
159	mtc1	t0, $f25
160	mtc1	t0, $f26
161	mtc1	t0, $f27
162	mtc1	t0, $f28
163	mtc1	t0, $f29
164	mtc1	t0, $f30
165	mtc1	t0, $f31
166	jr	ra
167	END(_init_fpu)