v4.17
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/* Make the addition of cfi info a little easier. */
	.macro cfi_rel_offset reg offset=0 docfi=0
	.if \docfi
	.cfi_rel_offset \reg, \offset
	.endif
	.endm

	.macro cfi_st reg offset=0 docfi=0
	LONG_S	\reg, \offset(sp)
	cfi_rel_offset \reg, \offset, \docfi
	.endm

	.macro cfi_restore reg offset=0 docfi=0
	.if \docfi
	.cfi_restore \reg
	.endif
	.endm

	.macro cfi_ld reg offset=0 docfi=0
	LONG_L	\reg, \offset(sp)
	cfi_restore \reg \offset \docfi
	.endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif
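
The two values mirror the CP0 Status register layout: R3000-class cores keep a three-deep KU/IE stack in the low six bits, while R4000-class and later cores use IE, EXL, ERL and the two-bit KSU field in the low five. A small self-checking C sketch (bit names from the MIPS architecture manuals, not from this file):

	#include <assert.h>

	int main(void)
	{
		/* R3000-class Status: KUo/IEo/KUp/IEp/KUc/IEc in bits 5..0 */
		enum { IEc = 1 << 0, KUc = 1 << 1, IEp = 1 << 2,
		       KUp = 1 << 3, IEo = 1 << 4, KUo = 1 << 5 };
		assert((KUo | IEo | KUp | IEp | KUc | IEc) == 0x3f);

		/* R4000-class Status: IE, EXL, ERL and KSU in bits 4..0 */
		enum { IE = 1 << 0, EXL = 1 << 1, ERL = 1 << 2, KSU = 3 << 3 };
		assert((KSU | ERL | EXL | IE) == 0x1f);
		return 0;
	}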

		.macro	SAVE_AT docfi=0
		.set	push
		.set	noat
		cfi_st	$1, PT_R1, \docfi
		.set	pop
		.endm

		.macro	SAVE_TEMP docfi=0
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		mflhxu	v1
		LONG_S	v1, PT_LO(sp)
		mflhxu	v1
		LONG_S	v1, PT_HI(sp)
		mflhxu	v1
		LONG_S	v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
		mfhi	v1
#endif
#ifdef CONFIG_32BIT
		cfi_st	$8, PT_R8, \docfi
		cfi_st	$9, PT_R9, \docfi
#endif
		cfi_st	$10, PT_R10, \docfi
		cfi_st	$11, PT_R11, \docfi
		cfi_st	$12, PT_R12, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_HI(sp)
		mflo	v1
#endif
		cfi_st	$13, PT_R13, \docfi
		cfi_st	$14, PT_R14, \docfi
		cfi_st	$15, PT_R15, \docfi
		cfi_st	$24, PT_R24, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/*
		 * The Octeon multiplier state is affected by general
		 * multiply instructions. It must be saved before kernel
		 * code has a chance to corrupt it.
		 */
		jal     octeon_mult_save
#endif
		.endm

		.macro	SAVE_STATIC docfi=0
		cfi_st	$16, PT_R16, \docfi
		cfi_st	$17, PT_R17, \docfi
		cfi_st	$18, PT_R18, \docfi
		cfi_st	$19, PT_R19, \docfi
		cfi_st	$20, PT_R20, \docfi
		cfi_st	$21, PT_R21, \docfi
		cfi_st	$22, PT_R22, \docfi
		cfi_st	$23, PT_R23, \docfi
		cfi_st	$30, PT_R30, \docfi
		.endm

/*
 * get_saved_sp returns the SP for the current CPU by looking in the
 * kernelsp array for it.  If tosp is set, it stores the current sp in
 * k0 and loads the new value in sp.  If not, it clobbers k0 and
 * stores the new value in k1, leaving sp unaffected.
 */
#ifdef CONFIG_SMP

		/* SMP variation */
		.macro	get_saved_sp docfi=0 tosp=0
		ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, 16
#endif
		LONG_SRL	k0, SMP_CPUID_PTRSHIFT
		LONG_ADDU	k1, k0
		.if \tosp
		move	k0, sp
		.if \docfi
		.cfi_register sp, k0
		.endif
		LONG_L	sp, %lo(kernelsp)(k1)
		.else
		LONG_L	k1, %lo(kernelsp)(k1)
		.endif
		.endm

		.macro	set_saved_sp stackp temp temp2
		ASM_CPUID_MFC0	\temp, ASM_SMP_CPUID_REG
		LONG_SRL	\temp, SMP_CPUID_PTRSHIFT
		LONG_S	\stackp, kernelsp(\temp)
		.endm
#else /* !CONFIG_SMP */
		/* Uniprocessor variation */
		.macro	get_saved_sp docfi=0 tosp=0
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
		/*
		 * Clear the BTB (branch target buffer) and forbid the RAS
		 * (return address stack) to work around the out-of-order
		 * issue in Loongson2F via its diagnostic register.
		 */
		move	k0, ra
		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		move	ra, k0
		li	k0, 3
		mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, k1, 16
#endif
		.if \tosp
		move	k0, sp
		.if \docfi
		.cfi_register sp, k0
		.endif
		LONG_L	sp, %lo(kernelsp)(k1)
		.else
		LONG_L	k1, %lo(kernelsp)(k1)
		.endif
		.endm

		.macro	set_saved_sp stackp temp temp2
		LONG_S	\stackp, kernelsp
		.endm
#endif
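
The comment above gives the contract; here is a minimal C model of the SMP variant (illustrative only, not kernel code: it assumes the CPU id field read from the CPUID register sits high enough in the word that LONG_SRL by SMP_CPUID_PTRSHIFT yields a pointer-scaled byte offset into kernelsp[], and the shift value below is a placeholder):

	#include <stdio.h>

	#define SMP_CPUID_PTRSHIFT 16			/* placeholder value */

	static unsigned long kernelsp[4];		/* stand-in per-CPU SP array */

	static unsigned long saved_sp_for(unsigned long cpuid_reg)
	{
		/* LONG_SRL k0, SMP_CPUID_PTRSHIFT */
		unsigned long off = cpuid_reg >> SMP_CPUID_PTRSHIFT;

		/* LONG_ADDU k1, k0; LONG_L k1, %lo(kernelsp)(k1) */
		return *(unsigned long *)((char *)kernelsp + off);
	}

	int main(void)
	{
		kernelsp[1] = 0x8000fff0ul;		/* pretend CPU 1's kernel SP */

		/* a register value whose id field selects slot 1 */
		printf("%#lx\n",
		       saved_sp_for((unsigned long)sizeof(long) << SMP_CPUID_PTRSHIFT));
		return 0;
	}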

		.macro	SAVE_SOME docfi=0
		.set	push
		.set	noat
		.set	reorder
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		bltz	k0, 8f
		 move	k0, sp
		.if \docfi
		.cfi_register sp, k0
		.endif
#ifdef CONFIG_EVA
		/*
		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
		 * EntryHi. Toggling Config7.RPS is slower and less portable.
		 *
		 * The RPS isn't automatically flushed when exceptions are
		 * taken, which can result in kernel mode speculative accesses
		 * to user addresses if the RPS mispredicts. That's harmless
		 * when user and kernel share the same address space, but with
		 * EVA the same user segments may be unmapped in kernel mode,
		 * or may even contain sensitive MMIO regions or invalid
		 * memory.
		 *
		 * This can happen when the kernel sets the return address to
		 * ret_from_* and jr's to the exception handler, which looks
		 * more like a tail call than a function call. If nested calls
		 * don't evict the last user address in the RPS, it will
		 * mispredict the return and fetch from a user controlled
		 * address into the icache.
		 *
		 * More recent EVA-capable cores with MAAR to restrict
		 * speculative accesses aren't affected.
		 */
		MFC0	k0, CP0_ENTRYHI
		MTC0	k0, CP0_ENTRYHI
#endif
		.set	reorder
		/* Called from user mode, new stack. */
		get_saved_sp docfi=\docfi tosp=1
8:
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
		.set	at=k1
#endif
		PTR_SUBU sp, PT_SIZE
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
		.set	noat
#endif
		.if \docfi
		.cfi_def_cfa sp,0
		.endif
		cfi_st	k0, PT_R29, \docfi
		cfi_rel_offset  sp, PT_R29, \docfi
		cfi_st	v1, PT_R3, \docfi
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		cfi_st	v0, PT_R2, \docfi
		LONG_S	v1, PT_STATUS(sp)
		cfi_st	$4, PT_R4, \docfi
		mfc0	v1, CP0_CAUSE
		cfi_st	$5, PT_R5, \docfi
		LONG_S	v1, PT_CAUSE(sp)
		cfi_st	$6, PT_R6, \docfi
		cfi_st	ra, PT_R31, \docfi
		MFC0	ra, CP0_EPC
		cfi_st	$7, PT_R7, \docfi
#ifdef CONFIG_64BIT
		cfi_st	$8, PT_R8, \docfi
		cfi_st	$9, PT_R9, \docfi
#endif
		LONG_S	ra, PT_EPC(sp)
		.if \docfi
		.cfi_rel_offset ra, PT_EPC
		.endif
		cfi_st	$25, PT_R25, \docfi
		cfi_st	$28, PT_R28, \docfi

		/* Set thread_info if we're coming from user mode */
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		bltz	k0, 9f

		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		.set    mips64
		pref    0, 0($28)       /* Prefetch the current pointer */
#endif
9:
		.set	pop
		.endm

		.macro	SAVE_ALL docfi=0
		SAVE_SOME \docfi
		SAVE_AT \docfi
		SAVE_TEMP \docfi
		SAVE_STATIC \docfi
		.endm
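
Together these macros fill one exception frame of PT_SIZE bytes at the bottom of the kernel stack; the PT_* offsets they use are generated by asm-offsets from the MIPS struct pt_regs. A simplified, compilable sketch of the fields those offsets imply (the real definition lives in asm/ptrace.h, where acx exists only with SmartMIPS and 32-bit kernels prepend argument-save padding):

	#include <stdio.h>

	struct pt_regs_sketch {
		unsigned long regs[32];		/* PT_R0 .. PT_R31 */
		unsigned long cp0_status;	/* PT_STATUS */
		unsigned long hi;		/* PT_HI */
		unsigned long lo;		/* PT_LO */
		unsigned long acx;		/* PT_ACX (SmartMIPS only) */
		unsigned long cp0_badvaddr;	/* PT_BVADDR */
		unsigned long cp0_cause;	/* PT_CAUSE */
		unsigned long cp0_epc;		/* PT_EPC */
	};

	int main(void)
	{
		/* PT_SIZE corresponds to the (aligned) size of this frame */
		printf("sketch frame: %zu bytes\n",
		       sizeof(struct pt_regs_sketch));
		return 0;
	}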

		.macro	RESTORE_AT docfi=0
		.set	push
		.set	noat
		cfi_ld	$1, PT_R1, \docfi
		.set	pop
		.endm

		.macro	RESTORE_TEMP docfi=0
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/* Restore the Octeon multiplier state */
		jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		LONG_L	$24, PT_ACX(sp)
		mtlhx	$24
		LONG_L	$24, PT_HI(sp)
		mtlhx	$24
		LONG_L	$24, PT_LO(sp)
		mtlhx	$24
#elif !defined(CONFIG_CPU_MIPSR6)
		LONG_L	$24, PT_LO(sp)
		mtlo	$24
		LONG_L	$24, PT_HI(sp)
		mthi	$24
#endif
#ifdef CONFIG_32BIT
		cfi_ld	$8, PT_R8, \docfi
		cfi_ld	$9, PT_R9, \docfi
#endif
		cfi_ld	$10, PT_R10, \docfi
		cfi_ld	$11, PT_R11, \docfi
		cfi_ld	$12, PT_R12, \docfi
		cfi_ld	$13, PT_R13, \docfi
		cfi_ld	$14, PT_R14, \docfi
		cfi_ld	$15, PT_R15, \docfi
		cfi_ld	$24, PT_R24, \docfi
		.endm

		.macro	RESTORE_STATIC docfi=0
		cfi_ld	$16, PT_R16, \docfi
		cfi_ld	$17, PT_R17, \docfi
		cfi_ld	$18, PT_R18, \docfi
		cfi_ld	$19, PT_R19, \docfi
		cfi_ld	$20, PT_R20, \docfi
		cfi_ld	$21, PT_R21, \docfi
		cfi_ld	$22, PT_R22, \docfi
		cfi_ld	$23, PT_R23, \docfi
		cfi_ld	$30, PT_R30, \docfi
		.endm

		.macro	RESTORE_SP docfi=0
		cfi_ld	sp, PT_R29, \docfi
		.endm

351
352#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
353
354		.macro	RESTORE_SOME docfi=0
355		.set	push
356		.set	reorder
357		.set	noat
358		mfc0	a0, CP0_STATUS
359		li	v1, ST0_CU1 | ST0_IM
360		ori	a0, STATMASK
361		xori	a0, STATMASK
362		mtc0	a0, CP0_STATUS
363		and	a0, v1
364		LONG_L	v0, PT_STATUS(sp)
365		nor	v1, $0, v1
366		and	v0, v1
367		or	v0, a0
368		mtc0	v0, CP0_STATUS
369		cfi_ld	$31, PT_R31, \docfi
370		cfi_ld	$28, PT_R28, \docfi
371		cfi_ld	$25, PT_R25, \docfi
372		cfi_ld	$7,  PT_R7, \docfi
373		cfi_ld	$6,  PT_R6, \docfi
374		cfi_ld	$5,  PT_R5, \docfi
375		cfi_ld	$4,  PT_R4, \docfi
376		cfi_ld	$3,  PT_R3, \docfi
377		cfi_ld	$2,  PT_R2, \docfi
378		.set	pop
379		.endm
380
381		.macro	RESTORE_SP_AND_RET docfi=0
382		.set	push
383		.set	noreorder
384		LONG_L	k0, PT_EPC(sp)
385		RESTORE_SP \docfi
386		jr	k0
387		 rfe
388		.set	pop
389		.endm
390
391#else
392		.macro	RESTORE_SOME docfi=0
393		.set	push
394		.set	reorder
395		.set	noat
396		mfc0	a0, CP0_STATUS
397		ori	a0, STATMASK
398		xori	a0, STATMASK
399		mtc0	a0, CP0_STATUS
400		li	v1, ST0_CU1 | ST0_FR | ST0_IM
401		and	a0, v1
402		LONG_L	v0, PT_STATUS(sp)
403		nor	v1, $0, v1
404		and	v0, v1
405		or	v0, a0
406		mtc0	v0, CP0_STATUS
407		LONG_L	v1, PT_EPC(sp)
408		MTC0	v1, CP0_EPC
409		cfi_ld	$31, PT_R31, \docfi
410		cfi_ld	$28, PT_R28, \docfi
411		cfi_ld	$25, PT_R25, \docfi
412#ifdef CONFIG_64BIT
413		cfi_ld	$8, PT_R8, \docfi
414		cfi_ld	$9, PT_R9, \docfi
415#endif
416		cfi_ld	$7,  PT_R7, \docfi
417		cfi_ld	$6,  PT_R6, \docfi
418		cfi_ld	$5,  PT_R5, \docfi
419		cfi_ld	$4,  PT_R4, \docfi
420		cfi_ld	$3,  PT_R3, \docfi
421		cfi_ld	$2,  PT_R2, \docfi
422		.set	pop
423		.endm
424
425		.macro	RESTORE_SP_AND_RET docfi=0
426		RESTORE_SP \docfi
427#ifdef CONFIG_CPU_MIPSR6
428		eretnc
429#else
430		.set	arch=r4000
431		eret
432		.set	mips0
433#endif
434		.endm
435
436#endif
437
438		.macro	RESTORE_ALL docfi=0
439		RESTORE_TEMP \docfi
440		RESTORE_STATIC \docfi
441		RESTORE_AT \docfi
442		RESTORE_SOME \docfi
443		RESTORE_SP \docfi
 
 
 
 
 
 
 
 
 
 
 
 
444		.endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	CLI
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	STI
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
		irq_enable_hazard
		.endm

/*
 * Just move to kernel mode and leave interrupts as they are.  Note
 * that for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	KMODE
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		andi	t2, t0, ST0_IEP
		srl	t2, 2
		or	t0, t2
#endif
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm
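
All three macros above rely on the same or/xori idiom: or'ing Status with the full mask forces every masked bit to 1, so the subsequent xori clears exactly those bits; STI xors with STATMASK & ~1 so bit 0 (IE) survives. A worked C example with an assumed initial Status value and R4000-class constants:

	#include <assert.h>

	#define ST0_CU0  0x10000000u	/* Status bit 28 */
	#define STATMASK 0x1fu		/* KSU | ERL | EXL | IE */

	int main(void)
	{
		unsigned int status = 0x0000ff13u;	/* user mode, EXL+IE set */

		/* CLI: kernel mode, all STATMASK bits cleared */
		unsigned int cli = (status | ST0_CU0 | STATMASK) ^ STATMASK;
		assert((cli & STATMASK) == 0 && (cli & ST0_CU0));

		/* STI: same, but bit 0 (IE) survives the xor */
		unsigned int sti = (status | ST0_CU0 | STATMASK) ^ (STATMASK & ~1u);
		assert((sti & STATMASK) == 1 && (sti & ST0_CU0));
		return 0;
	}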

#endif /* _ASM_STACKFRAME_H */
v4.10.11
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

		.macro	SAVE_AT
		.set	push
		.set	noat
		LONG_S	$1, PT_R1(sp)
		.set	pop
		.endm

		.macro	SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		mflhxu	v1
		LONG_S	v1, PT_LO(sp)
		mflhxu	v1
		LONG_S	v1, PT_HI(sp)
		mflhxu	v1
		LONG_S	v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
		mfhi	v1
#endif
#ifdef CONFIG_32BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	$10, PT_R10(sp)
		LONG_S	$11, PT_R11(sp)
		LONG_S	$12, PT_R12(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_HI(sp)
		mflo	v1
#endif
		LONG_S	$13, PT_R13(sp)
		LONG_S	$14, PT_R14(sp)
		LONG_S	$15, PT_R15(sp)
		LONG_S	$24, PT_R24(sp)
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
		LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/*
		 * The Octeon multiplier state is affected by general
		 * multiply instructions. It must be saved before kernel
		 * code has a chance to corrupt it.
		 */
		jal     octeon_mult_save
#endif
		.endm

		.macro	SAVE_STATIC
		LONG_S	$16, PT_R16(sp)
		LONG_S	$17, PT_R17(sp)
		LONG_S	$18, PT_R18(sp)
		LONG_S	$19, PT_R19(sp)
		LONG_S	$20, PT_R20(sp)
		LONG_S	$21, PT_R21(sp)
		LONG_S	$22, PT_R22(sp)
		LONG_S	$23, PT_R23(sp)
		LONG_S	$30, PT_R30(sp)
		.endm

#ifdef CONFIG_SMP
		.macro	get_saved_sp	/* SMP variation */
		ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, 16
#endif
		LONG_SRL	k0, SMP_CPUID_PTRSHIFT
		LONG_ADDU	k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		ASM_CPUID_MFC0	\temp, ASM_SMP_CPUID_REG
		LONG_SRL	\temp, SMP_CPUID_PTRSHIFT
		LONG_S	\stackp, kernelsp(\temp)
		.endm
#else /* !CONFIG_SMP */
		.macro	get_saved_sp	/* Uniprocessor variation */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
		/*
		 * Clear the BTB (branch target buffer) and forbid the RAS
		 * (return address stack) to work around the out-of-order
		 * issue in Loongson2F via its diagnostic register.
		 */
		move	k0, ra
		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		jal	1f
		 nop
1:		move	ra, k0
		li	k0, 3
		mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
		lui	k1, %hi(kernelsp)
#else
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, k1, 16
#endif
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		LONG_S	\stackp, kernelsp
		.endm
#endif

		.macro	SAVE_SOME
		.set	push
		.set	noat
		.set	reorder
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		bltz	k0, 8f
		 move	k1, sp
#ifdef CONFIG_EVA
		/*
		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
		 * EntryHi. Toggling Config7.RPS is slower and less portable.
		 *
		 * The RPS isn't automatically flushed when exceptions are
		 * taken, which can result in kernel mode speculative accesses
		 * to user addresses if the RPS mispredicts. That's harmless
		 * when user and kernel share the same address space, but with
		 * EVA the same user segments may be unmapped in kernel mode,
		 * or may even contain sensitive MMIO regions or invalid
		 * memory.
		 *
		 * This can happen when the kernel sets the return address to
		 * ret_from_* and jr's to the exception handler, which looks
		 * more like a tail call than a function call. If nested calls
		 * don't evict the last user address in the RPS, it will
		 * mispredict the return and fetch from a user controlled
		 * address into the icache.
		 *
		 * More recent EVA-capable cores with MAAR to restrict
		 * speculative accesses aren't affected.
		 */
		MFC0	k0, CP0_ENTRYHI
		MTC0	k0, CP0_ENTRYHI
#endif
		.set	reorder
		/* Called from user mode, new stack. */
		get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:		move	k0, sp
		PTR_SUBU sp, k1, PT_SIZE
#else
		.set	at=k0
8:		PTR_SUBU k1, PT_SIZE
		.set	noat
		move	k0, sp
		move	sp, k1
#endif
		LONG_S	k0, PT_R29(sp)
		LONG_S	$3, PT_R3(sp)
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		LONG_S	$2, PT_R2(sp)
		LONG_S	v1, PT_STATUS(sp)
		LONG_S	$4, PT_R4(sp)
		mfc0	v1, CP0_CAUSE
		LONG_S	$5, PT_R5(sp)
		LONG_S	v1, PT_CAUSE(sp)
		LONG_S	$6, PT_R6(sp)
		MFC0	v1, CP0_EPC
		LONG_S	$7, PT_R7(sp)
#ifdef CONFIG_64BIT
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	v1, PT_EPC(sp)
		LONG_S	$25, PT_R25(sp)
		LONG_S	$28, PT_R28(sp)
		LONG_S	$31, PT_R31(sp)

		/* Set thread_info if we're coming from user mode */
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		bltz	k0, 9f

		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		.set    mips64
		pref    0, 0($28)       /* Prefetch the current pointer */
#endif
9:
		.set	pop
		.endm

		.macro	SAVE_ALL
		SAVE_SOME
		SAVE_AT
		SAVE_TEMP
		SAVE_STATIC
		.endm

		.macro	RESTORE_AT
		.set	push
		.set	noat
		LONG_L	$1,  PT_R1(sp)
		.set	pop
		.endm

		.macro	RESTORE_TEMP
#ifdef CONFIG_CPU_CAVIUM_OCTEON
		/* Restore the Octeon multiplier state */
		jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		LONG_L	$24, PT_ACX(sp)
		mtlhx	$24
		LONG_L	$24, PT_HI(sp)
		mtlhx	$24
		LONG_L	$24, PT_LO(sp)
		mtlhx	$24
#elif !defined(CONFIG_CPU_MIPSR6)
		LONG_L	$24, PT_LO(sp)
		mtlo	$24
		LONG_L	$24, PT_HI(sp)
		mthi	$24
#endif
#ifdef CONFIG_32BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$10, PT_R10(sp)
		LONG_L	$11, PT_R11(sp)
		LONG_L	$12, PT_R12(sp)
		LONG_L	$13, PT_R13(sp)
		LONG_L	$14, PT_R14(sp)
		LONG_L	$15, PT_R15(sp)
		LONG_L	$24, PT_R24(sp)
		.endm

		.macro	RESTORE_STATIC
		LONG_L	$16, PT_R16(sp)
		LONG_L	$17, PT_R17(sp)
		LONG_L	$18, PT_R18(sp)
		LONG_L	$19, PT_R19(sp)
		LONG_L	$20, PT_R20(sp)
		LONG_L	$21, PT_R21(sp)
		LONG_L	$22, PT_R22(sp)
		LONG_L	$23, PT_R23(sp)
		LONG_L	$30, PT_R30(sp)
		.endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		li	v1, ST0_CU1 | ST0_IM
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
		LONG_L	$7,  PT_R7(sp)
		LONG_L	$6,  PT_R6(sp)
		LONG_L	$5,  PT_R5(sp)
		LONG_L	$4,  PT_R4(sp)
		LONG_L	$3,  PT_R3(sp)
		LONG_L	$2,  PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		.set	push
		.set	noreorder
		LONG_L	k0, PT_EPC(sp)
		LONG_L	sp, PT_R29(sp)
		jr	k0
		 rfe
		.set	pop
		.endm

#else
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		ori	a0, STATMASK
		xori	a0, STATMASK
		mtc0	a0, CP0_STATUS
		li	v1, ST0_CU1 | ST0_FR | ST0_IM
		and	a0, v1
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1
		or	v0, a0
		mtc0	v0, CP0_STATUS
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$7,  PT_R7(sp)
		LONG_L	$6,  PT_R6(sp)
		LONG_L	$5,  PT_R5(sp)
		LONG_L	$4,  PT_R4(sp)
		LONG_L	$3,  PT_R3(sp)
		LONG_L	$2,  PT_R2(sp)
		.set	pop
		.endm

		.macro	RESTORE_SP_AND_RET
		LONG_L	sp, PT_R29(sp)
		.set	arch=r4000
		eret
		.set	mips0
		.endm

#endif

		.macro	RESTORE_SP
		LONG_L	sp, PT_R29(sp)
		.endm

		.macro	RESTORE_ALL
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP
		.endm

		.macro	RESTORE_ALL_AND_RET
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP_AND_RET
		.endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	CLI
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	STI
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | STATMASK
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
		irq_enable_hazard
		.endm

/*
 * Just move to kernel mode and leave interrupts as they are.  Note
 * that for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
		.macro	KMODE
		mfc0	t0, CP0_STATUS
		li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		andi	t2, t0, ST0_IEP
		srl	t2, 2
		or	t0, t2
#endif
		or	t0, t1
		xori	t0, STATMASK & ~1
		mtc0	t0, CP0_STATUS
		irq_disable_hazard
		.endm

#endif /* _ASM_STACKFRAME_H */