/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SWITCH_TO_H
#define _ASM_SWITCH_TO_H

#include <asm/cpu-features.h>
#include <asm/watch.h>
#include <asm/dsp.h>
#include <asm/cop2.h>
#include <asm/fpu.h>

struct task_struct;

/**
 * resume - resume execution of a task
 * @prev: The task previously executed.
 * @next: The task to begin executing.
 * @next_ti: task_thread_info(next).
 *
 * This function is used whilst scheduling to save the context of prev & load
 * the context of next. Returns prev.
 */
extern asmlinkage struct task_struct *resume(struct task_struct *prev,
                struct task_struct *next, struct thread_info *next_ti);
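
/*
 * Note: resume() itself is implemented in per-platform assembly; in
 * mainline kernels this is typically arch/mips/kernel/r4k_switch.S (or an
 * Octeon-specific variant), though the exact location depends on the
 * kernel version.
 */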

extern unsigned int ll_bit;
extern struct task_struct *ll_task;
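
/*
 * ll_bit and ll_task above back the kernel's software emulation of the
 * ll/sc instructions on CPUs that lack a usable implementation: the trap
 * handlers that simulate ll/sc record the "link" state here. Clearing
 * ll_bit on a context switch makes a pending sc in the switched-out task
 * fail, just as a real link bit is broken by an exception return.
 */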

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management. We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define __mips_mt_fpaff_switch_to(prev)                                 \
do {                                                                    \
        struct thread_info *__prev_ti = task_thread_info(prev);        \
                                                                        \
        if (cpu_has_fpu &&                                              \
            test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&             \
            (!(KSTK_STATUS(prev) & ST0_CU1))) {                         \
                clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);          \
                prev->cpus_allowed = prev->thread.user_cpus_allowed;    \
        }                                                               \
        next->thread.emulated_fp = 0;                                   \
} while (0)
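
/*
 * Note that although only "prev" is a macro parameter, the expansion above
 * also writes next->thread.emulated_fp, so it relies on a variable named
 * "next" being in scope at the call site in switch_to() below.
 */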

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

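/*
 * Discard any outstanding ll/sc "link" on a context switch. Cores with a
 * software-writable LLbit (cpu_has_rw_llb) clear it in hardware by writing
 * CP0 LLAddr; everything else clears the emulated ll_bit. If cpu_has_llsc
 * is a compile-time constant true, the emulation can never be in use and
 * __builtin_constant_p() lets the compiler drop the store entirely.
 */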
#define __clear_software_ll_bit()                                       \
do {                                                                    \
        if (cpu_has_rw_llb) {                                           \
                write_c0_lladdr(0);                                     \
        } else {                                                        \
                if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \
                        ll_bit = 0;                                     \
        }                                                               \
} while (0)

/*
 * For newly created kernel threads switch_to() will return to
 * ret_from_kernel_thread, newly created user threads to ret_from_fork.
 * That is, everything following resume() will be skipped for new threads.
 * So everything that matters to new threads should be placed before resume().
 */
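
/*
 * In the cop2 block below, set_c0_status(ST0_CU2)/clear_c0_status(ST0_CU2)
 * temporarily grant the kernel coprocessor 2 access so that cop2_save()
 * and cop2_restore() may issue COP2 instructions; access is revoked again
 * afterwards so that a task's first COP2 use still traps and can be
 * handled lazily.
 */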
#define switch_to(prev, next, last)                                     \
do {                                                                    \
        __mips_mt_fpaff_switch_to(prev);                                \
        lose_fpu_inatomic(1, prev);                                     \
        if (cpu_has_dsp) {                                              \
                __save_dsp(prev);                                       \
                __restore_dsp(next);                                    \
        }                                                               \
        if (cop2_present) {                                             \
                set_c0_status(ST0_CU2);                                 \
                if ((KSTK_STATUS(prev) & ST0_CU2)) {                    \
                        if (cop2_lazy_restore)                          \
                                KSTK_STATUS(prev) &= ~ST0_CU2;          \
                        cop2_save(prev);                                \
                }                                                       \
                if (KSTK_STATUS(next) & ST0_CU2 &&                      \
                    !cop2_lazy_restore) {                               \
                        cop2_restore(next);                             \
                }                                                       \
                clear_c0_status(ST0_CU2);                               \
        }                                                               \
        __clear_software_ll_bit();                                      \
        if (cpu_has_userlocal)                                          \
                write_c0_userlocal(task_thread_info(next)->tp_value);   \
        __restore_watch();                                              \
        (last) = resume(prev, next, task_thread_info(next));            \
} while (0)

#endif /* _ASM_SWITCH_TO_H */
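
/*
 * What follows appears to be a second revision of the same header captured
 * in this file: it includes <asm/msa.h> rather than <asm/fpu.h>, adds an
 * fp_save argument to resume() (see the FP_SAVE_* values), and moves the
 * restore half of the switch into finish_arch_switch().
 */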
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003, 06 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SWITCH_TO_H
#define _ASM_SWITCH_TO_H

#include <asm/cpu-features.h>
#include <asm/watch.h>
#include <asm/dsp.h>
#include <asm/cop2.h>
#include <asm/msa.h>

struct task_struct;

enum {
        FP_SAVE_NONE    = 0,
        FP_SAVE_VECTOR  = -1,
        FP_SAVE_SCALAR  = 1,
};
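
/*
 * The values are signed, presumably so the low-level resume() code can
 * distinguish the three cases with simple sign tests: zero means nothing
 * to save, positive means scalar FP registers only, negative means the
 * full vector (MSA) context.
 */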

/**
 * resume - resume execution of a task
 * @prev: The task previously executed.
 * @next: The task to begin executing.
 * @next_ti: task_thread_info(next).
 * @fp_save: Which, if any, FP context to save for prev.
 *
 * This function is used whilst scheduling to save the context of prev & load
 * the context of next. Returns prev.
 */
extern asmlinkage struct task_struct *resume(struct task_struct *prev,
                struct task_struct *next, struct thread_info *next_ti,
                s32 fp_save);

extern unsigned int ll_bit;
extern struct task_struct *ll_task;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management. We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define __mips_mt_fpaff_switch_to(prev)                                 \
do {                                                                    \
        struct thread_info *__prev_ti = task_thread_info(prev);        \
                                                                        \
        if (cpu_has_fpu &&                                              \
            test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&             \
            (!(KSTK_STATUS(prev) & ST0_CU1))) {                         \
                clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);          \
                prev->cpus_allowed = prev->thread.user_cpus_allowed;    \
        }                                                               \
        next->thread.emulated_fp = 0;                                   \
} while (0)

#else
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif

#define __clear_software_ll_bit()                                       \
do {                                                                    \
        if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)       \
                ll_bit = 0;                                             \
} while (0)

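/*
 * Unlike the first variant of switch_to() in this file, this one defers
 * the FP state save to resume() itself: the TIF_USEDFPU/TIF_USEDMSA flags
 * accumulated during the time-slice are translated into an FP_SAVE_* code,
 * and the disable_msa() after resume() ensures the incoming task traps on
 * its first MSA instruction so its vector context can be restored lazily.
 */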
#define switch_to(prev, next, last)                                     \
do {                                                                    \
        u32 __c0_stat;                                                  \
        s32 __fpsave = FP_SAVE_NONE;                                    \
        __mips_mt_fpaff_switch_to(prev);                                \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
        if (cop2_present && (KSTK_STATUS(prev) & ST0_CU2)) {            \
                if (cop2_lazy_restore)                                  \
                        KSTK_STATUS(prev) &= ~ST0_CU2;                  \
                __c0_stat = read_c0_status();                           \
                write_c0_status(__c0_stat | ST0_CU2);                   \
                cop2_save(&prev->thread.cp2);                           \
                write_c0_status(__c0_stat & ~ST0_CU2);                  \
        }                                                               \
        __clear_software_ll_bit();                                      \
        if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU))          \
                __fpsave = FP_SAVE_SCALAR;                              \
        if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))          \
                __fpsave = FP_SAVE_VECTOR;                              \
        (last) = resume(prev, next, task_thread_info(next), __fpsave);  \
        disable_msa();                                                  \
} while (0)

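/*
 * finish_arch_switch() runs after the switch completes, on the incoming
 * task's side, so "current" below is the task being switched in; that is
 * why the cop2, DSP, UserLocal and watch-register restores live here
 * rather than in switch_to() above.
 */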
#define finish_arch_switch(prev)                                        \
do {                                                                    \
        u32 __c0_stat;                                                  \
        if (cop2_present && !cop2_lazy_restore &&                       \
            (KSTK_STATUS(current) & ST0_CU2)) {                         \
                __c0_stat = read_c0_status();                           \
                write_c0_status(__c0_stat | ST0_CU2);                   \
                cop2_restore(&current->thread.cp2);                     \
                write_c0_status(__c0_stat & ~ST0_CU2);                  \
        }                                                               \
        if (cpu_has_dsp)                                                \
                __restore_dsp(current);                                 \
        if (cpu_has_userlocal)                                          \
                write_c0_userlocal(current_thread_info()->tp_value);    \
        __restore_watch();                                              \
} while (0)

#endif /* _ASM_SWITCH_TO_H */