// SPDX-License-Identifier: GPL-2.0
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/fpu.h>
#include <asm/traps.h>
#include <asm/ptrace.h>

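/*
 * Lazily set up the per-task FPU/xstate area: if the task has already
 * used math, any live hardware state is flushed back with unlazy_fpu();
 * otherwise the xstate buffer is allocated from task_xstate_cachep on
 * first use, the hard or soft FPU context is zeroed, FPSCR gets its
 * default value, and the task is marked as having used math.
 */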
int init_fpu(struct task_struct *tsk)
{
        if (tsk_used_math(tsk)) {
                if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
                        unlazy_fpu(tsk, task_pt_regs(tsk));
                return 0;
        }

        /*
         * Memory allocation at the first usage of the FPU and other state.
         */
        if (!tsk->thread.xstate) {
                tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                                      GFP_KERNEL);
                if (!tsk->thread.xstate)
                        return -ENOMEM;
        }

        if (boot_cpu_data.flags & CPU_HAS_FPU) {
                struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
                memset(fp, 0, xstate_size);
                fp->fpscr = FPSCR_INIT;
        } else {
                struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
                memset(fp, 0, xstate_size);
                fp->fpscr = FPSCR_INIT;
        }

        set_stopped_child_used_math(tsk);
        return 0;
}

#ifdef CONFIG_SH_FPU
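/*
 * Reload the saved FPU context of current onto the hardware, mark the
 * thread as owning the FPU registers (TS_USEDFPU), and bump
 * fpu_counter, which counts consecutive FPU use so the context-switch
 * path can decide to restore eagerly.
 */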
void __fpu_state_restore(void)
{
        struct task_struct *tsk = current;

        restore_fpu(tsk);

        task_thread_info(tsk)->status |= TS_USEDFPU;
        tsk->thread.fpu_counter++;
}

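/*
 * Slow path run when a task touches the FPU while it is disabled:
 * FPU use in kernel mode is a bug; on a task's first FPU use the
 * xstate area is allocated (interrupts are re-enabled around the
 * sleeping allocation, and the task is killed if it fails), then the
 * FPU is handed to the faulting context and its state restored.
 */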
void fpu_state_restore(struct pt_regs *regs)
{
        struct task_struct *tsk = current;

        if (unlikely(!user_mode(regs))) {
                printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
                BUG();
                return;
        }

        if (!tsk_used_math(tsk)) {
                int ret;
                /*
                 * does a slab alloc which can sleep
                 */
                local_irq_enable();
                ret = init_fpu(tsk);
                local_irq_disable();
                if (ret) {
                        /*
                         * ran out of memory!
                         */
                        force_sig(SIGKILL);
                        return;
                }
        }

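        /*
         * Hand the FPU to this user context, then reload its saved
         * register state.
         */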
        grab_fpu(regs);

        __fpu_state_restore();
}

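/*
 * Trap handler glue: invoked from the FPU disable exception so that
 * state is restored lazily for the faulting user context.
 */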
BUILD_TRAP_HANDLER(fpu_state_restore)
{
        TRAP_HANDLER_DECL;

        fpu_state_restore(regs);
}
#endif /* CONFIG_SH_FPU */