Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 1994 Linus Torvalds
4 *
5 * Pentium III FXSR, SSE support
6 * General FPU state handling cleanups
7 * Gareth Hughes <gareth@valinux.com>, May 2000
8 */
9#include <asm/fpu/internal.h>
10#include <asm/fpu/regset.h>
11#include <asm/fpu/signal.h>
12#include <asm/fpu/types.h>
13#include <asm/traps.h>
14#include <asm/irq_regs.h>
15
16#include <linux/hardirq.h>
17#include <linux/pkeys.h>
18
19#define CREATE_TRACE_POINTS
20#include <asm/trace/fpu.h>
21
22/*
23 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
24 * depending on the FPU hardware format:
25 */
26union fpregs_state init_fpstate __read_mostly;
27
28/*
29 * Track whether the kernel is using the FPU state
30 * currently.
31 *
32 * This flag is used:
33 *
34 * - by IRQ context code to potentially use the FPU
35 * if it's unused.
36 *
37 * - to debug kernel_fpu_begin()/end() correctness
38 */
39static DEFINE_PER_CPU(bool, in_kernel_fpu);
40
41/*
42 * Track which context is using the FPU on the CPU:
43 */
44DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
45
46static bool kernel_fpu_disabled(void)
47{
48 return this_cpu_read(in_kernel_fpu);
49}
50
51static bool interrupted_kernel_fpu_idle(void)
52{
53 return !kernel_fpu_disabled();
54}
55
56/*
57 * Were we in user mode (or vm86 mode) when we were
58 * interrupted?
59 *
60 * Doing kernel_fpu_begin/end() is ok if we are running
61 * in an interrupt context from user mode - we'll just
62 * save the FPU state as required.
63 */
64static bool interrupted_user_mode(void)
65{
66 struct pt_regs *regs = get_irq_regs();
67 return regs && user_mode(regs);
68}
69
70/*
71 * Can we use the FPU in kernel mode with the
72 * whole "kernel_fpu_begin/end()" sequence?
73 *
74 * It's always ok in process context (ie "not interrupt")
75 * but it is sometimes ok even from an irq.
76 */
77bool irq_fpu_usable(void)
78{
79 return !in_interrupt() ||
80 interrupted_user_mode() ||
81 interrupted_kernel_fpu_idle();
82}
83EXPORT_SYMBOL(irq_fpu_usable);
84
85void kernel_fpu_begin(void)
86{
87 preempt_disable();
88
89 WARN_ON_FPU(!irq_fpu_usable());
90 WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
91
92 this_cpu_write(in_kernel_fpu, true);
93
94 if (!(current->flags & PF_KTHREAD) &&
95 !test_thread_flag(TIF_NEED_FPU_LOAD)) {
96 set_thread_flag(TIF_NEED_FPU_LOAD);
97 /*
98 * Ignore return value -- we don't care if reg state
99 * is clobbered.
100 */
101 copy_fpregs_to_fpstate(¤t->thread.fpu);
102 }
103 __cpu_invalidate_fpregs_state();
104}
105EXPORT_SYMBOL_GPL(kernel_fpu_begin);
106
107void kernel_fpu_end(void)
108{
109 WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
110
111 this_cpu_write(in_kernel_fpu, false);
112 preempt_enable();
113}
114EXPORT_SYMBOL_GPL(kernel_fpu_end);
115
116/*
117 * Save the FPU state (mark it for reload if necessary):
118 *
119 * This only ever gets called for the current task.
120 */
121void fpu__save(struct fpu *fpu)
122{
123 WARN_ON_FPU(fpu != ¤t->thread.fpu);
124
125 fpregs_lock();
126 trace_x86_fpu_before_save(fpu);
127
128 if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
129 if (!copy_fpregs_to_fpstate(fpu)) {
130 copy_kernel_to_fpregs(&fpu->state);
131 }
132 }
133
134 trace_x86_fpu_after_save(fpu);
135 fpregs_unlock();
136}
137
138/*
139 * Legacy x87 fpstate state init:
140 */
141static inline void fpstate_init_fstate(struct fregs_state *fp)
142{
143 fp->cwd = 0xffff037fu;
144 fp->swd = 0xffff0000u;
145 fp->twd = 0xffffffffu;
146 fp->fos = 0xffff0000u;
147}
148
149void fpstate_init(union fpregs_state *state)
150{
151 if (!static_cpu_has(X86_FEATURE_FPU)) {
152 fpstate_init_soft(&state->soft);
153 return;
154 }
155
156 memset(state, 0, fpu_kernel_xstate_size);
157
158 if (static_cpu_has(X86_FEATURE_XSAVES))
159 fpstate_init_xstate(&state->xsave);
160 if (static_cpu_has(X86_FEATURE_FXSR))
161 fpstate_init_fxstate(&state->fxsave);
162 else
163 fpstate_init_fstate(&state->fsave);
164}
165EXPORT_SYMBOL_GPL(fpstate_init);
166
167int fpu__copy(struct task_struct *dst, struct task_struct *src)
168{
169 struct fpu *dst_fpu = &dst->thread.fpu;
170 struct fpu *src_fpu = &src->thread.fpu;
171
172 dst_fpu->last_cpu = -1;
173
174 if (!static_cpu_has(X86_FEATURE_FPU))
175 return 0;
176
177 WARN_ON_FPU(src_fpu != ¤t->thread.fpu);
178
179 /*
180 * Don't let 'init optimized' areas of the XSAVE area
181 * leak into the child task:
182 */
183 memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
184
185 /*
186 * If the FPU registers are not current just memcpy() the state.
187 * Otherwise save current FPU registers directly into the child's FPU
188 * context, without any memory-to-memory copying.
189 *
190 * ( The function 'fails' in the FNSAVE case, which destroys
191 * register contents so we have to load them back. )
192 */
193 fpregs_lock();
194 if (test_thread_flag(TIF_NEED_FPU_LOAD))
195 memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
196
197 else if (!copy_fpregs_to_fpstate(dst_fpu))
198 copy_kernel_to_fpregs(&dst_fpu->state);
199
200 fpregs_unlock();
201
202 set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);
203
204 trace_x86_fpu_copy_src(src_fpu);
205 trace_x86_fpu_copy_dst(dst_fpu);
206
207 return 0;
208}
209
210/*
211 * Activate the current task's in-memory FPU context,
212 * if it has not been used before:
213 */
214static void fpu__initialize(struct fpu *fpu)
215{
216 WARN_ON_FPU(fpu != ¤t->thread.fpu);
217
218 set_thread_flag(TIF_NEED_FPU_LOAD);
219 fpstate_init(&fpu->state);
220 trace_x86_fpu_init_state(fpu);
221}
222
223/*
224 * This function must be called before we read a task's fpstate.
225 *
226 * There's two cases where this gets called:
227 *
228 * - for the current task (when coredumping), in which case we have
229 * to save the latest FPU registers into the fpstate,
230 *
231 * - or it's called for stopped tasks (ptrace), in which case the
232 * registers were already saved by the context-switch code when
233 * the task scheduled out.
234 *
235 * If the task has used the FPU before then save it.
236 */
237void fpu__prepare_read(struct fpu *fpu)
238{
239 if (fpu == ¤t->thread.fpu)
240 fpu__save(fpu);
241}
242
243/*
244 * This function must be called before we write a task's fpstate.
245 *
246 * Invalidate any cached FPU registers.
247 *
248 * After this function call, after registers in the fpstate are
249 * modified and the child task has woken up, the child task will
250 * restore the modified FPU state from the modified context. If we
251 * didn't clear its cached status here then the cached in-registers
252 * state pending on its former CPU could be restored, corrupting
253 * the modifications.
254 */
255void fpu__prepare_write(struct fpu *fpu)
256{
257 /*
258 * Only stopped child tasks can be used to modify the FPU
259 * state in the fpstate buffer:
260 */
261 WARN_ON_FPU(fpu == ¤t->thread.fpu);
262
263 /* Invalidate any cached state: */
264 __fpu_invalidate_fpregs_state(fpu);
265}
266
267/*
268 * Drops current FPU state: deactivates the fpregs and
269 * the fpstate. NOTE: it still leaves previous contents
270 * in the fpregs in the eager-FPU case.
271 *
272 * This function can be used in cases where we know that
273 * a state-restore is coming: either an explicit one,
274 * or a reschedule.
275 */
276void fpu__drop(struct fpu *fpu)
277{
278 preempt_disable();
279
280 if (fpu == ¤t->thread.fpu) {
281 /* Ignore delayed exceptions from user space */
282 asm volatile("1: fwait\n"
283 "2:\n"
284 _ASM_EXTABLE(1b, 2b));
285 fpregs_deactivate(fpu);
286 }
287
288 trace_x86_fpu_dropped(fpu);
289
290 preempt_enable();
291}
292
293/*
294 * Clear FPU registers by setting them up from
295 * the init fpstate:
296 */
297static inline void copy_init_fpstate_to_fpregs(void)
298{
299 fpregs_lock();
300
301 if (use_xsave())
302 copy_kernel_to_xregs(&init_fpstate.xsave, -1);
303 else if (static_cpu_has(X86_FEATURE_FXSR))
304 copy_kernel_to_fxregs(&init_fpstate.fxsave);
305 else
306 copy_kernel_to_fregs(&init_fpstate.fsave);
307
308 if (boot_cpu_has(X86_FEATURE_OSPKE))
309 copy_init_pkru_to_fpregs();
310
311 fpregs_mark_activate();
312 fpregs_unlock();
313}
314
315/*
316 * Clear the FPU state back to init state.
317 *
318 * Called by sys_execve(), by the signal handler code and by various
319 * error paths.
320 */
321void fpu__clear(struct fpu *fpu)
322{
323 WARN_ON_FPU(fpu != ¤t->thread.fpu); /* Almost certainly an anomaly */
324
325 fpu__drop(fpu);
326
327 /*
328 * Make sure fpstate is cleared and initialized.
329 */
330 fpu__initialize(fpu);
331 if (static_cpu_has(X86_FEATURE_FPU))
332 copy_init_fpstate_to_fpregs();
333}
334
335/*
336 * Load FPU context before returning to userspace.
337 */
338void switch_fpu_return(void)
339{
340 if (!static_cpu_has(X86_FEATURE_FPU))
341 return;
342
343 __fpregs_load_activate();
344}
345EXPORT_SYMBOL_GPL(switch_fpu_return);
346
347#ifdef CONFIG_X86_DEBUG_FPU
348/*
349 * If current FPU state according to its tracking (loaded FPU context on this
350 * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
351 * loaded on return to userland.
352 */
353void fpregs_assert_state_consistent(void)
354{
355 struct fpu *fpu = ¤t->thread.fpu;
356
357 if (test_thread_flag(TIF_NEED_FPU_LOAD))
358 return;
359
360 WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
361}
362EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
363#endif
364
365void fpregs_mark_activate(void)
366{
367 struct fpu *fpu = ¤t->thread.fpu;
368
369 fpregs_activate(fpu);
370 fpu->last_cpu = smp_processor_id();
371 clear_thread_flag(TIF_NEED_FPU_LOAD);
372}
373EXPORT_SYMBOL_GPL(fpregs_mark_activate);
374
375/*
376 * x87 math exception handling:
377 */
378
379int fpu__exception_code(struct fpu *fpu, int trap_nr)
380{
381 int err;
382
383 if (trap_nr == X86_TRAP_MF) {
384 unsigned short cwd, swd;
385 /*
386 * (~cwd & swd) will mask out exceptions that are not set to unmasked
387 * status. 0x3f is the exception bits in these regs, 0x200 is the
388 * C1 reg you need in case of a stack fault, 0x040 is the stack
389 * fault bit. We should only be taking one exception at a time,
390 * so if this combination doesn't produce any single exception,
391 * then we have a bad program that isn't synchronizing its FPU usage
392 * and it will suffer the consequences since we won't be able to
393 * fully reproduce the context of the exception.
394 */
395 if (boot_cpu_has(X86_FEATURE_FXSR)) {
396 cwd = fpu->state.fxsave.cwd;
397 swd = fpu->state.fxsave.swd;
398 } else {
399 cwd = (unsigned short)fpu->state.fsave.cwd;
400 swd = (unsigned short)fpu->state.fsave.swd;
401 }
402
403 err = swd & ~cwd;
404 } else {
405 /*
406 * The SIMD FPU exceptions are handled a little differently, as there
407 * is only a single status/control register. Thus, to determine which
408 * unmasked exception was caught we must mask the exception mask bits
409 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
410 */
411 unsigned short mxcsr = MXCSR_DEFAULT;
412
413 if (boot_cpu_has(X86_FEATURE_XMM))
414 mxcsr = fpu->state.fxsave.mxcsr;
415
416 err = ~(mxcsr >> 7) & mxcsr;
417 }
418
419 if (err & 0x001) { /* Invalid op */
420 /*
421 * swd & 0x240 == 0x040: Stack Underflow
422 * swd & 0x240 == 0x240: Stack Overflow
423 * User must clear the SF bit (0x40) if set
424 */
425 return FPE_FLTINV;
426 } else if (err & 0x004) { /* Divide by Zero */
427 return FPE_FLTDIV;
428 } else if (err & 0x008) { /* Overflow */
429 return FPE_FLTOVF;
430 } else if (err & 0x012) { /* Denormal, Underflow */
431 return FPE_FLTUND;
432 } else if (err & 0x020) { /* Precision */
433 return FPE_FLTRES;
434 }
435
436 /*
437 * If we're using IRQ 13, or supposedly even some trap
438 * X86_TRAP_MF implementations, it's possible
439 * we get a spurious trap, which is not an error.
440 */
441 return 0;
442}
1/*
2 * Copyright (C) 1994 Linus Torvalds
3 *
4 * Pentium III FXSR, SSE support
5 * General FPU state handling cleanups
6 * Gareth Hughes <gareth@valinux.com>, May 2000
7 */
8#include <asm/fpu/internal.h>
9#include <asm/fpu/regset.h>
10#include <asm/fpu/signal.h>
11#include <asm/traps.h>
12
13#include <linux/hardirq.h>
14
15/*
16 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
17 * depending on the FPU hardware format:
18 */
19union fpregs_state init_fpstate __read_mostly;
20
21/*
22 * Track whether the kernel is using the FPU state
23 * currently.
24 *
25 * This flag is used:
26 *
27 * - by IRQ context code to potentially use the FPU
28 * if it's unused.
29 *
30 * - to debug kernel_fpu_begin()/end() correctness
31 */
32static DEFINE_PER_CPU(bool, in_kernel_fpu);
33
34/*
35 * Track which context is using the FPU on the CPU:
36 */
37DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
38
39static void kernel_fpu_disable(void)
40{
41 WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
42 this_cpu_write(in_kernel_fpu, true);
43}
44
45static void kernel_fpu_enable(void)
46{
47 WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
48 this_cpu_write(in_kernel_fpu, false);
49}
50
51static bool kernel_fpu_disabled(void)
52{
53 return this_cpu_read(in_kernel_fpu);
54}
55
56/*
57 * Were we in an interrupt that interrupted kernel mode?
58 *
59 * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
60 * pair does nothing at all: the thread must not have fpu (so
61 * that we don't try to save the FPU state), and TS must
62 * be set (so that the clts/stts pair does nothing that is
63 * visible in the interrupted kernel thread).
64 *
65 * Except for the eagerfpu case when we return true; in the likely case
66 * the thread has FPU but we are not going to set/clear TS.
67 */
68static bool interrupted_kernel_fpu_idle(void)
69{
70 if (kernel_fpu_disabled())
71 return false;
72
73 if (use_eager_fpu())
74 return true;
75
76 return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
77}
78
79/*
80 * Were we in user mode (or vm86 mode) when we were
81 * interrupted?
82 *
83 * Doing kernel_fpu_begin/end() is ok if we are running
84 * in an interrupt context from user mode - we'll just
85 * save the FPU state as required.
86 */
87static bool interrupted_user_mode(void)
88{
89 struct pt_regs *regs = get_irq_regs();
90 return regs && user_mode(regs);
91}
92
93/*
94 * Can we use the FPU in kernel mode with the
95 * whole "kernel_fpu_begin/end()" sequence?
96 *
97 * It's always ok in process context (ie "not interrupt")
98 * but it is sometimes ok even from an irq.
99 */
100bool irq_fpu_usable(void)
101{
102 return !in_interrupt() ||
103 interrupted_user_mode() ||
104 interrupted_kernel_fpu_idle();
105}
106EXPORT_SYMBOL(irq_fpu_usable);
107
108void __kernel_fpu_begin(void)
109{
110 struct fpu *fpu = ¤t->thread.fpu;
111
112 WARN_ON_FPU(!irq_fpu_usable());
113
114 kernel_fpu_disable();
115
116 if (fpu->fpregs_active) {
117 /*
118 * Ignore return value -- we don't care if reg state
119 * is clobbered.
120 */
121 copy_fpregs_to_fpstate(fpu);
122 } else {
123 this_cpu_write(fpu_fpregs_owner_ctx, NULL);
124 __fpregs_activate_hw();
125 }
126}
127EXPORT_SYMBOL(__kernel_fpu_begin);
128
129void __kernel_fpu_end(void)
130{
131 struct fpu *fpu = ¤t->thread.fpu;
132
133 if (fpu->fpregs_active)
134 copy_kernel_to_fpregs(&fpu->state);
135 else
136 __fpregs_deactivate_hw();
137
138 kernel_fpu_enable();
139}
140EXPORT_SYMBOL(__kernel_fpu_end);
141
142void kernel_fpu_begin(void)
143{
144 preempt_disable();
145 __kernel_fpu_begin();
146}
147EXPORT_SYMBOL_GPL(kernel_fpu_begin);
148
149void kernel_fpu_end(void)
150{
151 __kernel_fpu_end();
152 preempt_enable();
153}
154EXPORT_SYMBOL_GPL(kernel_fpu_end);
155
156/*
157 * CR0::TS save/restore functions:
158 */
159int irq_ts_save(void)
160{
161 /*
162 * If in process context and not atomic, we can take a spurious DNA fault.
163 * Otherwise, doing clts() in process context requires disabling preemption
164 * or some heavy lifting like kernel_fpu_begin()
165 */
166 if (!in_atomic())
167 return 0;
168
169 if (read_cr0() & X86_CR0_TS) {
170 clts();
171 return 1;
172 }
173
174 return 0;
175}
176EXPORT_SYMBOL_GPL(irq_ts_save);
177
178void irq_ts_restore(int TS_state)
179{
180 if (TS_state)
181 stts();
182}
183EXPORT_SYMBOL_GPL(irq_ts_restore);
184
185/*
186 * Save the FPU state (mark it for reload if necessary):
187 *
188 * This only ever gets called for the current task.
189 */
190void fpu__save(struct fpu *fpu)
191{
192 WARN_ON_FPU(fpu != ¤t->thread.fpu);
193
194 preempt_disable();
195 if (fpu->fpregs_active) {
196 if (!copy_fpregs_to_fpstate(fpu)) {
197 if (use_eager_fpu())
198 copy_kernel_to_fpregs(&fpu->state);
199 else
200 fpregs_deactivate(fpu);
201 }
202 }
203 preempt_enable();
204}
205EXPORT_SYMBOL_GPL(fpu__save);
206
207/*
208 * Legacy x87 fpstate state init:
209 */
210static inline void fpstate_init_fstate(struct fregs_state *fp)
211{
212 fp->cwd = 0xffff037fu;
213 fp->swd = 0xffff0000u;
214 fp->twd = 0xffffffffu;
215 fp->fos = 0xffff0000u;
216}
217
218void fpstate_init(union fpregs_state *state)
219{
220 if (!cpu_has_fpu) {
221 fpstate_init_soft(&state->soft);
222 return;
223 }
224
225 memset(state, 0, xstate_size);
226
227 if (cpu_has_fxsr)
228 fpstate_init_fxstate(&state->fxsave);
229 else
230 fpstate_init_fstate(&state->fsave);
231}
232EXPORT_SYMBOL_GPL(fpstate_init);
233
234int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
235{
236 dst_fpu->counter = 0;
237 dst_fpu->fpregs_active = 0;
238 dst_fpu->last_cpu = -1;
239
240 if (!src_fpu->fpstate_active || !cpu_has_fpu)
241 return 0;
242
243 WARN_ON_FPU(src_fpu != ¤t->thread.fpu);
244
245 /*
246 * Don't let 'init optimized' areas of the XSAVE area
247 * leak into the child task:
248 */
249 if (use_eager_fpu())
250 memset(&dst_fpu->state.xsave, 0, xstate_size);
251
252 /*
253 * Save current FPU registers directly into the child
254 * FPU context, without any memory-to-memory copying.
255 * In lazy mode, if the FPU context isn't loaded into
256 * fpregs, CR0.TS will be set and do_device_not_available
257 * will load the FPU context.
258 *
259 * We have to do all this with preemption disabled,
260 * mostly because of the FNSAVE case, because in that
261 * case we must not allow preemption in the window
262 * between the FNSAVE and us marking the context lazy.
263 *
264 * It shouldn't be an issue as even FNSAVE is plenty
265 * fast in terms of critical section length.
266 */
267 preempt_disable();
268 if (!copy_fpregs_to_fpstate(dst_fpu)) {
269 memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
270
271 if (use_eager_fpu())
272 copy_kernel_to_fpregs(&src_fpu->state);
273 else
274 fpregs_deactivate(src_fpu);
275 }
276 preempt_enable();
277
278 return 0;
279}
280
281/*
282 * Activate the current task's in-memory FPU context,
283 * if it has not been used before:
284 */
285void fpu__activate_curr(struct fpu *fpu)
286{
287 WARN_ON_FPU(fpu != ¤t->thread.fpu);
288
289 if (!fpu->fpstate_active) {
290 fpstate_init(&fpu->state);
291
292 /* Safe to do for the current task: */
293 fpu->fpstate_active = 1;
294 }
295}
296EXPORT_SYMBOL_GPL(fpu__activate_curr);
297
298/*
299 * This function must be called before we read a task's fpstate.
300 *
301 * If the task has not used the FPU before then initialize its
302 * fpstate.
303 *
304 * If the task has used the FPU before then save it.
305 */
306void fpu__activate_fpstate_read(struct fpu *fpu)
307{
308 /*
309 * If fpregs are active (in the current CPU), then
310 * copy them to the fpstate:
311 */
312 if (fpu->fpregs_active) {
313 fpu__save(fpu);
314 } else {
315 if (!fpu->fpstate_active) {
316 fpstate_init(&fpu->state);
317
318 /* Safe to do for current and for stopped child tasks: */
319 fpu->fpstate_active = 1;
320 }
321 }
322}
323
324/*
325 * This function must be called before we write a task's fpstate.
326 *
327 * If the task has used the FPU before then unlazy it.
328 * If the task has not used the FPU before then initialize its fpstate.
329 *
330 * After this function call, after registers in the fpstate are
331 * modified and the child task has woken up, the child task will
332 * restore the modified FPU state from the modified context. If we
333 * didn't clear its lazy status here then the lazy in-registers
334 * state pending on its former CPU could be restored, corrupting
335 * the modifications.
336 */
337void fpu__activate_fpstate_write(struct fpu *fpu)
338{
339 /*
340 * Only stopped child tasks can be used to modify the FPU
341 * state in the fpstate buffer:
342 */
343 WARN_ON_FPU(fpu == ¤t->thread.fpu);
344
345 if (fpu->fpstate_active) {
346 /* Invalidate any lazy state: */
347 fpu->last_cpu = -1;
348 } else {
349 fpstate_init(&fpu->state);
350
351 /* Safe to do for stopped child tasks: */
352 fpu->fpstate_active = 1;
353 }
354}
355
356/*
357 * This function must be called before we write the current
358 * task's fpstate.
359 *
360 * This call gets the current FPU register state and moves
361 * it in to the 'fpstate'. Preemption is disabled so that
362 * no writes to the 'fpstate' can occur from context
363 * swiches.
364 *
365 * Must be followed by a fpu__current_fpstate_write_end().
366 */
367void fpu__current_fpstate_write_begin(void)
368{
369 struct fpu *fpu = ¤t->thread.fpu;
370
371 /*
372 * Ensure that the context-switching code does not write
373 * over the fpstate while we are doing our update.
374 */
375 preempt_disable();
376
377 /*
378 * Move the fpregs in to the fpu's 'fpstate'.
379 */
380 fpu__activate_fpstate_read(fpu);
381
382 /*
383 * The caller is about to write to 'fpu'. Ensure that no
384 * CPU thinks that its fpregs match the fpstate. This
385 * ensures we will not be lazy and skip a XRSTOR in the
386 * future.
387 */
388 fpu->last_cpu = -1;
389}
390
391/*
392 * This function must be paired with fpu__current_fpstate_write_begin()
393 *
394 * This will ensure that the modified fpstate gets placed back in
395 * the fpregs if necessary.
396 *
397 * Note: This function may be called whether or not an _actual_
398 * write to the fpstate occurred.
399 */
400void fpu__current_fpstate_write_end(void)
401{
402 struct fpu *fpu = ¤t->thread.fpu;
403
404 /*
405 * 'fpu' now has an updated copy of the state, but the
406 * registers may still be out of date. Update them with
407 * an XRSTOR if they are active.
408 */
409 if (fpregs_active())
410 copy_kernel_to_fpregs(&fpu->state);
411
412 /*
413 * Our update is done and the fpregs/fpstate are in sync
414 * if necessary. Context switches can happen again.
415 */
416 preempt_enable();
417}
418
419/*
420 * 'fpu__restore()' is called to copy FPU registers from
421 * the FPU fpstate to the live hw registers and to activate
422 * access to the hardware registers, so that FPU instructions
423 * can be used afterwards.
424 *
425 * Must be called with kernel preemption disabled (for example
426 * with local interrupts disabled, as it is in the case of
427 * do_device_not_available()).
428 */
429void fpu__restore(struct fpu *fpu)
430{
431 fpu__activate_curr(fpu);
432
433 /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
434 kernel_fpu_disable();
435 fpregs_activate(fpu);
436 copy_kernel_to_fpregs(&fpu->state);
437 fpu->counter++;
438 kernel_fpu_enable();
439}
440EXPORT_SYMBOL_GPL(fpu__restore);
441
442/*
443 * Drops current FPU state: deactivates the fpregs and
444 * the fpstate. NOTE: it still leaves previous contents
445 * in the fpregs in the eager-FPU case.
446 *
447 * This function can be used in cases where we know that
448 * a state-restore is coming: either an explicit one,
449 * or a reschedule.
450 */
451void fpu__drop(struct fpu *fpu)
452{
453 preempt_disable();
454 fpu->counter = 0;
455
456 if (fpu->fpregs_active) {
457 /* Ignore delayed exceptions from user space */
458 asm volatile("1: fwait\n"
459 "2:\n"
460 _ASM_EXTABLE(1b, 2b));
461 fpregs_deactivate(fpu);
462 }
463
464 fpu->fpstate_active = 0;
465
466 preempt_enable();
467}
468
469/*
470 * Clear FPU registers by setting them up from
471 * the init fpstate:
472 */
473static inline void copy_init_fpstate_to_fpregs(void)
474{
475 if (use_xsave())
476 copy_kernel_to_xregs(&init_fpstate.xsave, -1);
477 else if (static_cpu_has(X86_FEATURE_FXSR))
478 copy_kernel_to_fxregs(&init_fpstate.fxsave);
479 else
480 copy_kernel_to_fregs(&init_fpstate.fsave);
481}
482
483/*
484 * Clear the FPU state back to init state.
485 *
486 * Called by sys_execve(), by the signal handler code and by various
487 * error paths.
488 */
489void fpu__clear(struct fpu *fpu)
490{
491 WARN_ON_FPU(fpu != ¤t->thread.fpu); /* Almost certainly an anomaly */
492
493 if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
494 /* FPU state will be reallocated lazily at the first use. */
495 fpu__drop(fpu);
496 } else {
497 if (!fpu->fpstate_active) {
498 fpu__activate_curr(fpu);
499 user_fpu_begin();
500 }
501 copy_init_fpstate_to_fpregs();
502 }
503}
504
505/*
506 * x87 math exception handling:
507 */
508
509static inline unsigned short get_fpu_cwd(struct fpu *fpu)
510{
511 if (cpu_has_fxsr) {
512 return fpu->state.fxsave.cwd;
513 } else {
514 return (unsigned short)fpu->state.fsave.cwd;
515 }
516}
517
518static inline unsigned short get_fpu_swd(struct fpu *fpu)
519{
520 if (cpu_has_fxsr) {
521 return fpu->state.fxsave.swd;
522 } else {
523 return (unsigned short)fpu->state.fsave.swd;
524 }
525}
526
527static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
528{
529 if (cpu_has_xmm) {
530 return fpu->state.fxsave.mxcsr;
531 } else {
532 return MXCSR_DEFAULT;
533 }
534}
535
536int fpu__exception_code(struct fpu *fpu, int trap_nr)
537{
538 int err;
539
540 if (trap_nr == X86_TRAP_MF) {
541 unsigned short cwd, swd;
542 /*
543 * (~cwd & swd) will mask out exceptions that are not set to unmasked
544 * status. 0x3f is the exception bits in these regs, 0x200 is the
545 * C1 reg you need in case of a stack fault, 0x040 is the stack
546 * fault bit. We should only be taking one exception at a time,
547 * so if this combination doesn't produce any single exception,
548 * then we have a bad program that isn't synchronizing its FPU usage
549 * and it will suffer the consequences since we won't be able to
550 * fully reproduce the context of the exception
551 */
552 cwd = get_fpu_cwd(fpu);
553 swd = get_fpu_swd(fpu);
554
555 err = swd & ~cwd;
556 } else {
557 /*
558 * The SIMD FPU exceptions are handled a little differently, as there
559 * is only a single status/control register. Thus, to determine which
560 * unmasked exception was caught we must mask the exception mask bits
561 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
562 */
563 unsigned short mxcsr = get_fpu_mxcsr(fpu);
564 err = ~(mxcsr >> 7) & mxcsr;
565 }
566
567 if (err & 0x001) { /* Invalid op */
568 /*
569 * swd & 0x240 == 0x040: Stack Underflow
570 * swd & 0x240 == 0x240: Stack Overflow
571 * User must clear the SF bit (0x40) if set
572 */
573 return FPE_FLTINV;
574 } else if (err & 0x004) { /* Divide by Zero */
575 return FPE_FLTDIV;
576 } else if (err & 0x008) { /* Overflow */
577 return FPE_FLTOVF;
578 } else if (err & 0x012) { /* Denormal, Underflow */
579 return FPE_FLTUND;
580 } else if (err & 0x020) { /* Precision */
581 return FPE_FLTRES;
582 }
583
584 /*
585 * If we're using IRQ 13, or supposedly even some trap
586 * X86_TRAP_MF implementations, it's possible
587 * we get a spurious trap, which is not an error.
588 */
589 return 0;
590}