// SPDX-License-Identifier: GPL-2.0-only
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/entry-common.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/nospec.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#ifdef CONFIG_XEN_PV
#include <xen/xen-ops.h>
#include <xen/events.h>
#endif

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/syscall.h>
#include <asm/irq_stack.h>

#ifdef CONFIG_X86_64

static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
{
	/*
	 * Convert negative numbers to very high and thus out of range
	 * numbers for comparisons.
	 */
	unsigned int unr = nr;

	if (likely(unr < NR_syscalls)) {
		unr = array_index_nospec(unr, NR_syscalls);
		regs->ax = sys_call_table[unr](regs);
		return true;
	}
	return false;
}
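/*
 * Illustrative note (added, not in the original source): the implicit
 * int-to-unsigned conversion above folds the "nr < 0" and
 * "nr >= NR_syscalls" checks into a single compare. For example,
 * nr == -1 becomes unr == 0xffffffff, which fails unr < NR_syscalls
 * just like any other out-of-range number, so one branch rejects both
 * negative and too-large syscall numbers.
 */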

static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
{
	/*
	 * Adjust the starting offset of the table, and convert numbers
	 * < __X32_SYSCALL_BIT to very high and thus out of range
	 * numbers for comparisons.
	 */
	unsigned int xnr = nr - __X32_SYSCALL_BIT;

	if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
		xnr = array_index_nospec(xnr, X32_NR_syscalls);
		regs->ax = x32_sys_call_table[xnr](regs);
		return true;
	}
	return false;
}
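/*
 * Worked example (added for clarity, not in the original): x32 system
 * calls are invoked with __X32_SYSCALL_BIT (0x40000000) ORed into the
 * syscall number, so nr == 0x40000001 yields xnr == 1, an index into
 * x32_sys_call_table. A plain x86-64 number such as nr == 1 wraps
 * around to a huge unsigned value and fails the range check.
 */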

__visible noinstr void do_syscall_64(struct pt_regs *regs, int nr)
{
	add_random_kstack_offset();
	nr = syscall_enter_from_user_mode(regs, nr);

	instrumentation_begin();

	if (!do_syscall_x64(regs, nr) && !do_syscall_x32(regs, nr) && nr != -1) {
		/* Invalid system call, but still a system call. */
		regs->ax = __x64_sys_ni_syscall(regs);
	}

	instrumentation_end();
	syscall_exit_to_user_mode(regs);
}
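/*
 * Note added for clarity: the nr != -1 check above exists because a
 * ptracer (or seccomp) can set the syscall number to -1 to skip the
 * system call entirely. In that case regs->ax is left alone (the entry
 * asm preloaded it with -ENOSYS) so a tracer can supply its own return
 * value instead of having it overwritten here.
 */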
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static __always_inline int syscall_32_enter(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_IA32_EMULATION))
		current_thread_info()->status |= TS_COMPAT;

	return (int)regs->orig_ax;
}

/*
 * Invoke a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
{
	/*
	 * Convert negative numbers to very high and thus out of range
	 * numbers for comparisons.
	 */
	unsigned int unr = nr;

	if (likely(unr < IA32_NR_syscalls)) {
		unr = array_index_nospec(unr, IA32_NR_syscalls);
		regs->ax = ia32_sys_call_table[unr](regs);
	} else if (nr != -1) {
		regs->ax = __ia32_sys_ni_syscall(regs);
	}
}

/* Handles int $0x80 */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
	int nr = syscall_32_enter(regs);

	add_random_kstack_offset();
	/*
	 * Subtlety here: if ptrace pokes something larger than 2^31-1 into
	 * orig_ax, the int return value truncates it. This matches
	 * the semantics of syscall_get_nr().
	 */
	nr = syscall_enter_from_user_mode(regs, nr);
	instrumentation_begin();

	do_syscall_32_irqs_on(regs, nr);

	instrumentation_end();
	syscall_exit_to_user_mode(regs);
}
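/*
 * Concrete illustration of the truncation subtlety above (added, not in
 * the original): if a tracer pokes 0x100000001 into orig_ax (e.g. via
 * PTRACE_SETREGS from a 64-bit tracer), the (int) cast in
 * syscall_32_enter() truncates it to 1, so the task runs syscall 1
 * rather than failing with -ENOSYS, matching what syscall_get_nr()
 * would report.
 */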

static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
	int nr = syscall_32_enter(regs);
	int res;

	add_random_kstack_offset();
	/*
	 * This cannot use syscall_enter_from_user_mode() as it has to
	 * fetch EBP before invoking any of the syscall entry work
	 * functions.
	 */
	syscall_enter_from_user_mode_prepare(regs);

	instrumentation_begin();
	/* Fetch EBP from where the vDSO stashed it. */
	if (IS_ENABLED(CONFIG_X86_64)) {
		/*
		 * Micro-optimization: the pointer we're following is
		 * explicitly 32 bits, so it can't be out of range.
		 */
		res = __get_user(*(u32 *)&regs->bp,
				 (u32 __user __force *)(unsigned long)(u32)regs->sp);
	} else {
		res = get_user(*(u32 *)&regs->bp,
			       (u32 __user __force *)(unsigned long)(u32)regs->sp);
	}

	if (res) {
		/* User code screwed up. */
		regs->ax = -EFAULT;

		local_irq_disable();
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
		return false;
	}

	nr = syscall_enter_from_user_mode_work(regs, nr);

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs, nr);

	instrumentation_end();
	syscall_exit_to_user_mode(regs);
	return true;
}
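/*
 * Background sketch (added; the authoritative stub lives in
 * arch/x86/entry/vdso/vdso32/system_call.S): the 32-bit vDSO entry
 * point saves the registers the fast entry instructions clobber before
 * entering the kernel, roughly:
 *
 *	push	%ecx
 *	push	%edx
 *	push	%ebp
 *	mov	%esp, %ebp
 *	sysenter		(or syscall on AMD)
 *
 * so at kernel entry the user stack pointer points at the saved EBP,
 * which is why the code above reloads regs->bp from *(u32 *)regs->sp.
 */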

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention. Adjust regs so it looks like we entered using int80.
	 */
	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
					vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	/* Invoke the syscall. If it failed, keep it simple: use IRET. */
	if (!__do_fast_syscall_32(regs))
		return 0;

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
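/*
 * Why the landing pad works (explanatory note, not in the original):
 * the vDSO places an 'int $0x80' instruction (two bytes, cd 80)
 * immediately before sym_int80_landing_pad. Signal-driven syscall
 * restart rewinds with 'regs->ip -= 2', which therefore re-executes
 * that int $0x80 regardless of whether the original entry was via
 * SYSENTER, SYSCALL32 or int $0x80 itself.
 */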

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible noinstr long do_SYSENTER_32(struct pt_regs *regs)
{
	/* SYSENTER loses RSP, but the vDSO saved it in RBP. */
	regs->sp = regs->bp;

	/* SYSENTER clobbers EFLAGS.IF. Assume it was set in usermode. */
	regs->flags |= X86_EFLAGS_IF;

	return do_fast_syscall_32(regs);
}
#endif

SYSCALL_DEFINE0(ni_syscall)
{
	return -ENOSYS;
}

#ifdef CONFIG_XEN_PV
#ifndef CONFIG_PREEMPTION
/*
 * Some hypercalls issued by the toolstack can take many 10s of
 * seconds. Allow tasks running hypercalls via the privcmd driver to
 * be voluntarily preempted even if full kernel preemption is
 * disabled.
 *
 * Such preemptible hypercalls are bracketed by
 * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
 * calls.
 */
DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);

/*
 * In case of scheduling the flag must be cleared and restored after
 * returning from schedule as the task might move to a different CPU.
 */
static __always_inline bool get_and_clear_inhcall(void)
{
	bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);

	__this_cpu_write(xen_in_preemptible_hcall, false);
	return inhcall;
}

static __always_inline void restore_inhcall(bool inhcall)
{
	__this_cpu_write(xen_in_preemptible_hcall, inhcall);
}
#else
static __always_inline bool get_and_clear_inhcall(void) { return false; }
static __always_inline void restore_inhcall(bool inhcall) { }
#endif
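
/*
 * Usage sketch (added for illustration; the real caller is the Xen
 * privcmd driver): a long-running hypercall is bracketed like
 *
 *	xen_preemptible_hcall_begin();
 *	ret = privcmd_call(...);	(may run for tens of seconds)
 *	xen_preemptible_hcall_end();
 *
 * so the upcall handler below may voluntarily reschedule in between.
 */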

static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	inc_irq_stat(irq_hv_callback_count);

	xen_hvm_evtchn_do_upcall();

	set_irq_regs(old_regs);
}

__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);
	bool inhcall;

	instrumentation_begin();
	run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);

	inhcall = get_and_clear_inhcall();
	if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
		irqentry_exit_cond_resched();
		instrumentation_end();
		restore_inhcall(inhcall);
	} else {
		instrumentation_end();
		irqentry_exit(regs, state);
	}
}
#endif /* CONFIG_XEN_PV */
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
{
	unsigned long top_of_stack =
		(unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
	return (struct thread_info *)(top_of_stack - THREAD_SIZE);
}
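/*
 * Layout note (added for clarity): in this era thread_info lived at the
 * bottom of the kernel stack, with pt_regs near the top:
 *
 *	---------------------  <- top_of_stack
 *	| padding            |    (TOP_OF_KERNEL_STACK_PADDING)
 *	| struct pt_regs     |  <- regs .. regs + 1
 *	| ...                |
 *	| struct thread_info |  <- top_of_stack - THREAD_SIZE
 *	---------------------
 *
 * so the arithmetic above recovers thread_info from a pt_regs pointer.
 */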

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * We can return 0 to resume the syscall or anything else to go to phase
 * 2. If we resume the syscall, we need to put something appropriate in
 * regs->orig_ax.
 *
 * NB: We don't have full pt_regs here, but regs->orig_ax and regs->ax
 * are fully functional.
 *
 * For phase 2's benefit, our return value is:
 * 0: resume the syscall
 * 1: go to phase 2; no seccomp phase 2 needed
 * anything else: go to phase 2; pass return value to seccomp
 */
unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned long ret = 0;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp first -- it should minimize exposure of other
	 * code, and keeping seccomp fast is probably more valuable
	 * than the rest of this.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		BUILD_BUG_ON(SECCOMP_PHASE1_OK != 0);
		BUILD_BUG_ON(SECCOMP_PHASE1_SKIP != 1);

		ret = seccomp_phase1(&sd);
		if (ret == SECCOMP_PHASE1_SKIP) {
			regs->orig_ax = -1;
			ret = 0;
		} else if (ret != SECCOMP_PHASE1_OK) {
			return ret;  /* Go directly to phase 2 */
		}

		work &= ~_TIF_SECCOMP;
	}
#endif

	/* Do our best to finish without phase 2. */
	if (work == 0)
		return ret;  /* seccomp and/or nohz only (ret == 0 here) */

#ifdef CONFIG_AUDITSYSCALL
	if (work == _TIF_SYSCALL_AUDIT) {
		/*
		 * If there is no more work to be done except auditing,
		 * then audit in phase 1. Phase 2 always audits, so, if
		 * we audit here, then we can't go on to phase 2.
		 */
		do_audit_syscall_entry(regs, arch);
		return 0;
	}
#endif

	return 1;  /* Something is enabled that we can't handle in phase 1 */
}
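/*
 * Worked example (added, not in the original): a task whose only entry
 * work is a seccomp filter that returns SECCOMP_RET_ALLOW gets
 * ret == SECCOMP_PHASE1_OK (0) from seccomp_phase1(), work then becomes
 * 0, and phase 1 returns 0 -- the syscall proceeds on the fast path and
 * syscall_trace_enter_phase2() is never called.
 */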

/* Returns the syscall nr to run (which should match regs->orig_ax). */
long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
				unsigned long phase1_result)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	long ret = 0;
	u32 work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

#ifdef CONFIG_SECCOMP
	/*
	 * Call seccomp_phase2 before running the other hooks so that
	 * they can see any changes made by a seccomp tracer.
	 */
	if (phase1_result > 1 && seccomp_phase2(phase1_result)) {
		/* seccomp failures shouldn't expose any additional code. */
		return -1;
	}
#endif

	if (unlikely(work & _TIF_SYSCALL_EMU))
		ret = -1L;

	if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1L;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

	return ret ?: regs->orig_ax;
}

long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
	unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);

	if (phase1_result == 0)
		return regs->orig_ax;
	else
		return syscall_trace_enter_phase2(regs, arch, phase1_result);
}

#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_USER_RETURN_NOTIFY,
	 * _TIF_UPROBE, or _TIF_NEED_RESCHED set. Several of these flags
	 * can be set at any time on preemptable kernels if we have IRQs on,
	 * so we need to loop. Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	u32 cached_flags;

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
		local_irq_disable();

	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT. Make sure we clear it before
	 * returning to user mode. We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls. The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 */
	ti->status &= ~TS_COMPAT;
#endif

	user_enter();
}

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work. If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned long nr = regs->orig_ax;

	enter_from_user_mode();
	local_irq_enable();

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table. The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
	if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
		regs->ax = sys_call_table[nr & __SYSCALL_MASK](
			regs->di, regs->si, regs->dx,
			regs->r10, regs->r8, regs->r9);
	}

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. Does
 * all entry and exit work and returns with IRQs off. This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = pt_regs_to_thread_info(regs);
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it. This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero. Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
	}

	syscall_return_slowpath(regs);
}
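/*
 * Illustration of the zero-extension above (added, not in the
 * original): under IA32 emulation a compat syscall handler may be
 * declared with a long parameter, e.g. a hypothetical
 *
 *	asmlinkage long sys_foo(unsigned long len);
 *
 * A 32-bit caller only sets the low 32 bits of the register; without
 * the (unsigned int) casts the handler could see stale high bits left
 * over from earlier 64-bit kernel code.
 */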

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention. Adjust regs so it looks like we entered using int80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

	/* Fetch EBP from where the vDSO stashed it. */
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif