v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/entry-common.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/nospec.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#ifdef CONFIG_XEN_PV
#include <xen/xen-ops.h>
#include <xen/events.h>
#endif

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>
#include <asm/nospec-branch.h>
#include <asm/io_bitmap.h>
#include <asm/syscall.h>
#include <asm/irq_stack.h>

#ifdef CONFIG_X86_64
__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
	nr = syscall_enter_from_user_mode(regs, nr);

	instrumentation_begin();
	if (likely(nr < NR_syscalls)) {
		nr = array_index_nospec(nr, NR_syscalls);
		regs->ax = sys_call_table[nr](regs);
#ifdef CONFIG_X86_X32_ABI
	} else if (likely((nr & __X32_SYSCALL_BIT) &&
			  (nr & ~__X32_SYSCALL_BIT) < X32_NR_syscalls)) {
		nr = array_index_nospec(nr & ~__X32_SYSCALL_BIT,
					X32_NR_syscalls);
		regs->ax = x32_sys_call_table[nr](regs);
#endif
	}
	instrumentation_end();
	syscall_exit_to_user_mode(regs);
}
#endif
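
The x32 branch above fires when userspace sets __X32_SYSCALL_BIT (bit 30) in the syscall number; the masked number then indexes x32_sys_call_table. A minimal userspace sketch of the caller's side, assuming an x32-enabled (CONFIG_X86_X32_ABI) kernel; elsewhere the call simply fails with ENOSYS:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Matches __X32_SYSCALL_BIT in the dispatch above. */
#define X32_SYSCALL_BIT 0x40000000L

int main(void)
{
	/* 39 is getpid in the 64-bit table; setting bit 30 routes the
	 * call through the x32 branch of do_syscall_64 instead. */
	long pid = syscall(39 | X32_SYSCALL_BIT);

	printf("getpid via x32 bit: %ld\n", pid);
	return 0;
}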

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_IA32_EMULATION))
		current_thread_info()->status |= TS_COMPAT;

	return (unsigned int)regs->orig_ax;
}

/*
 * Invoke a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
						  unsigned int nr)
{
	if (likely(nr < IA32_NR_syscalls)) {
		instrumentation_begin();
		nr = array_index_nospec(nr, IA32_NR_syscalls);
		regs->ax = ia32_sys_call_table[nr](regs);
		instrumentation_end();
	}
}

/* Handles int $0x80 */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
	unsigned int nr = syscall_32_enter(regs);

	/*
	 * Subtlety here: if ptrace pokes something larger than 2^32-1 into
	 * orig_ax, the unsigned int return value truncates it.  This may
	 * or may not be necessary, but it matches the old asm behavior.
	 */
	nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);

	do_syscall_32_irqs_on(regs, nr);
	syscall_exit_to_user_mode(regs);
}
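
do_int80_syscall_32() is the C target of the legacy int $0x80 vector. A minimal sketch of the other side of that gate, for a 32-bit build (gcc -m32); 20 is __NR_getpid in the i386 table:

#include <stdio.h>

int main(void)
{
	long ret;

	/* i386 convention: eax holds the syscall number; ebx, ecx, edx,
	 * esi, edi, ebp hold up to six arguments (getpid needs none). */
	asm volatile("int $0x80"
		     : "=a" (ret)
		     : "a" (20L)	/* __NR_getpid on i386 */
		     : "memory");

	printf("pid via int $0x80: %ld\n", ret);
	return 0;
}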

static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
	unsigned int nr = syscall_32_enter(regs);
	int res;

	/*
	 * This cannot use syscall_enter_from_user_mode() as it has to
	 * fetch EBP before invoking any of the syscall entry work
	 * functions.
	 */
	syscall_enter_from_user_mode_prepare(regs);

	instrumentation_begin();
	/* Fetch EBP from where the vDSO stashed it. */
	if (IS_ENABLED(CONFIG_X86_64)) {
		/*
		 * Micro-optimization: the pointer we're following is
		 * explicitly 32 bits, so it can't be out of range.
		 */
		res = __get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp);
	} else {
		res = get_user(*(u32 *)&regs->bp,
		       (u32 __user __force *)(unsigned long)(u32)regs->sp);
	}
	instrumentation_end();

	if (res) {
		/* User code screwed up. */
		regs->ax = -EFAULT;
		syscall_exit_to_user_mode(regs);
		return false;
	}

	/* The cast truncates any ptrace induced syscall nr > 2^32-1 */
	nr = (unsigned int)syscall_enter_from_user_mode_work(regs, nr);

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs, nr);
	syscall_exit_to_user_mode(regs);
	return true;
}
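
The cast chain (u32 __user __force *)(unsigned long)(u32)regs->sp above relies on truncate-then-widen zero-extension, so the user pointer it builds can never reach above 4 GiB. A standalone illustration of just that arithmetic (the sample value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Arbitrary register image with garbage in the high half. */
	uint64_t sp = 0xffffffff89abcdefULL;

	/* Truncate to u32, then widen back: the high 32 bits become 0. */
	uint64_t ptr = (unsigned long)(uint32_t)sp;

	printf("%#llx -> %#llx\n",
	       (unsigned long long)sp, (unsigned long long)ptr);
	return 0;	/* prints 0xffffffff89abcdef -> 0x89abcdef */
}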

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered using int80.
	 */
	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
					vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	/* Invoke the syscall. If it failed, keep it simple: use IRET. */
	if (!__do_fast_syscall_32(regs))
		return 0;

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible noinstr long do_SYSENTER_32(struct pt_regs *regs)
{
	/* SYSENTER loses RSP, but the vDSO saved it in RBP. */
	regs->sp = regs->bp;

	/* SYSENTER clobbers EFLAGS.IF.  Assume it was set in usermode. */
	regs->flags |= X86_EFLAGS_IF;

	return do_fast_syscall_32(regs);
}
#endif
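
User programs normally reach this fast path through the vDSO's __kernel_vsyscall, whose address the kernel publishes in the auxiliary vector as AT_SYSINFO; that stub is also what stashes EBP for __do_fast_syscall_32() to fetch. A hedged sketch of calling it directly from a 32-bit process (gcc -m32), again using getpid since it takes no arguments:

#include <elf.h>		/* AT_SYSINFO */
#include <stdio.h>
#include <sys/auxv.h>		/* getauxval() */

int main(void)
{
	/* __kernel_vsyscall: SYSENTER/SYSCALL32 when available, else int $0x80. */
	void *vsys = (void *)getauxval(AT_SYSINFO);
	long ret;

	if (!vsys)
		return 1;

	asm volatile("call *%1"
		     : "=a" (ret)
		     : "r" (vsys), "a" (20L)	/* __NR_getpid on i386 */
		     : "memory");

	printf("pid via __kernel_vsyscall: %ld\n", ret);
	return 0;
}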

SYSCALL_DEFINE0(ni_syscall)
{
	return -ENOSYS;
}

noinstr bool idtentry_enter_nmi(struct pt_regs *regs)
{
	bool irq_state = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
	instrumentation_end();

	return irq_state;
}

noinstr void idtentry_exit_nmi(struct pt_regs *regs, bool restore)
{
	instrumentation_begin();
	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}
	instrumentation_end();

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}
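
The bool returned by idtentry_enter_nmi() records whether hardirqs were enabled at entry and must be handed back to idtentry_exit_nmi(). A sketch of the intended pairing, modeled loosely on exc_nmi() in arch/x86/kernel/nmi.c; the handler body here is a hypothetical placeholder:

static noinstr void example_nmi_body(struct pt_regs *regs)
{
	bool irq_state = idtentry_enter_nmi(regs);

	instrumentation_begin();
	/* ... actual NMI handling, which may be traced, goes here ... */
	instrumentation_end();

	idtentry_exit_nmi(regs, irq_state);
}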

#ifdef CONFIG_XEN_PV
#ifndef CONFIG_PREEMPTION
/*
 * Some hypercalls issued by the toolstack can take many 10s of
 * seconds. Allow tasks running hypercalls via the privcmd driver to
 * be voluntarily preempted even if full kernel preemption is
 * disabled.
 *
 * Such preemptible hypercalls are bracketed by
 * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end()
 * calls.
 */
DEFINE_PER_CPU(bool, xen_in_preemptible_hcall);
EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);

/*
 * In case of scheduling the flag must be cleared and restored after
 * returning from schedule as the task might move to a different CPU.
 */
static __always_inline bool get_and_clear_inhcall(void)
{
	bool inhcall = __this_cpu_read(xen_in_preemptible_hcall);

	__this_cpu_write(xen_in_preemptible_hcall, false);
	return inhcall;
}

static __always_inline void restore_inhcall(bool inhcall)
{
	__this_cpu_write(xen_in_preemptible_hcall, inhcall);
}
#else
static __always_inline bool get_and_clear_inhcall(void) { return false; }
static __always_inline void restore_inhcall(bool inhcall) { }
#endif
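
The bracketing named in the comment above is the caller's job. A hedged sketch of the pattern, modeled on privcmd_ioctl_hypercall() in drivers/xen/privcmd.c; the wrapper name is invented for illustration, and privcmd_call()'s exact signature should be checked against asm/xen/hypercall.h:

static long issue_toolstack_hcall(struct privcmd_hypercall *hc)
{
	long ret;

	/* Flag this task so the upcall path above may voluntarily
	 * reschedule it even on a !CONFIG_PREEMPTION kernel. */
	xen_preemptible_hcall_begin();
	ret = privcmd_call(hc->op, hc->arg[0], hc->arg[1],
			   hc->arg[2], hc->arg[3], hc->arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}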

static void __xen_pv_evtchn_do_upcall(void)
{
	irq_enter_rcu();
	inc_irq_stat(irq_hv_callback_count);

	xen_hvm_evtchn_do_upcall();

	irq_exit_rcu();
}

__visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	bool inhcall;
	irqentry_state_t state;

	state = irqentry_enter(regs);
	old_regs = set_irq_regs(regs);

	instrumentation_begin();
	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
	instrumentation_end();

	set_irq_regs(old_regs);

	inhcall = get_and_clear_inhcall();
	if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
		instrumentation_begin();
		irqentry_exit_cond_resched();
		instrumentation_end();
		restore_inhcall(inhcall);
	} else {
		irqentry_exit(regs, state);
	}
}
#endif /* CONFIG_XEN_PV */
v4.17
 
/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/nospec.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	bool emulated = false;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (unlikely(work & _TIF_SYSCALL_EMU))
		emulated = true;

	if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		return -1L;

	if (emulated)
		return -1L;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

	return ret ?: regs->orig_ax;
}
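
The seccomp_data that syscall_trace_enter() fills in above is exactly the structure a user-installed BPF filter inspects. A minimal userspace sketch installing the classic arch-check filter (kill on a foreign architecture, allow everything else):

#include <stddef.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int main(void)
{
	struct sock_filter filter[] = {
		/* Load seccomp_data.arch (filled in by syscall_trace_enter). */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, arch)),
		/* Kill on any foreign architecture, allow otherwise. */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_X86_64, 1, 0),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(filter) / sizeof(filter[0]),
		.filter = filter,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
	    prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
		return 1;

	puts("filter installed; _TIF_SECCOMP is now set for this task");
	return 0;
}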

#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set.  Several of these flags
	 * can be set at any time on preemptable kernels if we have IRQs on,
	 * so we need to loop.  Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

	addr_limit_user_check();

	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();
}

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}
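
tracehook_report_syscall_entry()/_exit() surface to a tracer as PTRACE_SYSCALL stops. A hedged sketch of the tracer's side on x86-64: the child execs /bin/true while the parent logs every stop (at entry stops rax reads as -ENOSYS, i.e. -38, until the syscall has actually run):

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		execl("/bin/true", "true", (char *)NULL);
		_exit(127);
	}

	int status;
	waitpid(child, &status, 0);	/* initial stop after execve */

	while (1) {
		/* Resume until the next syscall entry or exit stop. */
		ptrace(PTRACE_SYSCALL, child, NULL, NULL);
		waitpid(child, &status, 0);
		if (WIFEXITED(status))
			break;

		struct user_regs_struct regs;
		ptrace(PTRACE_GETREGS, child, NULL, &regs);
		printf("stop: orig_rax=%lld rax=%lld\n",
		       (long long)regs.orig_rax, (long long)regs.rax);
	}
	return 0;
}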

/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work.  If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
	struct thread_info *ti;

	enter_from_user_mode();
	local_irq_enable();
	ti = current_thread_info();
	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table.  The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
	nr &= __SYSCALL_MASK;
	if (likely(nr < NR_syscalls)) {
		nr = array_index_nospec(nr, NR_syscalls);
		regs->ax = sys_call_table[nr](regs);
	}

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it.  This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		nr = array_index_nospec(nr, IA32_NR_syscalls);
#ifdef CONFIG_IA32_EMULATION
		regs->ax = ia32_sys_call_table[nr](regs);
#else
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
#endif /* CONFIG_IA32_EMULATION */
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered using int80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

	/* Fetch EBP from where the vDSO stashed it. */
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is explicitly
		 * 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			    (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
		) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif