// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/resume_user_mode.h>
#include <linux/highmem.h>
#include <linux/jump_label.h>
#include <linux/kmsan.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
#include <linux/tick.h>

#include "common.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/* See comment for enter_from_user_mode() in entry-common.h */
static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
{
	arch_enter_from_user_mode(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	trace_hardirqs_off_finish();
	instrumentation_end();
}

void noinstr enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}
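
/*
 * Example (sketch, not part of this file): an architecture's interrupt
 * entry from user space is expected to pair this with exit_to_user_mode();
 * the entry point and handler names are hypothetical placeholders:
 *
 *	noinstr void arch_handle_irq_from_user(struct pt_regs *regs)
 *	{
 *		enter_from_user_mode(regs);
 *		instrumentation_begin();
 *		handle_irq(regs);		// hypothetical arch helper
 *		instrumentation_end();
 *		exit_to_user_mode();
 *	}
 */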

static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}

static long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long work)
{
	long ret = 0;

	/*
	 * Handle Syscall User Dispatch. This must come first, since
	 * the ABI here can be something that doesn't make sense for
	 * other syscall_work features.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (syscall_user_dispatch(regs))
			return -1L;
	}

	/* Handle ptrace */
	if (work & (SYSCALL_WORK_SYSCALL_TRACE | SYSCALL_WORK_SYSCALL_EMU)) {
		ret = ptrace_report_syscall_entry(regs);
		if (ret || (work & SYSCALL_WORK_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (work & SYSCALL_WORK_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	/* Any of the above might have changed the syscall number */
	syscall = syscall_get_nr(current, regs);

	if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall);

	syscall_enter_audit(regs, syscall);

	return ret ? : syscall;
}
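
/*
 * Illustration (sketch, not part of this file): a ptracer may rewrite the
 * syscall number at the syscall-entry stop, which is why the number is
 * re-read above instead of trusting the @syscall argument. Tracer side,
 * x86-64 register layout assumed:
 *
 *	ptrace(PTRACE_POKEUSER, pid,
 *	       offsetof(struct user_regs_struct, orig_rax), __NR_getpid);
 */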

static __always_inline long
__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);

	if (work & SYSCALL_WORK_ENTER)
		syscall = syscall_trace_enter(regs, syscall, work);

	return syscall;
}

long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
{
	return __syscall_enter_from_user_work(regs, syscall);
}

noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	long ret;

	__enter_from_user_mode(regs);

	instrumentation_begin();
	local_irq_enable();
	ret = __syscall_enter_from_user_work(regs, syscall);
	instrumentation_end();

	return ret;
}
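
/*
 * Example (sketch, not part of this file): a typical architecture syscall
 * entry built on top of this helper; the function name, orig_ax/ax register
 * names and the table indexing are x86-like assumptions:
 *
 *	__visible noinstr void do_syscall_64(struct pt_regs *regs)
 *	{
 *		long nr = syscall_enter_from_user_mode(regs, regs->orig_ax);
 *
 *		instrumentation_begin();
 *		if (nr >= 0)
 *			regs->ax = sys_call_table[nr](regs);
 *		instrumentation_end();
 *		syscall_exit_to_user_mode(regs);
 *	}
 */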

noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
	instrumentation_begin();
	local_irq_enable();
	instrumentation_end();
}
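
/*
 * Sketch of the split variant (assumed usage pattern, see the kernel-doc in
 * entry-common.h): an architecture that can only determine the syscall
 * number after the low level entry work, with interrupts enabled, can do:
 *
 *	syscall_enter_from_user_mode_prepare(regs);
 *	// ... fetch the syscall number, irqs are on here ...
 *	nr = syscall_enter_from_user_mode_work(regs, raw_nr);
 */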

/* See comment for exit_to_user_mode() in entry-common.h */
static __always_inline void __exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

void noinstr exit_to_user_mode(void)
{
	__exit_to_user_mode();
}

/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs) { }

static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			arch_do_signal_or_restart(regs);

		if (ti_work & _TIF_NOTIFY_RESUME)
			resume_user_mode_work(regs);

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption were
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();

		/* Check if any of the above work has queued a deferred wakeup */
		tick_nohz_user_enter_prepare();

		ti_work = read_thread_flags();
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}
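
/*
 * Illustration (assumed scenario, added for clarity): work can be raised
 * while the loop body runs with interrupts enabled, which is why the flags
 * are re-read only after interrupts have been disabled again:
 *
 *	CPU 0 (this loop)			CPU 1
 *	schedule() returns
 *						resched_curr() -> sets
 *						_TIF_NEED_RESCHED, sends IPI
 *	local_irq_disable_exit_to_user()
 *	ti_work = read_thread_flags()	// sees the flag, loops again
 */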

static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work = read_thread_flags();

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeup before the last need_resched() check */
	tick_nohz_user_enter_prepare();

	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that the address limit is intact and no locks are held */
	addr_limit_user_check();
	kmap_assert_nomap();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

/*
 * If SYSCALL_EMU is set, then the only reason to report is when
 * SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). This syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
static inline bool report_single_step(unsigned long work)
{
	if (work & SYSCALL_WORK_SYSCALL_EMU)
		return false;

	return work & SYSCALL_WORK_SYSCALL_EXIT_TRAP;
}
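
/*
 * Decision table for the helper above (derived from the code, added for
 * clarity):
 *
 *	EMU	EXIT_TRAP	report_single_step()
 *	0	0		false
 *	0	1		true
 *	1	x		false (already reported at syscall entry)
 */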

static void syscall_exit_work(struct pt_regs *regs, unsigned long work)
{
	bool step;

	/*
	 * If the syscall was rolled back due to syscall user dispatching,
	 * then the tracers below are not invoked for the same reason as
	 * the entry side was not invoked in syscall_trace_enter(): The ABI
	 * of these syscalls is unknown.
	 */
	if (work & SYSCALL_WORK_SYSCALL_USER_DISPATCH) {
		if (unlikely(current->syscall_dispatch.on_dispatch)) {
			current->syscall_dispatch.on_dispatch = false;
			return;
		}
	}

	audit_syscall_exit(regs);

	if (work & SYSCALL_WORK_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(work);
	if (step || work & SYSCALL_WORK_SYSCALL_TRACE)
		ptrace_report_syscall_exit(regs, step);
}

/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
	unsigned long nr = syscall_get_nr(current, regs);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(work & SYSCALL_WORK_EXIT))
		syscall_exit_work(regs, work);
}

static __always_inline void __syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
}

void syscall_exit_to_user_mode_work(struct pt_regs *regs)
{
	__syscall_exit_to_user_mode_work(regs);
}

__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	__syscall_exit_to_user_mode_work(regs);
	instrumentation_end();
	__exit_to_user_mode();
}
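
/*
 * Sketch of the exit ordering performed above (descriptive, mirrors the
 * code): tracers and audit run with interrupts enabled, then interrupts
 * are disabled for the final TIF work loop and the return to user mode:
 *
 *	syscall_exit_to_user_mode()
 *	  syscall_exit_to_user_mode_prepare()	// irqs on: audit, tracepoints, ptrace
 *	  local_irq_disable_exit_to_user()
 *	  exit_to_user_mode_prepare()		// irqs off: TIF work loop
 *	  __exit_to_user_mode()			// context tracking, lockdep
 */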

noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	__exit_to_user_mode();
}

noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task, invoke ct_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return, which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark a quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking ct_irq_enter(). If that nested interrupt
	 * is the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim a
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke ct_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when it is enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		instrumentation_begin();
		kmsan_unpoison_entry_regs(regs);
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
	instrumentation_end();

	return ret;
}

void raw_irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void)
{
	if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
		return;
	raw_irqentry_exit_cond_resched();
}
#endif
#endif
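
/*
 * Note (added for clarity): with CONFIG_PREEMPT_DYNAMIC the preemption
 * model is selected at boot, e.g. via the "preempt=none|voluntary|full"
 * command line option, which patches the static call or flips the static
 * key above instead of requiring a rebuild.
 */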

noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			instrumentation_end();
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION))
			irqentry_exit_cond_resched();

		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			ct_irq_exit();
	}
}
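
/*
 * Example (sketch, not part of this file): an architecture interrupt entry
 * from arbitrary context brackets its handler with the pair above; the
 * entry point name is a hypothetical placeholder:
 *
 *	noinstr void arch_do_interrupt(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		generic_handle_arch_irq(regs);
 *		instrumentation_end();
 *		irqentry_exit(regs, state);
 *	}
 */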

irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
{
	irqentry_state_t irq_state;

	irq_state.lockdep = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	ct_nmi_enter();

	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
	instrumentation_end();

	return irq_state;
}

void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
{
	instrumentation_begin();
	ftrace_nmi_exit();
	if (irq_state.lockdep) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}
	instrumentation_end();

	ct_nmi_exit();
	lockdep_hardirq_exit();
	if (irq_state.lockdep)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}
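
/*
 * Example (sketch, not part of this file): NMI-like exceptions use the
 * dedicated pair above because they can hit with any lockdep/RCU state;
 * the entry point and handler names are hypothetical placeholders:
 *
 *	noinstr void arch_do_nmi(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_nmi_enter(regs);
 *
 *		instrumentation_begin();
 *		handle_nmi(regs);		// hypothetical arch handler
 *		instrumentation_end();
 *		irqentry_nmi_exit(regs, state);
 *	}
 */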