/*
 * Copyright (C) 2004 PathScale, Inc
 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <stdlib.h>
#include <stdarg.h>
#include <errno.h>
#include <signal.h>
#include <strings.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "sysdep/mcontext.h"

void (*sig_info[NSIG])(int, struct uml_pt_regs *) = {
	[SIGTRAP] = relay_signal,
	[SIGFPE] = relay_signal,
	[SIGILL] = relay_signal,
	[SIGWINCH] = winch,
	[SIGBUS] = bus_handler,
	[SIGSEGV] = segv_handler,
	[SIGIO] = sigio_handler,
	[SIGVTALRM] = timer_handler };
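/*
 * Dispatch table used by sig_handler_common() below: it maps a host signal
 * number to the UML handler for it.  Signals not listed here stay NULL via
 * the designated initializers.
 */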

static void sig_handler_common(int sig, mcontext_t *mc)
{
	struct uml_pt_regs r;
	int save_errno = errno;

	r.is_user = 0;
	if (sig == SIGSEGV) {
		/* For segfaults, we want the data from the sigcontext. */
		get_regs_from_mc(&r, mc);
		GET_FAULTINFO_FROM_MC(r.faultinfo, mc);
	}

	/* enable signals if sig isn't IRQ signal */
	if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGVTALRM))
		unblock_signals();

	(*sig_info[sig])(sig, &r);

	errno = save_errno;
}
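/*
 * sig_handler_common() is reached both from the sig_handler() wrapper with a
 * real mcontext and directly from unblock_signals() with mc == NULL when a
 * deferred SIGIO is replayed; the mcontext is only examined for SIGSEGV, so
 * the NULL is harmless there.
 */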

/*
 * These are the asynchronous signals. SIGPROF is excluded because we want to
 * be able to profile all of UML, not just the non-critical sections. If
 * profiling is not thread-safe, then that is not my problem. We can disable
 * profiling when SMP is enabled in that case.
 */
#define SIGIO_BIT 0
#define SIGIO_MASK (1 << SIGIO_BIT)

#define SIGVTALRM_BIT 1
#define SIGVTALRM_MASK (1 << SIGVTALRM_BIT)

static int signals_enabled;
static unsigned int signals_pending;
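/*
 * signals_enabled is a software interrupt-enable flag.  While it is 0, the
 * wrappers below do not run the real handlers; they just record the arrival
 * in signals_pending using the SIGIO_MASK/SIGVTALRM_MASK bits above, and
 * unblock_signals() replays whatever accumulated once signals are enabled
 * again.
 */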

void sig_handler(int sig, mcontext_t *mc)
{
	int enabled;

	enabled = signals_enabled;
	if (!enabled && (sig == SIGIO)) {
		signals_pending |= SIGIO_MASK;
		return;
	}

	block_signals();

	sig_handler_common(sig, mc);

	set_signals(enabled);
}

static void real_alarm_handler(mcontext_t *mc)
{
	struct uml_pt_regs regs;

	if (mc != NULL)
		get_regs_from_mc(&regs, mc);
	regs.is_user = 0;
	unblock_signals();
	timer_handler(SIGVTALRM, &regs);
}

void alarm_handler(int sig, mcontext_t *mc)
{
	int enabled;

	enabled = signals_enabled;
	if (!signals_enabled) {
		signals_pending |= SIGVTALRM_MASK;
		return;
	}

	block_signals();

	real_alarm_handler(mc);
	set_signals(enabled);
}

void timer_init(void)
{
	set_handler(SIGVTALRM);
}

void set_sigstack(void *sig_stack, int size)
{
	stack_t stack = ((stack_t) { .ss_flags = 0,
				     .ss_sp = (__ptr_t) sig_stack,
				     .ss_size = size - sizeof(void *) });

	if (sigaltstack(&stack, NULL) != 0)
		panic("enabling signal stack failed, errno = %d\n", errno);
}

static void (*handlers[_NSIG])(int sig, mcontext_t *mc) = {
	[SIGSEGV] = sig_handler,
	[SIGBUS] = sig_handler,
	[SIGILL] = sig_handler,
	[SIGFPE] = sig_handler,
	[SIGTRAP] = sig_handler,

	[SIGIO] = sig_handler,
	[SIGWINCH] = sig_handler,
	[SIGVTALRM] = alarm_handler
};


static void hard_handler(int sig, siginfo_t *info, void *p)
{
	struct ucontext *uc = p;
	mcontext_t *mc = &uc->uc_mcontext;
	unsigned long pending = 1UL << sig;

	do {
		int nested, bail;

		/*
		 * pending comes back with one bit set for each
		 * interrupt that arrived while setting up the stack,
		 * plus a bit for this interrupt, plus the zero bit is
		 * set if this is a nested interrupt.
		 * If bail is true, then we interrupted another
		 * handler setting up the stack. In this case, we
		 * have to return, and the upper handler will deal
		 * with this interrupt.
		 */
		bail = to_irq_stack(&pending);
		if (bail)
			return;

		nested = pending & 1;
		pending &= ~1;

		while ((sig = ffs(pending)) != 0){
			sig--;
			pending &= ~(1 << sig);
			(*handlers[sig])(sig, mc);
		}

		/*
		 * Again, pending comes back with a mask of signals
		 * that arrived while tearing down the stack. If this
		 * is non-zero, we just go back, set up the stack
		 * again, and handle the new interrupts.
		 */
		if (!nested)
			pending = from_irq_stack(nested);
	} while (pending);
}
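/*
 * Worked example for the bookkeeping above: if this handler was entered for
 * SIGIO and a SIGVTALRM arrived while to_irq_stack() was switching stacks,
 * pending comes back as (1UL << SIGIO) | (1UL << SIGVTALRM), with bit 0 also
 * set if we were already running on the IRQ stack (nested).  The while loop
 * then clears each signal bit in turn and dispatches it through handlers[].
 */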

void set_handler(int sig)
{
	struct sigaction action;
	int flags = SA_SIGINFO | SA_ONSTACK;
	sigset_t sig_mask;

	action.sa_sigaction = hard_handler;

	/* block irq ones */
	sigemptyset(&action.sa_mask);
	sigaddset(&action.sa_mask, SIGVTALRM);
	sigaddset(&action.sa_mask, SIGIO);
	sigaddset(&action.sa_mask, SIGWINCH);

	if (sig == SIGSEGV)
		flags |= SA_NODEFER;

	if (sigismember(&action.sa_mask, sig))
		flags |= SA_RESTART; /* if it's an irq signal */

	action.sa_flags = flags;
	action.sa_restorer = NULL;
	if (sigaction(sig, &action, NULL) < 0)
		panic("sigaction failed - errno = %d\n", errno);

	sigemptyset(&sig_mask);
	sigaddset(&sig_mask, sig);
	if (sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0)
		panic("sigprocmask failed - errno = %d\n", errno);
}
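/*
 * In short, set_handler() installs hard_handler() for sig on the alternate
 * stack, keeps the IRQ signals blocked while it runs, lets SIGSEGV nest via
 * SA_NODEFER, marks the IRQ signals themselves SA_RESTART, and finally makes
 * sure sig is not blocked in the process signal mask.
 */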

int change_sig(int signal, int on)
{
	sigset_t sigset;

	sigemptyset(&sigset);
	sigaddset(&sigset, signal);
	if (sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, NULL) < 0)
		return -errno;

	return 0;
}

void block_signals(void)
{
	signals_enabled = 0;
	/*
	 * This must return with signals disabled, so this barrier
	 * ensures that writes are flushed out before the return.
	 * This might matter if gcc figures out how to inline this and
	 * decides to shuffle this code into the caller.
	 */
	barrier();
}

void unblock_signals(void)
{
	int save_pending;

	if (signals_enabled == 1)
		return;

	/*
	 * We loop because the IRQ handler returns with interrupts off. So,
	 * interrupts may have arrived and we need to re-enable them and
	 * recheck signals_pending.
	 */
	while (1) {
		/*
		 * Save and reset save_pending after enabling signals. This
		 * way, signals_pending won't be changed while we're reading it.
		 */
		signals_enabled = 1;

		/*
		 * Setting signals_enabled and reading signals_pending must
		 * happen in this order.
		 */
		barrier();

		save_pending = signals_pending;
		if (save_pending == 0)
			return;

		signals_pending = 0;

		/*
		 * We have pending interrupts, so disable signals, as the
		 * handlers expect them off when they are called. They will
		 * be enabled again above.
		 */

		signals_enabled = 0;

		/*
		 * Deal with SIGIO first because the alarm handler might
		 * schedule, leaving the pending SIGIO stranded until we come
		 * back here.
		 */
		if (save_pending & SIGIO_MASK)
			sig_handler_common(SIGIO, NULL);

		if (save_pending & SIGVTALRM_MASK)
			real_alarm_handler(NULL);
	}
}
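/*
 * Typical caller pattern (illustrative sketch, not taken from this file):
 *
 *	int flags = set_signals(0);
 *	...section that must not be interrupted...
 *	set_signals(flags);
 *
 * Any SIGIO or SIGVTALRM arriving in between is parked in signals_pending
 * and replayed by the unblock_signals() loop above.
 */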

int get_signals(void)
{
	return signals_enabled;
}

int set_signals(int enable)
{
	int ret;
	if (signals_enabled == enable)
		return enable;

	ret = signals_enabled;
	if (enable)
		unblock_signals();
	else block_signals();

	return ret;
}
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2004 PathScale, Inc
 * Copyright (C) 2004 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <stdlib.h>
#include <stdarg.h>
#include <stdbool.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <strings.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <sysdep/mcontext.h>
#include <um_malloc.h>
#include <sys/ucontext.h>
#include <timetravel.h>

void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
	[SIGTRAP] = relay_signal,
	[SIGFPE] = relay_signal,
	[SIGILL] = relay_signal,
	[SIGWINCH] = winch,
	[SIGBUS] = relay_signal,
	[SIGSEGV] = segv_handler,
	[SIGIO] = sigio_handler,
};
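/*
 * Unlike the older table above, SIGBUS is now simply relayed, and the timer
 * signal (SIGALRM) is not routed through sig_info[] at all:
 * timer_alarm_handler() below ends up calling timer_handler() directly.
 */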

static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
{
	struct uml_pt_regs r;
	int save_errno = errno;

	r.is_user = 0;
	if (sig == SIGSEGV) {
		/* For segfaults, we want the data from the sigcontext. */
		get_regs_from_mc(&r, mc);
		GET_FAULTINFO_FROM_MC(r.faultinfo, mc);
	}

	/* enable signals if sig isn't IRQ signal */
	if ((sig != SIGIO) && (sig != SIGWINCH))
		unblock_signals_trace();

	(*sig_info[sig])(sig, si, &r);

	errno = save_errno;
}

/*
 * These are the asynchronous signals. SIGPROF is excluded because we want to
 * be able to profile all of UML, not just the non-critical sections. If
 * profiling is not thread-safe, then that is not my problem. We can disable
 * profiling when SMP is enabled in that case.
 */
#define SIGIO_BIT 0
#define SIGIO_MASK (1 << SIGIO_BIT)

#define SIGALRM_BIT 1
#define SIGALRM_MASK (1 << SIGALRM_BIT)

int signals_enabled;
#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
static int signals_blocked, signals_blocked_pending;
#endif
static unsigned int signals_pending;
static unsigned int signals_active = 0;
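/*
 * signals_active records which asynchronous handler is currently running.
 * Only the SIGALRM bit is used: timer_alarm_handler() sets it around the
 * timer callback, and unblock_signals() checks it so a pending timer signal
 * is not replayed while we are still inside the timer handler.
 */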

static void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
{
	int enabled = signals_enabled;

#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
	if ((signals_blocked ||
	     __atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) &&
	    (sig == SIGIO)) {
		/* increment so unblock will do another round */
		__atomic_add_fetch(&signals_blocked_pending, 1,
				   __ATOMIC_SEQ_CST);
		return;
	}
#endif

	if (!enabled && (sig == SIGIO)) {
		/*
		 * In TT_MODE_EXTERNAL, need to still call time-travel
		 * handlers. This will mark signals_pending by itself
		 * (only if necessary.)
		 * Note we won't get here if signals are hard-blocked
		 * (which is handled above), in that case the hard-
		 * unblock will handle things.
		 */
		if (time_travel_mode == TT_MODE_EXTERNAL)
			sigio_run_timetravel_handlers();
		else
			signals_pending |= SIGIO_MASK;
		return;
	}

	block_signals_trace();

	sig_handler_common(sig, si, mc);

	um_set_signals_trace(enabled);
}

static void timer_real_alarm_handler(mcontext_t *mc)
{
	struct uml_pt_regs regs;

	if (mc != NULL)
		get_regs_from_mc(&regs, mc);
	else
		memset(&regs, 0, sizeof(regs));
	timer_handler(SIGALRM, NULL, &regs);
}

static void timer_alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
	int enabled;

	enabled = signals_enabled;
	if (!signals_enabled) {
		signals_pending |= SIGALRM_MASK;
		return;
	}

	block_signals_trace();

	signals_active |= SIGALRM_MASK;

	timer_real_alarm_handler(mc);

	signals_active &= ~SIGALRM_MASK;

	um_set_signals_trace(enabled);
}

void deliver_alarm(void) {
	timer_alarm_handler(SIGALRM, NULL, NULL);
}

void timer_set_signal_handler(void)
{
	set_handler(SIGALRM);
}

void set_sigstack(void *sig_stack, int size)
{
	stack_t stack = {
		.ss_flags = 0,
		.ss_sp = sig_stack,
		.ss_size = size
	};

	if (sigaltstack(&stack, NULL) != 0)
		panic("enabling signal stack failed, errno = %d\n", errno);
}

static void sigusr1_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
{
	uml_pm_wake();
}

void register_pm_wake_signal(void)
{
	set_handler(SIGUSR1);
}

static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
	[SIGSEGV] = sig_handler,
	[SIGBUS] = sig_handler,
	[SIGILL] = sig_handler,
	[SIGFPE] = sig_handler,
	[SIGTRAP] = sig_handler,

	[SIGIO] = sig_handler,
	[SIGWINCH] = sig_handler,
	[SIGALRM] = timer_alarm_handler,

	[SIGUSR1] = sigusr1_handler,
};

static void hard_handler(int sig, siginfo_t *si, void *p)
{
	ucontext_t *uc = p;
	mcontext_t *mc = &uc->uc_mcontext;

	(*handlers[sig])(sig, (struct siginfo *)si, mc);
}

void set_handler(int sig)
{
	struct sigaction action;
	int flags = SA_SIGINFO | SA_ONSTACK;
	sigset_t sig_mask;

	action.sa_sigaction = hard_handler;

	/* block irq ones */
	sigemptyset(&action.sa_mask);
	sigaddset(&action.sa_mask, SIGIO);
	sigaddset(&action.sa_mask, SIGWINCH);
	sigaddset(&action.sa_mask, SIGALRM);

	if (sig == SIGSEGV)
		flags |= SA_NODEFER;

	if (sigismember(&action.sa_mask, sig))
		flags |= SA_RESTART; /* if it's an irq signal */

	action.sa_flags = flags;
	action.sa_restorer = NULL;
	if (sigaction(sig, &action, NULL) < 0)
		panic("sigaction failed - errno = %d\n", errno);

	sigemptyset(&sig_mask);
	sigaddset(&sig_mask, sig);
	if (sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0)
		panic("sigprocmask failed - errno = %d\n", errno);
}

void send_sigio_to_self(void)
{
	kill(os_getpid(), SIGIO);
}

int change_sig(int signal, int on)
{
	sigset_t sigset;

	sigemptyset(&sigset);
	sigaddset(&sigset, signal);
	if (sigprocmask(on ? SIG_UNBLOCK : SIG_BLOCK, &sigset, NULL) < 0)
		return -errno;

	return 0;
}

void block_signals(void)
{
	signals_enabled = 0;
	/*
	 * This must return with signals disabled, so this barrier
	 * ensures that writes are flushed out before the return.
	 * This might matter if gcc figures out how to inline this and
	 * decides to shuffle this code into the caller.
	 */
	barrier();
}

void unblock_signals(void)
{
	int save_pending;

	if (signals_enabled == 1)
		return;

	signals_enabled = 1;
#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
	deliver_time_travel_irqs();
#endif

	/*
	 * We loop because the IRQ handler returns with interrupts off. So,
	 * interrupts may have arrived and we need to re-enable them and
	 * recheck signals_pending.
	 */
	while (1) {
		/*
		 * Save and reset save_pending after enabling signals. This
		 * way, signals_pending won't be changed while we're reading it.
		 *
		 * Setting signals_enabled and reading signals_pending must
		 * happen in this order, so have the barrier here.
		 */
		barrier();

		save_pending = signals_pending;
		if (save_pending == 0)
			return;

		signals_pending = 0;

		/*
		 * We have pending interrupts, so disable signals, as the
		 * handlers expect them off when they are called. They will
		 * be enabled again above. We need to trace this, as we're
		 * expected to be enabling interrupts already, but any more
		 * tracing that happens inside the handlers we call for the
		 * pending signals will mess up the tracing state.
		 */
		signals_enabled = 0;
		um_trace_signals_off();

		/*
		 * Deal with SIGIO first because the alarm handler might
		 * schedule, leaving the pending SIGIO stranded until we come
		 * back here.
		 *
		 * SIGIO's handler doesn't use siginfo or mcontext,
		 * so they can be NULL.
		 */
		if (save_pending & SIGIO_MASK)
			sig_handler_common(SIGIO, NULL, NULL);

		/* Do not reenter the handler */

		if ((save_pending & SIGALRM_MASK) && (!(signals_active & SIGALRM_MASK)))
			timer_real_alarm_handler(NULL);

		/* Rerun the loop only if there is still pending SIGIO and not in TIMER handler */

		if (!(signals_pending & SIGIO_MASK) && (signals_active & SIGALRM_MASK))
			return;

		/* Re-enable signals and trace that we're doing so. */
		um_trace_signals_on();
		signals_enabled = 1;
	}
}
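/*
 * Compared with the older unblock_signals() above, this version also
 * delivers time-travel IRQs (when CONFIG_UML_TIME_TRAVEL_SUPPORT is set),
 * keeps the signal-tracing state consistent via um_trace_signals_off()/
 * um_trace_signals_on(), and leaves the loop early when no further SIGIO is
 * pending and we are still inside the timer handler
 * (signals_active & SIGALRM_MASK).
 */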

int um_set_signals(int enable)
{
	int ret;
	if (signals_enabled == enable)
		return enable;

	ret = signals_enabled;
	if (enable)
		unblock_signals();
	else block_signals();

	return ret;
}

int um_set_signals_trace(int enable)
{
	int ret;
	if (signals_enabled == enable)
		return enable;

	ret = signals_enabled;
	if (enable)
		unblock_signals_trace();
	else
		block_signals_trace();

	return ret;
}

#if IS_ENABLED(CONFIG_UML_TIME_TRAVEL_SUPPORT)
void mark_sigio_pending(void)
{
	/*
	 * It would seem that this should be atomic so
	 * it isn't a read-modify-write with a signal
	 * that could happen in the middle, losing the
	 * value set by the signal.
	 *
	 * However, this function is only called when in
	 * time-travel=ext simulation mode, in which case
	 * the only signal ever pending is SIGIO, which
	 * is blocked while this can be called, and the
	 * timer signal (SIGALRM) cannot happen.
	 */
	signals_pending |= SIGIO_MASK;
}

void block_signals_hard(void)
{
	signals_blocked++;
	barrier();
}
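/*
 * Usage sketch (illustrative): block_signals_hard()/unblock_signals_hard()
 * nest, so code that must keep even the raw SIGIO handler from running, such
 * as the vu_req_read_message() path mentioned in the comment below, brackets
 * itself with
 *
 *	block_signals_hard();
 *	...section where the handler must not run at all...
 *	unblock_signals_hard();
 *
 * SIGIOs arriving in between only bump signals_blocked_pending and are
 * replayed by the outermost unblock_signals_hard().
 */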

void unblock_signals_hard(void)
{
	static bool unblocking;

	if (!signals_blocked)
		panic("unblocking signals while not blocked");

	if (--signals_blocked)
		return;
	/*
	 * Must be set to 0 before we check pending so the
	 * SIGIO handler will run as normal unless we're still
	 * going to process signals_blocked_pending.
	 */
	barrier();

	/*
	 * Note that block_signals_hard()/unblock_signals_hard() can be called
	 * within the unblock_signals()/sigio_run_timetravel_handlers() below.
	 * This would still be prone to race conditions since it's actually a
	 * call _within_ e.g. vu_req_read_message(), where we observed this
	 * issue, which loops. Thus, if the inner call handles the recorded
	 * pending signals, we can get out of the inner call with the real
	 * signal handler no longer blocked, and still have a race. Thus don't
	 * handle unblocking in the inner call, if it happens, but only in
	 * the outermost call - 'unblocking' serves as an ownership for the
	 * signals_blocked_pending decrement.
	 */
	if (unblocking)
		return;
	unblocking = true;

	while (__atomic_load_n(&signals_blocked_pending, __ATOMIC_SEQ_CST)) {
		if (signals_enabled) {
			/* signals are enabled so we can touch this */
			signals_pending |= SIGIO_MASK;
			/*
			 * this is a bit inefficient, but that's
			 * not really important
			 */
			block_signals();
			unblock_signals();
		} else {
			/*
			 * we need to run time-travel handlers even
			 * if not enabled
			 */
			sigio_run_timetravel_handlers();
		}

		/*
		 * The decrement of signals_blocked_pending must be atomic so
		 * that the signal handler will either happen before or after
		 * the decrement, not during a read-modify-write:
		 * - If it happens before, it can increment it and we'll
		 *   decrement it and do another round in the loop.
		 * - If it happens after it'll see 0 for both signals_blocked
		 *   and signals_blocked_pending and thus run the handler as
		 *   usual (subject to signals_enabled, but that's unrelated.)
		 *
		 * Note that a call to unblock_signals_hard() within the calls
		 * to unblock_signals() or sigio_run_timetravel_handlers() above
		 * will do nothing due to the 'unblocking' state, so this cannot
		 * underflow as the only one decrementing will be the outermost
		 * one.
		 */
		if (__atomic_sub_fetch(&signals_blocked_pending, 1,
				       __ATOMIC_SEQ_CST) < 0)
			panic("signals_blocked_pending underflow");
	}

	unblocking = false;
}
#endif
454#endif