1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2017 - Cambridge Greys Ltd
4 * Copyright (C) 2011 - 2014 Cisco Systems Inc
5 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
6 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
7 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
8 */
9
10#include <linux/cpumask.h>
11#include <linux/hardirq.h>
12#include <linux/interrupt.h>
13#include <linux/kernel_stat.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/seq_file.h>
17#include <linux/slab.h>
18#include <as-layout.h>
19#include <kern_util.h>
20#include <os.h>
21#include <irq_user.h>
22
23
24extern void free_irqs(void);
25
26/* When epoll triggers we do not know why it did so;
27 * we can also have different IRQs for read and write.
28 * This is why we keep a small irq_fd array for each fd -
29 * one entry per IRQ type.
30 */
31
32struct irq_entry {
33 struct irq_entry *next;
34 int fd;
35 struct irq_fd *irq_array[MAX_IRQ_TYPE + 1];
36};
37
38static struct irq_entry *active_fds;
39
40static DEFINE_SPINLOCK(irq_lock);
41
42static void irq_io_loop(struct irq_fd *irq, struct uml_pt_regs *regs)
43{
44/*
45 * irq->active guards against reentry
46 * irq->pending accumulates pending requests
47 * if pending is raised the irq_handler is re-run
48 * until pending is cleared
49 */
50 if (irq->active) {
51 irq->active = false;
52 do {
53 irq->pending = false;
54 do_IRQ(irq->irq, regs);
55 } while (irq->pending && (!irq->purge));
56 if (!irq->purge)
57 irq->active = true;
58 } else {
59 irq->pending = true;
60 }
61}
62
63void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
64{
65 struct irq_entry *irq_entry;
66 struct irq_fd *irq;
67
68 int n, i, j;
69
70 while (1) {
71 /* This is now lockless - epoll keeps back-references to the irqs
72 * which have triggered it, so there is no need to walk the irq
73 * list and lock it every time. We avoid locking by turning off
74 * IO for a specific fd with os_del_epoll_fd(fd) before we make
75 * any changes to the actual data structures
76 */
77 n = os_waiting_for_events_epoll();
78
79 if (n <= 0) {
80 if (n == -EINTR)
81 continue;
82 else
83 break;
84 }
85
86 for (i = 0; i < n ; i++) {
87 /* The epoll back-reference is the entry with 3 irq_fd
88 * slots - one for each irq type.
89 */
90 irq_entry = (struct irq_entry *)
91 os_epoll_get_data_pointer(i);
92 for (j = 0; j < MAX_IRQ_TYPE ; j++) {
93 irq = irq_entry->irq_array[j];
94 if (irq == NULL)
95 continue;
96 if (os_epoll_triggered(i, irq->events) > 0)
97 irq_io_loop(irq, regs);
98 if (irq->purge) {
99 irq_entry->irq_array[j] = NULL;
100 kfree(irq);
101 }
102 }
103 }
104 }
105
106 free_irqs();
107}
108
109static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
110{
111 int i;
112 int events = 0;
113 struct irq_fd *irq;
114
115 for (i = 0; i < MAX_IRQ_TYPE ; i++) {
116 irq = irq_entry->irq_array[i];
117 if (irq != NULL)
118 events = irq->events | events;
119 }
120 if (events > 0) {
121 /* os_add_epoll_fd() will modify the epoll registration if one already exists */
122 return os_add_epoll_fd(events, irq_entry->fd, irq_entry);
123 }
124 /* No events - delete */
125 return os_del_epoll_fd(irq_entry->fd);
126}
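
/*
 * Illustrative sketch, not part of this file: if one fd has both a read and
 * a write irq_fd registered, the function above folds their masks together
 * so epoll carries a single registration for the fd.  The helper below only
 * demonstrates that combination; IRQ_READ/IRQ_WRITE are assumed to be the
 * per-fd IRQ types from the UML headers, and example_single_epoll_slot is a
 * made-up name.
 */
static void example_single_epoll_slot(struct irq_entry *entry)
{
	/* OR of the per-type masks, as assign_epoll_events_to_irq() does */
	int both = os_event_mask(IRQ_READ) | os_event_mask(IRQ_WRITE);

	/* one epoll slot serves both IRQ types on this fd */
	os_add_epoll_fd(both, entry->fd, entry);
}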
127
128
129
130static int activate_fd(int irq, int fd, int type, void *dev_id)
131{
132 struct irq_fd *new_fd;
133 struct irq_entry *irq_entry;
134 int i, err, events;
135 unsigned long flags;
136
137 err = os_set_fd_async(fd);
138 if (err < 0)
139 goto out;
140
141 spin_lock_irqsave(&irq_lock, flags);
142
143 /* Check if we have an entry for this fd */
144
145 err = -EBUSY;
146 for (irq_entry = active_fds;
147 irq_entry != NULL; irq_entry = irq_entry->next) {
148 if (irq_entry->fd == fd)
149 break;
150 }
151
152 if (irq_entry == NULL) {
153 /* This needs to be atomic as it may be called from an
154 * IRQ context.
155 */
156 irq_entry = kmalloc(sizeof(struct irq_entry), GFP_ATOMIC);
157 if (irq_entry == NULL) {
158 printk(KERN_ERR
159 "Failed to allocate new IRQ entry\n");
160 goto out_unlock;
161 }
162 irq_entry->fd = fd;
163 for (i = 0; i < MAX_IRQ_TYPE; i++)
164 irq_entry->irq_array[i] = NULL;
165 irq_entry->next = active_fds;
166 active_fds = irq_entry;
167 }
168
169 /* Check if we are trying to re-register an interrupt for a
170 * particular fd
171 */
172
173 if (irq_entry->irq_array[type] != NULL) {
174 printk(KERN_ERR
175 "Trying to reregister IRQ %d FD %d TYPE %d ID %p\n",
176 irq, fd, type, dev_id
177 );
178 goto out_unlock;
179 } else {
180 /* New entry for this fd */
181
182 err = -ENOMEM;
183 new_fd = kmalloc(sizeof(struct irq_fd), GFP_ATOMIC);
184 if (new_fd == NULL)
185 goto out_unlock;
186
187 events = os_event_mask(type);
188
189 *new_fd = ((struct irq_fd) {
190 .id = dev_id,
191 .irq = irq,
192 .type = type,
193 .events = events,
194 .active = true,
195 .pending = false,
196 .purge = false
197 });
198 /* Turn off any IO on this fd - allows us to
199 * avoid locking the IRQ loop
200 */
201 os_del_epoll_fd(irq_entry->fd);
202 irq_entry->irq_array[type] = new_fd;
203 }
204
205 /* Turn back IO on with the correct (new) IO event mask */
206 assign_epoll_events_to_irq(irq_entry);
207 spin_unlock_irqrestore(&irq_lock, flags);
208 maybe_sigio_broken(fd, (type != IRQ_NONE));
209
210 return 0;
211out_unlock:
212 spin_unlock_irqrestore(&irq_lock, flags);
213out:
214 return err;
215}
216
217/*
218 * Walk the IRQ list and dispose of any unused entries.
219 * Should be done under irq_lock.
220 */
221
222static void garbage_collect_irq_entries(void)
223{
224 int i;
225 bool reap;
226 struct irq_entry *walk;
227 struct irq_entry *previous = NULL;
228 struct irq_entry *to_free;
229
230 if (active_fds == NULL)
231 return;
232 walk = active_fds;
233 while (walk != NULL) {
234 reap = true;
235 for (i = 0; i < MAX_IRQ_TYPE ; i++) {
236 if (walk->irq_array[i] != NULL) {
237 reap = false;
238 break;
239 }
240 }
241 if (reap) {
242 if (previous == NULL)
243 active_fds = walk->next;
244 else
245 previous->next = walk->next;
246 to_free = walk;
247 } else {
248 to_free = NULL;
249 }
250 walk = walk->next;
251 kfree(to_free);
252 }
253}
254
255/*
256 * Walk the IRQ list and get the descriptor for our FD
257 */
258
259static struct irq_entry *get_irq_entry_by_fd(int fd)
260{
261 struct irq_entry *walk = active_fds;
262
263 while (walk != NULL) {
264 if (walk->fd == fd)
265 return walk;
266 walk = walk->next;
267 }
268 return NULL;
269}
270
271
272/*
273 * Walk the IRQ list and dispose of an entry for a specific
274 * device, fd and IRQ number. Note - if an IRQ is shared for read
275 * and write on the same FD it will be disposed of in either case.
276 * If this behaviour is undesirable use different IRQ ids.
277 */
278
279#define IGNORE_IRQ 1
280#define IGNORE_DEV (1<<1)
281
282static void do_free_by_irq_and_dev(
283 struct irq_entry *irq_entry,
284 unsigned int irq,
285 void *dev,
286 int flags
287)
288{
289 int i;
290 struct irq_fd *to_free;
291
292 for (i = 0; i < MAX_IRQ_TYPE ; i++) {
293 if (irq_entry->irq_array[i] != NULL) {
294 if (
295 ((flags & IGNORE_IRQ) ||
296 (irq_entry->irq_array[i]->irq == irq)) &&
297 ((flags & IGNORE_DEV) ||
298 (irq_entry->irq_array[i]->id == dev))
299 ) {
300 /* Turn off any IO on this fd - allows us to
301 * avoid locking the IRQ loop
302 */
303 os_del_epoll_fd(irq_entry->fd);
304 to_free = irq_entry->irq_array[i];
305 irq_entry->irq_array[i] = NULL;
306 assign_epoll_events_to_irq(irq_entry);
307 if (to_free->active)
308 to_free->purge = true;
309 else
310 kfree(to_free);
311 }
312 }
313 }
314}
315
316void free_irq_by_fd(int fd)
317{
318 struct irq_entry *to_free;
319 unsigned long flags;
320
321 spin_lock_irqsave(&irq_lock, flags);
322 to_free = get_irq_entry_by_fd(fd);
323 if (to_free != NULL) {
324 do_free_by_irq_and_dev(
325 to_free,
326 -1,
327 NULL,
328 IGNORE_IRQ | IGNORE_DEV
329 );
330 }
331 garbage_collect_irq_entries();
332 spin_unlock_irqrestore(&irq_lock, flags);
333}
334EXPORT_SYMBOL(free_irq_by_fd);
335
336static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
337{
338 struct irq_entry *to_free;
339 unsigned long flags;
340
341 spin_lock_irqsave(&irq_lock, flags);
342 to_free = active_fds;
343 while (to_free != NULL) {
344 do_free_by_irq_and_dev(
345 to_free,
346 irq,
347 dev,
348 0
349 );
350 to_free = to_free->next;
351 }
352 garbage_collect_irq_entries();
353 spin_unlock_irqrestore(&irq_lock, flags);
354}
355
356
357void deactivate_fd(int fd, int irqnum)
358{
359 struct irq_entry *to_free;
360 unsigned long flags;
361
362 os_del_epoll_fd(fd);
363 spin_lock_irqsave(&irq_lock, flags);
364 to_free = get_irq_entry_by_fd(fd);
365 if (to_free != NULL) {
366 do_free_by_irq_and_dev(
367 to_free,
368 irqnum,
369 NULL,
370 IGNORE_DEV
371 );
372 }
373 garbage_collect_irq_entries();
374 spin_unlock_irqrestore(&irq_lock, flags);
375 ignore_sigio_fd(fd);
376}
377EXPORT_SYMBOL(deactivate_fd);
378
379/*
380 * Called just before shutdown in order to provide a clean exec
381 * environment in case the system is rebooting. No locking because
382 * that would cause a pointless shutdown hang if something hadn't
383 * released the lock.
384 */
385int deactivate_all_fds(void)
386{
387 struct irq_entry *to_free;
388
389 /* Stop IO. The IRQ loop has no lock so this is our
390 * only way of making sure we are safe to dispose
391 * of all IRQ handlers
392 */
393 os_set_ioignore();
394 to_free = active_fds;
395 while (to_free != NULL) {
396 do_free_by_irq_and_dev(
397 to_free,
398 -1,
399 NULL,
400 IGNORE_IRQ | IGNORE_DEV
401 );
402 to_free = to_free->next;
403 }
404 /* don't garbage collect - we can no longer call kfree() here */
405 os_close_epoll_fd();
406 return 0;
407}
408
409/*
410 * do_IRQ handles all normal device IRQs (the special
411 * SMP cross-CPU interrupts have their own specific
412 * handlers).
413 */
414unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
415{
416 struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
417 irq_enter();
418 generic_handle_irq(irq);
419 irq_exit();
420 set_irq_regs(old_regs);
421 return 1;
422}
423
424void um_free_irq(unsigned int irq, void *dev)
425{
426 free_irq_by_irq_and_dev(irq, dev);
427 free_irq(irq, dev);
428}
429EXPORT_SYMBOL(um_free_irq);
430
431int um_request_irq(unsigned int irq, int fd, int type,
432 irq_handler_t handler,
433 unsigned long irqflags, const char * devname,
434 void *dev_id)
435{
436 int err;
437
438 if (fd != -1) {
439 err = activate_fd(irq, fd, type, dev_id);
440 if (err)
441 return err;
442 }
443
444 return request_irq(irq, handler, irqflags, devname, dev_id);
445}
446
447EXPORT_SYMBOL(um_request_irq);
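
/*
 * Illustrative usage sketch, not part of this file: a hypothetical driver
 * wiring a host file descriptor to an IRQ number.  example_intr,
 * example_attach and example_detach are made-up names; um_request_irq() and
 * um_free_irq() are the functions defined above, and IRQ_READ is the UML
 * read IRQ type from the UML headers.
 */
static irqreturn_t example_intr(int irq, void *dev_id)
{
	/* read/drain the fd and kick the driver state machine here */
	return IRQ_HANDLED;
}

static int example_attach(int irq, int fd, void *dev)
{
	/* raise "irq" whenever "fd" becomes readable */
	return um_request_irq(irq, fd, IRQ_READ, example_intr, 0,
			      "example", dev);
}

static void example_detach(int irq, void *dev)
{
	/* drops the epoll registration and the genirq handler */
	um_free_irq(irq, dev);
}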
448
449/*
450 * irq_chip must define at least enable/disable and ack when
451 * the edge handler is used.
452 */
453static void dummy(struct irq_data *d)
454{
455}
456
457/* This is used for everything other than the timer. */
458static struct irq_chip normal_irq_type = {
459 .name = "SIGIO",
460 .irq_disable = dummy,
461 .irq_enable = dummy,
462 .irq_ack = dummy,
463 .irq_mask = dummy,
464 .irq_unmask = dummy,
465};
466
467static struct irq_chip SIGVTALRM_irq_type = {
468 .name = "SIGVTALRM",
469 .irq_disable = dummy,
470 .irq_enable = dummy,
471 .irq_ack = dummy,
472 .irq_mask = dummy,
473 .irq_unmask = dummy,
474};
475
476void __init init_IRQ(void)
477{
478 int i;
479
480 irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
481
482
483 for (i = 1; i <= LAST_IRQ; i++)
484 irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
485 /* Initialize EPOLL Loop */
486 os_setup_epoll();
487}
488
489/*
490 * IRQ stack entry and exit:
491 *
492 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
493 * and switch over to the IRQ stack after some preparation. We use
494 * sigaltstack to receive signals on a separate stack from the start.
495 * These two functions make sure the rest of the kernel won't be too
496 * upset by being on a different stack. The IRQ stack has a
497 * thread_info structure at the bottom so that current et al continue
498 * to work.
499 *
500 * to_irq_stack copies the current task's thread_info to the IRQ stack
501 * thread_info and sets the task's stack to point to the IRQ stack.
502 *
503 * from_irq_stack copies the thread_info struct back (flags may have
504 * been modified) and resets the task's stack pointer.
505 *
506 * Tricky bits -
507 *
508 * What happens when two signals race each other? UML doesn't block
509 * signals with sigprocmask, SA_NODEFER, or sa_mask, so a second signal
510 * could arrive while a previous one is still setting up the
511 * thread_info.
512 *
513 * There are three cases -
514 * The first interrupt on the stack - sets up the thread_info and
515 * handles the interrupt
516 * A nested interrupt interrupting the copying of the thread_info -
517 * can't handle the interrupt, as the stack is in an unknown state
518 * A nested interrupt not interrupting the copying of the
519 * thread_info - doesn't do any setup, just handles the interrupt
520 *
521 * The first job is to figure out whether we interrupted stack setup.
522 * This is done by xchging the signal mask with thread_info->pending.
523 * If the value that comes back is zero, then there is no setup in
524 * progress, and the interrupt can be handled. If the value is
525 * non-zero, then there is stack setup in progress. In order to have
526 * the interrupt handled, we leave our signal in the mask, and it will
527 * be handled by the upper handler after it has set up the stack.
528 *
529 * Next is to figure out whether we are the outer handler or a nested
530 * one. As part of setting up the stack, thread_info->real_thread is
531 * set to non-NULL (and is reset to NULL on exit). This is the
532 * nesting indicator. If it is non-NULL, then the stack is already
533 * set up and the handler can run.
534 */
535
536static unsigned long pending_mask;
537
538unsigned long to_irq_stack(unsigned long *mask_out)
539{
540 struct thread_info *ti;
541 unsigned long mask, old;
542 int nested;
543
544 mask = xchg(&pending_mask, *mask_out);
545 if (mask != 0) {
546 /*
547 * If any interrupts come in at this point, we want to
548 * make sure that their bits aren't lost by our
549 * putting our bit in. So, this loop accumulates bits
550 * until xchg returns the same value that we put in.
551 * When that happens, there were no new interrupts,
552 * and pending_mask contains a bit for each interrupt
553 * that came in.
554 */
555 old = *mask_out;
556 do {
557 old |= mask;
558 mask = xchg(&pending_mask, old);
559 } while (mask != old);
560 return 1;
561 }
562
563 ti = current_thread_info();
564 nested = (ti->real_thread != NULL);
565 if (!nested) {
566 struct task_struct *task;
567 struct thread_info *tti;
568
569 task = cpu_tasks[ti->cpu].task;
570 tti = task_thread_info(task);
571
572 *ti = *tti;
573 ti->real_thread = tti;
574 task->stack = ti;
575 }
576
577 mask = xchg(&pending_mask, 0);
578 *mask_out |= mask | nested;
579 return 0;
580}
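
/*
 * Condensed stand-alone sketch of the mask-merging step above (illustrative
 * only, not part of this file; assumes the same xchg() semantics).  It
 * returns non-zero when another handler owns stack setup and will service
 * the caller's bit.
 */
static int example_merge_pending(unsigned long *pending, unsigned long my_bit)
{
	unsigned long mask, old;

	mask = xchg(pending, my_bit);
	if (mask == 0)
		return 0;	/* no setup in progress - handle it ourselves */

	/*
	 * Keep folding our bit (and any new ones) back in until one xchg()
	 * round returns exactly what we wrote, i.e. no further signals
	 * raced in.
	 */
	old = my_bit;
	do {
		old |= mask;
		mask = xchg(pending, old);
	} while (mask != old);

	return 1;		/* the outer handler will service my_bit */
}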
581
582unsigned long from_irq_stack(int nested)
583{
584 struct thread_info *ti, *to;
585 unsigned long mask;
586
587 ti = current_thread_info();
588
589 pending_mask = 1;
590
591 to = ti->real_thread;
592 current->stack = to;
593 ti->real_thread = NULL;
594 *to = *ti;
595
596 mask = xchg(&pending_mask, 0);
597 return mask & ~1;
598}
599
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2017 - Cambridge Greys Ltd
4 * Copyright (C) 2011 - 2014 Cisco Systems Inc
5 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
6 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
7 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
8 */
9
10#include <linux/cpumask.h>
11#include <linux/hardirq.h>
12#include <linux/interrupt.h>
13#include <linux/kernel_stat.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/seq_file.h>
17#include <linux/slab.h>
18#include <as-layout.h>
19#include <kern_util.h>
20#include <os.h>
21#include <irq_user.h>
22#include <irq_kern.h>
23#include <linux/time-internal.h>
24
25
26extern void free_irqs(void);
27
28/* When epoll triggers we do not know why it did so;
29 * we can also have different IRQs for read and write.
30 * This is why we keep a small irq_reg array for each fd -
31 * one entry per IRQ type.
32 */
33struct irq_reg {
34 void *id;
35 int irq;
36 /* it's cheaper to store this than to query it */
37 int events;
38 bool active;
39 bool pending;
40 bool wakeup;
41#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
42 bool pending_on_resume;
43 void (*timetravel_handler)(int, int, void *,
44 struct time_travel_event *);
45 struct time_travel_event event;
46#endif
47};
48
49struct irq_entry {
50 struct list_head list;
51 int fd;
52 struct irq_reg reg[NUM_IRQ_TYPES];
53 bool suspended;
54 bool sigio_workaround;
55};
56
57static DEFINE_SPINLOCK(irq_lock);
58static LIST_HEAD(active_fds);
59static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
60static bool irqs_suspended;
61
62static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
63{
64/*
65 * irq->active guards against reentry
66 * irq->pending accumulates pending requests
67 * if pending is raised the irq_handler is re-run
68 * until pending is cleared
69 */
70 if (irq->active) {
71 irq->active = false;
72
73 do {
74 irq->pending = false;
75 do_IRQ(irq->irq, regs);
76 } while (irq->pending);
77
78 irq->active = true;
79 } else {
80 irq->pending = true;
81 }
82}
83
84#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
85static void irq_event_handler(struct time_travel_event *ev)
86{
87 struct irq_reg *reg = container_of(ev, struct irq_reg, event);
88
89 /* do nothing if suspended - just to cause a wakeup */
90 if (irqs_suspended)
91 return;
92
93 generic_handle_irq(reg->irq);
94}
95
96static bool irq_do_timetravel_handler(struct irq_entry *entry,
97 enum um_irq_type t)
98{
99 struct irq_reg *reg = &entry->reg[t];
100
101 if (!reg->timetravel_handler)
102 return false;
103
104 /*
105 * Handle all messages - we might get multiple even while
106 * interrupts are already suspended, due to suspend order
107 * etc. Note that time_travel_add_irq_event() will not add
108 * an event twice; if it's pending already, "first wins".
109 */
110 reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);
111
112 if (!reg->event.pending)
113 return false;
114
115 if (irqs_suspended)
116 reg->pending_on_resume = true;
117 return true;
118}
119#else
120static bool irq_do_timetravel_handler(struct irq_entry *entry,
121 enum um_irq_type t)
122{
123 return false;
124}
125#endif
126
127static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
128 struct uml_pt_regs *regs,
129 bool timetravel_handlers_only)
130{
131 struct irq_reg *reg = &entry->reg[t];
132
133 if (!reg->events)
134 return;
135
136 if (os_epoll_triggered(idx, reg->events) <= 0)
137 return;
138
139 if (irq_do_timetravel_handler(entry, t))
140 return;
141
142 /*
143 * If we're called to only run time-travel handlers then don't
144 * actually proceed but mark sigio as pending (if applicable).
145 * For suspend/resume, timetravel_handlers_only may be true
146 * despite time-travel not being configured and used.
147 */
148 if (timetravel_handlers_only) {
149#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
150 mark_sigio_pending();
151#endif
152 return;
153 }
154
155 irq_io_loop(reg, regs);
156}
157
158static void _sigio_handler(struct uml_pt_regs *regs,
159 bool timetravel_handlers_only)
160{
161 struct irq_entry *irq_entry;
162 int n, i;
163
164 if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
165 return;
166
167 while (1) {
168 /* This is now lockless - epoll keeps back-references to the irqs
169 * which have triggered it, so there is no need to walk the irq
170 * list and lock it every time. We avoid locking by turning off
171 * IO for a specific fd with os_del_epoll_fd(fd) before we make
172 * any changes to the actual data structures
173 */
174 n = os_waiting_for_events_epoll();
175
176 if (n <= 0) {
177 if (n == -EINTR)
178 continue;
179 else
180 break;
181 }
182
183 for (i = 0; i < n ; i++) {
184 enum um_irq_type t;
185
186 irq_entry = os_epoll_get_data_pointer(i);
187
188 for (t = 0; t < NUM_IRQ_TYPES; t++)
189 sigio_reg_handler(i, irq_entry, t, regs,
190 timetravel_handlers_only);
191 }
192 }
193
194 if (!timetravel_handlers_only)
195 free_irqs();
196}
197
198void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
199{
200 _sigio_handler(regs, irqs_suspended);
201}
202
203static struct irq_entry *get_irq_entry_by_fd(int fd)
204{
205 struct irq_entry *walk;
206
207 lockdep_assert_held(&irq_lock);
208
209 list_for_each_entry(walk, &active_fds, list) {
210 if (walk->fd == fd)
211 return walk;
212 }
213
214 return NULL;
215}
216
217static void free_irq_entry(struct irq_entry *to_free, bool remove)
218{
219 if (!to_free)
220 return;
221
222 if (remove)
223 os_del_epoll_fd(to_free->fd);
224 list_del(&to_free->list);
225 kfree(to_free);
226}
227
228static bool update_irq_entry(struct irq_entry *entry)
229{
230 enum um_irq_type i;
231 int events = 0;
232
233 for (i = 0; i < NUM_IRQ_TYPES; i++)
234 events |= entry->reg[i].events;
235
236 if (events) {
237 /* will modify (instead of add) if needed */
238 os_add_epoll_fd(events, entry->fd, entry);
239 return true;
240 }
241
242 os_del_epoll_fd(entry->fd);
243 return false;
244}
245
246static void update_or_free_irq_entry(struct irq_entry *entry)
247{
248 if (!update_irq_entry(entry))
249 free_irq_entry(entry, false);
250}
251
252static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
253 void (*timetravel_handler)(int, int, void *,
254 struct time_travel_event *))
255{
256 struct irq_entry *irq_entry;
257 int err, events = os_event_mask(type);
258 unsigned long flags;
259
260 err = os_set_fd_async(fd);
261 if (err < 0)
262 goto out;
263
264 spin_lock_irqsave(&irq_lock, flags);
265 irq_entry = get_irq_entry_by_fd(fd);
266 if (irq_entry) {
267 /* cannot register the same FD twice with the same type */
268 if (WARN_ON(irq_entry->reg[type].events)) {
269 err = -EALREADY;
270 goto out_unlock;
271 }
272
273 /* temporarily disable to avoid IRQ-side locking */
274 os_del_epoll_fd(fd);
275 } else {
276 irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
277 if (!irq_entry) {
278 err = -ENOMEM;
279 goto out_unlock;
280 }
281 irq_entry->fd = fd;
282 list_add_tail(&irq_entry->list, &active_fds);
283 maybe_sigio_broken(fd);
284 }
285
286 irq_entry->reg[type].id = dev_id;
287 irq_entry->reg[type].irq = irq;
288 irq_entry->reg[type].active = true;
289 irq_entry->reg[type].events = events;
290
291#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
292 if (um_irq_timetravel_handler_used()) {
293 irq_entry->reg[type].timetravel_handler = timetravel_handler;
294 irq_entry->reg[type].event.fn = irq_event_handler;
295 }
296#endif
297
298 WARN_ON(!update_irq_entry(irq_entry));
299 spin_unlock_irqrestore(&irq_lock, flags);
300
301 return 0;
302out_unlock:
303 spin_unlock_irqrestore(&irq_lock, flags);
304out:
305 return err;
306}
307
308/*
309 * Remove the entry or entries for a specific FD. If you
310 * don't want to remove all the possible entries, use
311 * um_free_irq() or deactivate_fd() instead.
312 */
313void free_irq_by_fd(int fd)
314{
315 struct irq_entry *to_free;
316 unsigned long flags;
317
318 spin_lock_irqsave(&irq_lock, flags);
319 to_free = get_irq_entry_by_fd(fd);
320 free_irq_entry(to_free, true);
321 spin_unlock_irqrestore(&irq_lock, flags);
322}
323EXPORT_SYMBOL(free_irq_by_fd);
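
/*
 * Illustrative teardown sketch, not part of this file: a driver closing its
 * host fd can drop every registration on it in one call, while a driver
 * removing a single interrupt uses um_free_irq() with the irq and device id
 * it registered.  The function names are made up; the calls are the ones
 * defined in this file.
 */
static void example_close_fd(int fd)
{
	/* removes the IRQ_READ and IRQ_WRITE registrations alike
	 * (the genirq handlers registered via request_irq() remain)
	 */
	free_irq_by_fd(fd);
}

static void example_remove_one(int irq, void *dev)
{
	/* removes only the matching (irq, dev) slot and frees the genirq side */
	um_free_irq(irq, dev);
}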
324
325static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
326{
327 struct irq_entry *entry;
328 unsigned long flags;
329
330 spin_lock_irqsave(&irq_lock, flags);
331 list_for_each_entry(entry, &active_fds, list) {
332 enum um_irq_type i;
333
334 for (i = 0; i < NUM_IRQ_TYPES; i++) {
335 struct irq_reg *reg = &entry->reg[i];
336
337 if (!reg->events)
338 continue;
339 if (reg->irq != irq)
340 continue;
341 if (reg->id != dev)
342 continue;
343
344 os_del_epoll_fd(entry->fd);
345 reg->events = 0;
346 update_or_free_irq_entry(entry);
347 goto out;
348 }
349 }
350out:
351 spin_unlock_irqrestore(&irq_lock, flags);
352}
353
354void deactivate_fd(int fd, int irqnum)
355{
356 struct irq_entry *entry;
357 unsigned long flags;
358 enum um_irq_type i;
359
360 os_del_epoll_fd(fd);
361
362 spin_lock_irqsave(&irq_lock, flags);
363 entry = get_irq_entry_by_fd(fd);
364 if (!entry)
365 goto out;
366
367 for (i = 0; i < NUM_IRQ_TYPES; i++) {
368 if (!entry->reg[i].events)
369 continue;
370 if (entry->reg[i].irq == irqnum)
371 entry->reg[i].events = 0;
372 }
373
374 update_or_free_irq_entry(entry);
375out:
376 spin_unlock_irqrestore(&irq_lock, flags);
377
378 ignore_sigio_fd(fd);
379}
380EXPORT_SYMBOL(deactivate_fd);
381
382/*
383 * Called just before shutdown in order to provide a clean exec
384 * environment in case the system is rebooting. No locking because
385 * that would cause a pointless shutdown hang if something hadn't
386 * released the lock.
387 */
388int deactivate_all_fds(void)
389{
390 struct irq_entry *entry;
391
392 /* Stop IO. The IRQ loop has no lock so this is our
393 * only way of making sure we are safe to dispose
394 * of all IRQ handlers
395 */
396 os_set_ioignore();
397
398 /* we can no longer call kfree() here so just deactivate */
399 list_for_each_entry(entry, &active_fds, list)
400 os_del_epoll_fd(entry->fd);
401 os_close_epoll_fd();
402 return 0;
403}
404
405/*
406 * do_IRQ handles all normal device IRQs (the special
407 * SMP cross-CPU interrupts have their own specific
408 * handlers).
409 */
410unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
411{
412 struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
413 irq_enter();
414 generic_handle_irq(irq);
415 irq_exit();
416 set_irq_regs(old_regs);
417 return 1;
418}
419
420void um_free_irq(int irq, void *dev)
421{
422 if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
423 "freeing invalid irq %d", irq))
424 return;
425
426 free_irq_by_irq_and_dev(irq, dev);
427 free_irq(irq, dev);
428 clear_bit(irq, irqs_allocated);
429}
430EXPORT_SYMBOL(um_free_irq);
431
432static int
433_um_request_irq(int irq, int fd, enum um_irq_type type,
434 irq_handler_t handler, unsigned long irqflags,
435 const char *devname, void *dev_id,
436 void (*timetravel_handler)(int, int, void *,
437 struct time_travel_event *))
438{
439 int err;
440
441 if (irq == UM_IRQ_ALLOC) {
442 int i;
443
444 for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
445 if (!test_and_set_bit(i, irqs_allocated)) {
446 irq = i;
447 break;
448 }
449 }
450 }
451
452 if (irq < 0)
453 return -ENOSPC;
454
455 if (fd != -1) {
456 err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
457 if (err)
458 goto error;
459 }
460
461 err = request_irq(irq, handler, irqflags, devname, dev_id);
462 if (err < 0)
463 goto error;
464
465 return irq;
466error:
467 clear_bit(irq, irqs_allocated);
468 return err;
469}
470
471int um_request_irq(int irq, int fd, enum um_irq_type type,
472 irq_handler_t handler, unsigned long irqflags,
473 const char *devname, void *dev_id)
474{
475 return _um_request_irq(irq, fd, type, handler, irqflags,
476 devname, dev_id, NULL);
477}
478EXPORT_SYMBOL(um_request_irq);
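
/*
 * Illustrative usage sketch, not part of this file: a hypothetical driver
 * letting the core pick a free dynamic IRQ for a host fd.  example_rx_intr
 * and example_attach_dyn are made-up names; UM_IRQ_ALLOC, IRQ_READ and
 * um_free_irq() are used as defined above.
 */
static irqreturn_t example_rx_intr(int irq, void *dev_id)
{
	/* drain the fd / kick the driver state machine here */
	return IRQ_HANDLED;
}

static int example_attach_dyn(int fd, void *dev)
{
	int irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ, example_rx_intr,
				 0, "example", dev);

	if (irq < 0)	/* -ENOSPC if no dynamic IRQ number is free */
		return irq;

	/*
	 * A second IRQ_READ registration on the same fd would fail with
	 * -EALREADY; tear down later with um_free_irq(irq, dev).
	 */
	return irq;
}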
479
480#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
481int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
482 irq_handler_t handler, unsigned long irqflags,
483 const char *devname, void *dev_id,
484 void (*timetravel_handler)(int, int, void *,
485 struct time_travel_event *))
486{
487 return _um_request_irq(irq, fd, type, handler, irqflags,
488 devname, dev_id, timetravel_handler);
489}
490EXPORT_SYMBOL(um_request_irq_tt);
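
/*
 * Illustrative sketch, not part of this file: a time-travel aware driver
 * passes an extra callback that runs from the signal path even while IRQs
 * are suspended.  A minimal one just schedules the event so the interrupt
 * is delivered at the right point in simulated time, relying on the
 * time_travel_add_irq_event() behaviour described earlier (its exact
 * signature is assumed here).  All names are made up.
 */
static void example_tt_handler(int irq, int fd, void *data,
			       struct time_travel_event *ev)
{
	/* "first wins": adding an already-pending event is a no-op */
	time_travel_add_irq_event(ev);
}

static int example_attach_tt(int fd, irq_handler_t handler, void *dev)
{
	return um_request_irq_tt(UM_IRQ_ALLOC, fd, IRQ_READ, handler,
				 0, "example-tt", dev, example_tt_handler);
}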
491
492void sigio_run_timetravel_handlers(void)
493{
494 _sigio_handler(NULL, true);
495}
496#endif
497
498#ifdef CONFIG_PM_SLEEP
499void um_irqs_suspend(void)
500{
501 struct irq_entry *entry;
502 unsigned long flags;
503
504 irqs_suspended = true;
505
506 spin_lock_irqsave(&irq_lock, flags);
507 list_for_each_entry(entry, &active_fds, list) {
508 enum um_irq_type t;
509 bool clear = true;
510
511 for (t = 0; t < NUM_IRQ_TYPES; t++) {
512 if (!entry->reg[t].events)
513 continue;
514
515 /*
516 * For the SIGIO_WRITE_IRQ, which is used to handle the
517 * SIGIO workaround thread, we need special handling:
518 * keep wake enabled for this IRQ itself, but below we tell
519 * it about any FDs that should be suspended.
520 */
521 if (entry->reg[t].wakeup ||
522 entry->reg[t].irq == SIGIO_WRITE_IRQ
523#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
524 || entry->reg[t].timetravel_handler
525#endif
526 ) {
527 clear = false;
528 break;
529 }
530 }
531
532 if (clear) {
533 entry->suspended = true;
534 os_clear_fd_async(entry->fd);
535 entry->sigio_workaround =
536 !__ignore_sigio_fd(entry->fd);
537 }
538 }
539 spin_unlock_irqrestore(&irq_lock, flags);
540}
541
542void um_irqs_resume(void)
543{
544 struct irq_entry *entry;
545 unsigned long flags;
546
547
548 local_irq_save(flags);
549#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
550 /*
551 * We don't need to lock anything here since we're in resume
552 * and nothing else is running, but we have disabled IRQs so
553 * that nothing touches the interrupt list from interrupt context.
554 */
555 list_for_each_entry(entry, &active_fds, list) {
556 enum um_irq_type t;
557
558 for (t = 0; t < NUM_IRQ_TYPES; t++) {
559 struct irq_reg *reg = &entry->reg[t];
560
561 if (reg->pending_on_resume) {
562 irq_enter();
563 generic_handle_irq(reg->irq);
564 irq_exit();
565 reg->pending_on_resume = false;
566 }
567 }
568 }
569#endif
570
571 spin_lock(&irq_lock);
572 list_for_each_entry(entry, &active_fds, list) {
573 if (entry->suspended) {
574 int err = os_set_fd_async(entry->fd);
575
576 WARN(err < 0, "os_set_fd_async returned %d\n", err);
577 entry->suspended = false;
578
579 if (entry->sigio_workaround) {
580 err = __add_sigio_fd(entry->fd);
581 WARN(err < 0, "add_sigio_returned %d\n", err);
582 }
583 }
584 }
585 spin_unlock_irqrestore(&irq_lock, flags);
586
587 irqs_suspended = false;
588 send_sigio_to_self();
589}
590
591static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
592{
593 struct irq_entry *entry;
594 unsigned long flags;
595
596 spin_lock_irqsave(&irq_lock, flags);
597 list_for_each_entry(entry, &active_fds, list) {
598 enum um_irq_type t;
599
600 for (t = 0; t < NUM_IRQ_TYPES; t++) {
601 if (!entry->reg[t].events)
602 continue;
603
604 if (entry->reg[t].irq != d->irq)
605 continue;
606 entry->reg[t].wakeup = on;
607 goto unlock;
608 }
609 }
610unlock:
611 spin_unlock_irqrestore(&irq_lock, flags);
612 return 0;
613}
614#else
615#define normal_irq_set_wake NULL
616#endif
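
/*
 * Illustrative sketch, not part of this file: with PM sleep support enabled,
 * a driver that wants its fd to stay armed across suspend marks the
 * interrupt as a wakeup source.  The generic enable_irq_wake() call reaches
 * normal_irq_set_wake() above, which sets reg->wakeup so um_irqs_suspend()
 * leaves the fd alone.  example_mark_wakeup is a made-up name.
 */
static void example_mark_wakeup(int irq)
{
	enable_irq_wake(irq);	/* reg->wakeup becomes true for this irq */
}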
617
618/*
619 * irq_chip must define at least enable/disable and ack when
620 * the edge handler is used.
621 */
622static void dummy(struct irq_data *d)
623{
624}
625
626/* This is used for everything other than the timer. */
627static struct irq_chip normal_irq_type = {
628 .name = "SIGIO",
629 .irq_disable = dummy,
630 .irq_enable = dummy,
631 .irq_ack = dummy,
632 .irq_mask = dummy,
633 .irq_unmask = dummy,
634 .irq_set_wake = normal_irq_set_wake,
635};
636
637static struct irq_chip alarm_irq_type = {
638 .name = "SIGALRM",
639 .irq_disable = dummy,
640 .irq_enable = dummy,
641 .irq_ack = dummy,
642 .irq_mask = dummy,
643 .irq_unmask = dummy,
644};
645
646void __init init_IRQ(void)
647{
648 int i;
649
650 irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);
651
652 for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
653 irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
654 /* Initialize EPOLL Loop */
655 os_setup_epoll();
656}
657
658/*
659 * IRQ stack entry and exit:
660 *
661 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
662 * and switch over to the IRQ stack after some preparation. We use
663 * sigaltstack to receive signals on a separate stack from the start.
664 * These two functions make sure the rest of the kernel won't be too
665 * upset by being on a different stack. The IRQ stack has a
666 * thread_info structure at the bottom so that current et al continue
667 * to work.
668 *
669 * to_irq_stack copies the current task's thread_info to the IRQ stack
670 * thread_info and sets the task's stack to point to the IRQ stack.
671 *
672 * from_irq_stack copies the thread_info struct back (flags may have
673 * been modified) and resets the task's stack pointer.
674 *
675 * Tricky bits -
676 *
677 * What happens when two signals race each other? UML doesn't block
678 * signals with sigprocmask, SA_NODEFER, or sa_mask, so a second signal
679 * could arrive while a previous one is still setting up the
680 * thread_info.
681 *
682 * There are three cases -
683 * The first interrupt on the stack - sets up the thread_info and
684 * handles the interrupt
685 * A nested interrupt interrupting the copying of the thread_info -
686 * can't handle the interrupt, as the stack is in an unknown state
687 * A nested interrupt not interrupting the copying of the
688 * thread_info - doesn't do any setup, just handles the interrupt
689 *
690 * The first job is to figure out whether we interrupted stack setup.
691 * This is done by xchging the signal mask with thread_info->pending.
692 * If the value that comes back is zero, then there is no setup in
693 * progress, and the interrupt can be handled. If the value is
694 * non-zero, then there is stack setup in progress. In order to have
695 * the interrupt handled, we leave our signal in the mask, and it will
696 * be handled by the upper handler after it has set up the stack.
697 *
698 * Next is to figure out whether we are the outer handler or a nested
699 * one. As part of setting up the stack, thread_info->real_thread is
700 * set to non-NULL (and is reset to NULL on exit). This is the
701 * nesting indicator. If it is non-NULL, then the stack is already
702 * set up and the handler can run.
703 */
704
705static unsigned long pending_mask;
706
707unsigned long to_irq_stack(unsigned long *mask_out)
708{
709 struct thread_info *ti;
710 unsigned long mask, old;
711 int nested;
712
713 mask = xchg(&pending_mask, *mask_out);
714 if (mask != 0) {
715 /*
716 * If any interrupts come in at this point, we want to
717 * make sure that their bits aren't lost by our
718 * putting our bit in. So, this loop accumulates bits
719 * until xchg returns the same value that we put in.
720 * When that happens, there were no new interrupts,
721 * and pending_mask contains a bit for each interrupt
722 * that came in.
723 */
724 old = *mask_out;
725 do {
726 old |= mask;
727 mask = xchg(&pending_mask, old);
728 } while (mask != old);
729 return 1;
730 }
731
732 ti = current_thread_info();
733 nested = (ti->real_thread != NULL);
734 if (!nested) {
735 struct task_struct *task;
736 struct thread_info *tti;
737
738 task = cpu_tasks[ti->cpu].task;
739 tti = task_thread_info(task);
740
741 *ti = *tti;
742 ti->real_thread = tti;
743 task->stack = ti;
744 }
745
746 mask = xchg(&pending_mask, 0);
747 *mask_out |= mask | nested;
748 return 0;
749}
750
751unsigned long from_irq_stack(int nested)
752{
753 struct thread_info *ti, *to;
754 unsigned long mask;
755
756 ti = current_thread_info();
757
758 pending_mask = 1;
759
760 to = ti->real_thread;
761 current->stack = to;
762 ti->real_thread = NULL;
763 *to = *ti;
764
765 mask = xchg(&pending_mask, 0);
766 return mask & ~1;
767}
768