arch/um/kernel/irq.c (v3.5.6)
  1/*
  2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3 * Licensed under the GPL
  4 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
  5 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
  6 */
  7
  8#include "linux/cpumask.h"
  9#include "linux/hardirq.h"
 10#include "linux/interrupt.h"
 11#include "linux/kernel_stat.h"
 12#include "linux/module.h"
 13#include "linux/sched.h"
 14#include "linux/seq_file.h"
 15#include "linux/slab.h"
 16#include "as-layout.h"
 17#include "kern_util.h"
 18#include "os.h"
 19
 20/*
 21 * This list is accessed under irq_lock, except in sigio_handler,
 22 * where it is safe from being modified.  IRQ handlers won't change it -
 23 * if an IRQ source has vanished, it will be freed by free_irqs just
 24 * before returning from sigio_handler.  That will process a separate
 25 * list of irqs to free, with its own locking, coming back here to
 26 * remove list elements, taking the irq_lock to do so.
 27 */
 28static struct irq_fd *active_fds = NULL;
 29static struct irq_fd **last_irq_ptr = &active_fds;
 30
 31extern void free_irqs(void);
 32
 33void sigio_handler(int sig, struct uml_pt_regs *regs)
 34{
 35	struct irq_fd *irq_fd;
 36	int n;
 37
 38	if (smp_sigio_handler())
 39		return;
 40
 41	while (1) {
 42		n = os_waiting_for_events(active_fds);
 43		if (n <= 0) {
 44			if (n == -EINTR)
 45				continue;
 46			else break;
 47		}
 48
 49		for (irq_fd = active_fds; irq_fd != NULL;
 50		     irq_fd = irq_fd->next) {
 51			if (irq_fd->current_events != 0) {
 52				irq_fd->current_events = 0;
 53				do_IRQ(irq_fd->irq, regs);
 54			}
 55		}
 56	}
 57
 58	free_irqs();
 59}
 60
 61static DEFINE_SPINLOCK(irq_lock);
 62
 63static int activate_fd(int irq, int fd, int type, void *dev_id)
 64{
 65	struct pollfd *tmp_pfd;
 66	struct irq_fd *new_fd, *irq_fd;
 67	unsigned long flags;
 68	int events, err, n;
 69
 70	err = os_set_fd_async(fd);
 71	if (err < 0)
 72		goto out;
 73
 74	err = -ENOMEM;
 75	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
 76	if (new_fd == NULL)
 77		goto out;
 78
 79	if (type == IRQ_READ)
 80		events = UM_POLLIN | UM_POLLPRI;
 81	else events = UM_POLLOUT;
 82	*new_fd = ((struct irq_fd) { .next  		= NULL,
 83				     .id 		= dev_id,
 84				     .fd 		= fd,
 85				     .type 		= type,
 86				     .irq 		= irq,
 87				     .events 		= events,
 88				     .current_events 	= 0 } );
 89
 90	err = -EBUSY;
 91	spin_lock_irqsave(&irq_lock, flags);
 92	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
 93		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
 94			printk(KERN_ERR "Registering fd %d twice\n", fd);
 95			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
 96			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
 97			       dev_id);
 98			goto out_unlock;
 99		}
100	}
101
102	if (type == IRQ_WRITE)
103		fd = -1;
104
105	tmp_pfd = NULL;
106	n = 0;
107
108	while (1) {
109		n = os_create_pollfd(fd, events, tmp_pfd, n);
110		if (n == 0)
111			break;
112
113		/*
114		 * n > 0
115		 * It means we couldn't put the new pollfd into the current
116		 * pollfds array, and tmp_pfd is NULL or too small for the
117		 * new array.  The needed size is at least n bytes.
118		 *
119		 * Here we have to drop the lock in order to call
120		 * kmalloc, which might sleep.
121		 * If something else came in meanwhile and changed the pollfds
122		 * array so that the new pollfd struct still does not fit,
123		 * we free the buffer tmp_pfd and try again.
124		 */
125		spin_unlock_irqrestore(&irq_lock, flags);
126		kfree(tmp_pfd);
127
128		tmp_pfd = kmalloc(n, GFP_KERNEL);
129		if (tmp_pfd == NULL)
130			goto out_kfree;
131
132		spin_lock_irqsave(&irq_lock, flags);
133	}
134
135	*last_irq_ptr = new_fd;
136	last_irq_ptr = &new_fd->next;
137
138	spin_unlock_irqrestore(&irq_lock, flags);
139
140	/*
141	 * This calls activate_fd, so it has to be outside the critical
142	 * section.
143	 */
144	maybe_sigio_broken(fd, (type == IRQ_READ));
145
146	return 0;
147
148 out_unlock:
149	spin_unlock_irqrestore(&irq_lock, flags);
150 out_kfree:
151	kfree(new_fd);
152 out:
153	return err;
154}
155
156static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
157{
158	unsigned long flags;
159
160	spin_lock_irqsave(&irq_lock, flags);
161	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
162	spin_unlock_irqrestore(&irq_lock, flags);
163}
164
165struct irq_and_dev {
166	int irq;
167	void *dev;
168};
169
170static int same_irq_and_dev(struct irq_fd *irq, void *d)
171{
172	struct irq_and_dev *data = d;
173
174	return ((irq->irq == data->irq) && (irq->id == data->dev));
175}
176
177static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
178{
179	struct irq_and_dev data = ((struct irq_and_dev) { .irq  = irq,
180							  .dev  = dev });
181
182	free_irq_by_cb(same_irq_and_dev, &data);
183}
184
185static int same_fd(struct irq_fd *irq, void *fd)
186{
187	return (irq->fd == *((int *)fd));
188}
189
190void free_irq_by_fd(int fd)
191{
192	free_irq_by_cb(same_fd, &fd);
193}
194
195/* Must be called with irq_lock held */
196static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
197{
198	struct irq_fd *irq;
199	int i = 0;
200	int fdi;
201
202	for (irq = active_fds; irq != NULL; irq = irq->next) {
203		if ((irq->fd == fd) && (irq->irq == irqnum))
204			break;
205		i++;
206	}
207	if (irq == NULL) {
208		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
209		       fd);
210		goto out;
211	}
212	fdi = os_get_pollfd(i);
213	if ((fdi != -1) && (fdi != fd)) {
214		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
215		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
216		       fdi, fd);
217		irq = NULL;
218		goto out;
219	}
220	*index_out = i;
221 out:
222	return irq;
223}
224
225void reactivate_fd(int fd, int irqnum)
226{
227	struct irq_fd *irq;
228	unsigned long flags;
229	int i;
230
231	spin_lock_irqsave(&irq_lock, flags);
232	irq = find_irq_by_fd(fd, irqnum, &i);
233	if (irq == NULL) {
234		spin_unlock_irqrestore(&irq_lock, flags);
235		return;
236	}
237	os_set_pollfd(i, irq->fd);
238	spin_unlock_irqrestore(&irq_lock, flags);
239
240	add_sigio_fd(fd);
241}
242
243void deactivate_fd(int fd, int irqnum)
244{
245	struct irq_fd *irq;
246	unsigned long flags;
247	int i;
248
249	spin_lock_irqsave(&irq_lock, flags);
250	irq = find_irq_by_fd(fd, irqnum, &i);
251	if (irq == NULL) {
252		spin_unlock_irqrestore(&irq_lock, flags);
253		return;
254	}
255
256	os_set_pollfd(i, -1);
257	spin_unlock_irqrestore(&irq_lock, flags);
258
259	ignore_sigio_fd(fd);
260}
261EXPORT_SYMBOL(deactivate_fd);
262
263/*
264 * Called just before shutdown in order to provide a clean exec
265 * environment in case the system is rebooting.  No locking because
266 * that would cause a pointless shutdown hang if something hadn't
267 * released the lock.
268 */
269int deactivate_all_fds(void)
270{
271	struct irq_fd *irq;
272	int err;
273
274	for (irq = active_fds; irq != NULL; irq = irq->next) {
275		err = os_clear_fd_async(irq->fd);
276		if (err)
277			return err;
278	}
279	/* If there is a signal already queued, after unblocking ignore it */
280	os_set_ioignore();
281
282	return 0;
283}
284
285/*
286 * do_IRQ handles all normal device IRQs (the special
287 * SMP cross-CPU interrupts have their own specific
288 * handlers).
289 */
290unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
291{
292	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
293	irq_enter();
294	generic_handle_irq(irq);
295	irq_exit();
296	set_irq_regs(old_regs);
297	return 1;
298}
299
300void um_free_irq(unsigned int irq, void *dev)
301{
302	free_irq_by_irq_and_dev(irq, dev);
303	free_irq(irq, dev);
304}
305EXPORT_SYMBOL(um_free_irq);
306
307int um_request_irq(unsigned int irq, int fd, int type,
308		   irq_handler_t handler,
309		   unsigned long irqflags, const char * devname,
310		   void *dev_id)
311{
312	int err;
313
314	if (fd != -1) {
315		err = activate_fd(irq, fd, type, dev_id);
316		if (err)
317			return err;
318	}
319
320	return request_irq(irq, handler, irqflags, devname, dev_id);
321}
322
323EXPORT_SYMBOL(um_request_irq);
324EXPORT_SYMBOL(reactivate_fd);
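For illustration, a minimal sketch of how a driver built against this version would use the two exports above. The struct, the EXAMPLE_IRQ number and the handler are hypothetical, not part of this file:

#include "linux/interrupt.h"
#include "irq_kern.h"

#define EXAMPLE_IRQ 9	/* hypothetical IRQ number for this sketch */

struct example_dev {
	int fd;			/* host file descriptor being watched */
};

/* Hypothetical handler: drain the fd, then re-arm polling for it. */
static irqreturn_t example_intr(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	/* ... read whatever is pending on dev->fd here ... */
	reactivate_fd(dev->fd, irq);
	return IRQ_HANDLED;
}

static int example_attach(struct example_dev *dev)
{
	/* IRQ_READ registers the fd for UM_POLLIN | UM_POLLPRI */
	return um_request_irq(EXAMPLE_IRQ, dev->fd, IRQ_READ,
			      example_intr, IRQF_SHARED, "example", dev);
}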
325
326/*
327 * irq_chip must define at least enable/disable and ack when
328 * the edge handler is used.
329 */
330static void dummy(struct irq_data *d)
331{
332}
333
334/* This is used for everything else than the timer. */
335static struct irq_chip normal_irq_type = {
336	.name = "SIGIO",
337	.irq_disable = dummy,
338	.irq_enable = dummy,
339	.irq_ack = dummy,
340};
341
342static struct irq_chip SIGVTALRM_irq_type = {
343	.name = "SIGVTALRM",
344	.irq_disable = dummy,
345	.irq_enable = dummy,
346	.irq_ack = dummy,
347};
348
349void __init init_IRQ(void)
350{
351	int i;
352
353	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
354
355	for (i = 1; i < NR_IRQS; i++)
356		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
357}
358
359/*
360 * IRQ stack entry and exit:
361 *
362 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
363 * and switch over to the IRQ stack after some preparation.  We use
364 * sigaltstack to receive signals on a separate stack from the start.
365 * These two functions make sure the rest of the kernel won't be too
366 * upset by being on a different stack.  The IRQ stack has a
367 * thread_info structure at the bottom so that current et al continue
368 * to work.
369 *
370 * to_irq_stack copies the current task's thread_info to the IRQ stack
371 * thread_info and sets the tasks's stack to point to the IRQ stack.
372 *
373 * from_irq_stack copies the thread_info struct back (flags may have
374 * been modified) and resets the task's stack pointer.
375 *
376 * Tricky bits -
377 *
378 * What happens when two signals race each other?  UML doesn't block
379 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
380 * could arrive while a previous one is still setting up the
381 * thread_info.
382 *
383 * There are three cases -
384 *     The first interrupt on the stack - sets up the thread_info and
385 * handles the interrupt
386 *     A nested interrupt interrupting the copying of the thread_info -
387 * can't handle the interrupt, as the stack is in an unknown state
388 *     A nested interrupt not interrupting the copying of the
389 * thread_info - doesn't do any setup, just handles the interrupt
390 *
391 * The first job is to figure out whether we interrupted stack setup.
392 * This is done by xchging the signal mask with thread_info->pending.
393 * If the value that comes back is zero, then there is no setup in
394 * progress, and the interrupt can be handled.  If the value is
395 * non-zero, then there is stack setup in progress.  In order to have
396 * the interrupt handled, we leave our signal in the mask, and it will
397 * be handled by the upper handler after it has set up the stack.
398 *
399 * Next is to figure out whether we are the outer handler or a nested
400 * one.  As part of setting up the stack, thread_info->real_thread is
401 * set to non-NULL (and is reset to NULL on exit).  This is the
402 * nesting indicator.  If it is non-NULL, then the stack is already
403 * set up and the handler can run.
404 */
405
406static unsigned long pending_mask;
407
408unsigned long to_irq_stack(unsigned long *mask_out)
409{
410	struct thread_info *ti;
411	unsigned long mask, old;
412	int nested;
413
414	mask = xchg(&pending_mask, *mask_out);
415	if (mask != 0) {
416		/*
417		 * If any interrupts come in at this point, we want to
418		 * make sure that their bits aren't lost by our
419		 * putting our bit in.  So, this loop accumulates bits
420		 * until xchg returns the same value that we put in.
421		 * When that happens, there were no new interrupts,
422		 * and pending_mask contains a bit for each interrupt
423		 * that came in.
424		 */
425		old = *mask_out;
426		do {
427			old |= mask;
428			mask = xchg(&pending_mask, old);
429		} while (mask != old);
430		return 1;
431	}
432
433	ti = current_thread_info();
434	nested = (ti->real_thread != NULL);
435	if (!nested) {
436		struct task_struct *task;
437		struct thread_info *tti;
438
439		task = cpu_tasks[ti->cpu].task;
440		tti = task_thread_info(task);
441
442		*ti = *tti;
443		ti->real_thread = tti;
444		task->stack = ti;
445	}
446
447	mask = xchg(&pending_mask, 0);
448	*mask_out |= mask | nested;
449	return 0;
450}
451
452unsigned long from_irq_stack(int nested)
453{
454	struct thread_info *ti, *to;
455	unsigned long mask;
456
457	ti = current_thread_info();
458
459	pending_mask = 1;
460
461	to = ti->real_thread;
462	current->stack = to;
463	ti->real_thread = NULL;
464	*to = *ti;
465
466	mask = xchg(&pending_mask, 0);
467	return mask & ~1;
468}
469
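For illustration, a sketch of the calling convention the block comment above describes; example_signal_entry() and handle_one() are hypothetical stand-ins for the low-level signal dispatch code:

static void example_signal_entry(int sig, struct uml_pt_regs *regs)
{
	unsigned long pending = 1UL << sig;

	do {
		int nested;

		/*
		 * Non-zero return: another handler is still copying the
		 * thread_info; our bit was left in pending_mask and will
		 * be handled by that outer handler, so just return.
		 */
		if (to_irq_stack(&pending))
			return;

		nested = pending & 1;	/* bit 0 is the nesting indicator */
		pending &= ~1UL;

		while (pending) {
			int s = __ffs(pending);

			pending &= ~(1UL << s);
			handle_one(s, regs);	/* hypothetical dispatcher */
		}

		/*
		 * Only the outer handler tears the stack down; signals that
		 * arrived during teardown come back and are handled again.
		 */
		pending = nested ? 0 : from_irq_stack(nested);
	} while (pending);
}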
arch/um/kernel/irq.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2017 - Cambridge Greys Ltd
  4 * Copyright (C) 2011 - 2014 Cisco Systems Inc
  5 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  6 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
  7 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
  8 */
  9
 10#include <linux/cpumask.h>
 11#include <linux/hardirq.h>
 12#include <linux/interrupt.h>
 13#include <linux/kernel_stat.h>
 14#include <linux/module.h>
 15#include <linux/sched.h>
 16#include <linux/seq_file.h>
 17#include <linux/slab.h>
 18#include <as-layout.h>
 19#include <kern_util.h>
 20#include <os.h>
 21#include <irq_user.h>
 22#include <irq_kern.h>
 23#include <linux/time-internal.h>
 24
 25
 26/* When epoll triggers we do not know why it did so;
 27 * we can also have different IRQs for read and write.
 28 * This is why we keep a small irq_reg array for each fd -
 29 * one entry per IRQ type.
 30 */
 31struct irq_reg {
 32	void *id;
 33	int irq;
 34	/* it's cheaper to store this than to query it */
 35	int events;
 36	bool active;
 37	bool pending;
 38	bool wakeup;
 39#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
 40	bool pending_event;
 41	void (*timetravel_handler)(int, int, void *,
 42				   struct time_travel_event *);
 43	struct time_travel_event event;
 44#endif
 45};
 46
 47struct irq_entry {
 48	struct list_head list;
 49	int fd;
 50	struct irq_reg reg[NUM_IRQ_TYPES];
 51	bool suspended;
 52	bool sigio_workaround;
 53};
 54
 55static DEFINE_SPINLOCK(irq_lock);
 56static LIST_HEAD(active_fds);
 57static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
 58static bool irqs_suspended;
 59#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
 60static bool irqs_pending;
 61#endif
 62
 63static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
 64{
 65/*
 66 * irq->active guards against reentry
 67 * irq->pending accumulates pending requests
 68 * if pending is raised the irq_handler is re-run
 69 * until pending is cleared
 70 */
 71	if (irq->active) {
 72		irq->active = false;
 73
 74		do {
 75			irq->pending = false;
 76			do_IRQ(irq->irq, regs);
 77		} while (irq->pending);
 78
 79		irq->active = true;
 80	} else {
 81		irq->pending = true;
 82	}
 83}
 84
 85#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
 86static void irq_event_handler(struct time_travel_event *ev)
 87{
 88	struct irq_reg *reg = container_of(ev, struct irq_reg, event);
 89
 90	/* do nothing if suspended; just cause a wakeup and mark as pending */
 91	if (irqs_suspended) {
 92		irqs_pending = true;
 93		reg->pending_event = true;
 94		return;
 95	}
 96
 97	generic_handle_irq(reg->irq);
 98}
 99
100static bool irq_do_timetravel_handler(struct irq_entry *entry,
101				      enum um_irq_type t)
102{
103	struct irq_reg *reg = &entry->reg[t];
104
105	if (!reg->timetravel_handler)
106		return false;
107
108	/*
109	 * Handle all messages - we might get multiple even while
110	 * interrupts are already suspended, due to suspend order
111	 * etc. Note that time_travel_add_irq_event() will not add
112	 * an event twice, if it's pending already "first wins".
113	 */
114	reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);
115
116	if (!reg->event.pending)
117		return false;
118
119	return true;
120}
121
122static void irq_do_pending_events(bool timetravel_handlers_only)
123{
124	struct irq_entry *entry;
125
126	if (!irqs_pending || timetravel_handlers_only)
127		return;
128
129	irqs_pending = false;
130
131	list_for_each_entry(entry, &active_fds, list) {
132		enum um_irq_type t;
133
134		for (t = 0; t < NUM_IRQ_TYPES; t++) {
135			struct irq_reg *reg = &entry->reg[t];
136
137			/*
138			 * Any timetravel_handler was invoked already, just
139			 * directly run the IRQ.
140			 */
141			if (reg->pending_event) {
142				irq_enter();
143				generic_handle_irq(reg->irq);
144				irq_exit();
145				reg->pending_event = false;
146			}
147		}
148	}
149}
150#else
151static bool irq_do_timetravel_handler(struct irq_entry *entry,
152				      enum um_irq_type t)
153{
154	return false;
155}
156
157static void irq_do_pending_events(bool timetravel_handlers_only)
158{
159}
160#endif
161
162static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
163			      struct uml_pt_regs *regs,
164			      bool timetravel_handlers_only)
165{
166	struct irq_reg *reg = &entry->reg[t];
167
168	if (!reg->events)
169		return;
170
171	if (os_epoll_triggered(idx, reg->events) <= 0)
172		return;
173
174	if (irq_do_timetravel_handler(entry, t))
175		return;
176
177	/*
178	 * If we're called to only run time-travel handlers then don't
179	 * actually proceed but mark sigio as pending (if applicable).
180	 * For suspend/resume, timetravel_handlers_only may be true
181	 * despite time-travel not being configured and used.
182	 */
183	if (timetravel_handlers_only) {
184#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
185		reg->pending_event = true;
186		irqs_pending = true;
187		mark_sigio_pending();
188#endif
189		return;
190	}
191
192	irq_io_loop(reg, regs);
193}
194
195static void _sigio_handler(struct uml_pt_regs *regs,
196			   bool timetravel_handlers_only)
197{
198	struct irq_entry *irq_entry;
199	int n, i;
200
201	if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
202		return;
203
204	/* Flush out pending events that were ignored due to time-travel. */
205	if (!irqs_suspended)
206		irq_do_pending_events(timetravel_handlers_only);
207
208	while (1) {
209		/* This is now lockless - epoll keeps back-references to the irqs
210		 * which have triggered it, so there is no need to walk the irq
211		 * list and lock it every time. We avoid locking by turning off
212		 * IO for a specific fd by executing os_del_epoll_fd(fd) before
213		 * we do any changes to the actual data structures
214		 */
215		n = os_waiting_for_events_epoll();
216
217		if (n <= 0) {
218			if (n == -EINTR)
219				continue;
220			else
221				break;
222		}
223
224		for (i = 0; i < n ; i++) {
225			enum um_irq_type t;
226
227			irq_entry = os_epoll_get_data_pointer(i);
228
229			for (t = 0; t < NUM_IRQ_TYPES; t++)
230				sigio_reg_handler(i, irq_entry, t, regs,
231						  timetravel_handlers_only);
232		}
233	}
234
235	if (!timetravel_handlers_only)
236		free_irqs();
237}
238
239void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
240{
241	preempt_disable();
242	_sigio_handler(regs, irqs_suspended);
243	preempt_enable();
244}
245
246static struct irq_entry *get_irq_entry_by_fd(int fd)
247{
248	struct irq_entry *walk;
249
250	lockdep_assert_held(&irq_lock);
251
252	list_for_each_entry(walk, &active_fds, list) {
253		if (walk->fd == fd)
254			return walk;
255	}
256
257	return NULL;
258}
259
260static void free_irq_entry(struct irq_entry *to_free, bool remove)
261{
262	if (!to_free)
263		return;
264
265	if (remove)
266		os_del_epoll_fd(to_free->fd);
267	list_del(&to_free->list);
268	kfree(to_free);
269}
270
271static bool update_irq_entry(struct irq_entry *entry)
272{
273	enum um_irq_type i;
274	int events = 0;
275
276	for (i = 0; i < NUM_IRQ_TYPES; i++)
277		events |= entry->reg[i].events;
278
279	if (events) {
280		/* will modify (instead of add) if needed */
281		os_add_epoll_fd(events, entry->fd, entry);
282		return true;
283	}
284
285	os_del_epoll_fd(entry->fd);
286	return false;
287}
288
289static void update_or_free_irq_entry(struct irq_entry *entry)
290{
291	if (!update_irq_entry(entry))
292		free_irq_entry(entry, false);
293}
294
295static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
296		       void (*timetravel_handler)(int, int, void *,
297						  struct time_travel_event *))
298{
299	struct irq_entry *irq_entry;
300	int err, events = os_event_mask(type);
301	unsigned long flags;
302
303	err = os_set_fd_async(fd);
304	if (err < 0)
305		goto out;
306
307	spin_lock_irqsave(&irq_lock, flags);
308	irq_entry = get_irq_entry_by_fd(fd);
309	if (irq_entry) {
310		/* cannot register the same FD twice with the same type */
311		if (WARN_ON(irq_entry->reg[type].events)) {
312			err = -EALREADY;
313			goto out_unlock;
314		}
315
316		/* temporarily disable to avoid IRQ-side locking */
317		os_del_epoll_fd(fd);
318	} else {
319		irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
320		if (!irq_entry) {
321			err = -ENOMEM;
322			goto out_unlock;
323		}
324		irq_entry->fd = fd;
325		list_add_tail(&irq_entry->list, &active_fds);
326		maybe_sigio_broken(fd);
327	}
328
329	irq_entry->reg[type].id = dev_id;
330	irq_entry->reg[type].irq = irq;
331	irq_entry->reg[type].active = true;
332	irq_entry->reg[type].events = events;
333
334#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
335	if (um_irq_timetravel_handler_used()) {
336		irq_entry->reg[type].timetravel_handler = timetravel_handler;
337		irq_entry->reg[type].event.fn = irq_event_handler;
338	}
339#endif
340
341	WARN_ON(!update_irq_entry(irq_entry));
342	spin_unlock_irqrestore(&irq_lock, flags);
343
344	return 0;
345out_unlock:
346	spin_unlock_irqrestore(&irq_lock, flags);
347out:
348	return err;
349}
350
351/*
352 * Remove the entry or entries for a specific FD, if you
353 * don't want to remove all the possible entries then use
354 * um_free_irq() or deactivate_fd() instead.
355 */
356void free_irq_by_fd(int fd)
357{
358	struct irq_entry *to_free;
359	unsigned long flags;
360
361	spin_lock_irqsave(&irq_lock, flags);
362	to_free = get_irq_entry_by_fd(fd);
363	free_irq_entry(to_free, true);
364	spin_unlock_irqrestore(&irq_lock, flags);
365}
366EXPORT_SYMBOL(free_irq_by_fd);
367
368static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
369{
370	struct irq_entry *entry;
371	unsigned long flags;
372
373	spin_lock_irqsave(&irq_lock, flags);
374	list_for_each_entry(entry, &active_fds, list) {
375		enum um_irq_type i;
376
377		for (i = 0; i < NUM_IRQ_TYPES; i++) {
378			struct irq_reg *reg = &entry->reg[i];
379
380			if (!reg->events)
381				continue;
382			if (reg->irq != irq)
383				continue;
384			if (reg->id != dev)
385				continue;
386
387			os_del_epoll_fd(entry->fd);
388			reg->events = 0;
389			update_or_free_irq_entry(entry);
390			goto out;
391		}
392	}
393out:
394	spin_unlock_irqrestore(&irq_lock, flags);
395}
396
397void deactivate_fd(int fd, int irqnum)
398{
399	struct irq_entry *entry;
400	unsigned long flags;
401	enum um_irq_type i;
402
403	os_del_epoll_fd(fd);
404
405	spin_lock_irqsave(&irq_lock, flags);
406	entry = get_irq_entry_by_fd(fd);
407	if (!entry)
408		goto out;
409
410	for (i = 0; i < NUM_IRQ_TYPES; i++) {
411		if (!entry->reg[i].events)
412			continue;
413		if (entry->reg[i].irq == irqnum)
414			entry->reg[i].events = 0;
415	}
416
417	update_or_free_irq_entry(entry);
418out:
419	spin_unlock_irqrestore(&irq_lock, flags);
420
421	ignore_sigio_fd(fd);
422}
423EXPORT_SYMBOL(deactivate_fd);
424
425/*
426 * Called just before shutdown in order to provide a clean exec
427 * environment in case the system is rebooting.  No locking because
428 * that would cause a pointless shutdown hang if something hadn't
429 * released the lock.
430 */
431int deactivate_all_fds(void)
432{
433	struct irq_entry *entry;
434
435	/* Stop IO. The IRQ loop has no lock so this is our
436	 * only way of making sure we are safe to dispose
437	 * of all IRQ handlers
438	 */
439	os_set_ioignore();
440
441	/* we can no longer call kfree() here so just deactivate */
442	list_for_each_entry(entry, &active_fds, list)
443		os_del_epoll_fd(entry->fd);
444	os_close_epoll_fd();
445	return 0;
446}
447
448/*
449 * do_IRQ handles all normal device IRQs (the special
450 * SMP cross-CPU interrupts have their own specific
451 * handlers).
452 */
453unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
454{
455	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
456	irq_enter();
457	generic_handle_irq(irq);
458	irq_exit();
459	set_irq_regs(old_regs);
460	return 1;
461}
462
463void um_free_irq(int irq, void *dev)
464{
465	if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
466		 "freeing invalid irq %d", irq))
467		return;
468
469	free_irq_by_irq_and_dev(irq, dev);
470	free_irq(irq, dev);
471	clear_bit(irq, irqs_allocated);
472}
473EXPORT_SYMBOL(um_free_irq);
474
475static int
476_um_request_irq(int irq, int fd, enum um_irq_type type,
477		irq_handler_t handler, unsigned long irqflags,
478		const char *devname, void *dev_id,
479		void (*timetravel_handler)(int, int, void *,
480					   struct time_travel_event *))
481{
482	int err;
483
484	if (irq == UM_IRQ_ALLOC) {
485		int i;
486
487		for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
488			if (!test_and_set_bit(i, irqs_allocated)) {
489				irq = i;
490				break;
491			}
492		}
493	}
494
495	if (irq < 0)
496		return -ENOSPC;
497
498	if (fd != -1) {
499		err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
500		if (err)
501			goto error;
502	}
503
504	err = request_irq(irq, handler, irqflags, devname, dev_id);
505	if (err < 0)
506		goto error;
507
508	return irq;
509error:
510	clear_bit(irq, irqs_allocated);
511	return err;
512}
513
514int um_request_irq(int irq, int fd, enum um_irq_type type,
515		   irq_handler_t handler, unsigned long irqflags,
516		   const char *devname, void *dev_id)
517{
518	return _um_request_irq(irq, fd, type, handler, irqflags,
519			       devname, dev_id, NULL);
520}
521EXPORT_SYMBOL(um_request_irq);
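For illustration, a minimal sketch of the request/free pattern with the current API. The struct, the handler and the "example" name are hypothetical, not part of this file:

#include <linux/interrupt.h>
#include <irq_kern.h>

struct example_dev {
	int fd;		/* host file descriptor being watched */
	int irq;	/* IRQ number handed back by um_request_irq() */
};

/* Hypothetical handler: drain the fd completely before returning. */
static irqreturn_t example_intr(int irq, void *dev_id)
{
	/* dev_id is the struct example_dev * registered below */
	/* ... read whatever is pending on its fd here ... */
	return IRQ_HANDLED;
}

static int example_attach(struct example_dev *dev)
{
	int irq;

	/* UM_IRQ_ALLOC lets _um_request_irq() pick a free dynamic IRQ */
	irq = um_request_irq(UM_IRQ_ALLOC, dev->fd, IRQ_READ,
			     example_intr, 0, "example", dev);
	if (irq < 0)
		return irq;

	dev->irq = irq;
	return 0;
}

static void example_detach(struct example_dev *dev)
{
	/* drops both the generic IRQ and this file's fd registration */
	um_free_irq(dev->irq, dev);
}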
522
523#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
524int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
525		      irq_handler_t handler, unsigned long irqflags,
526		      const char *devname, void *dev_id,
527		      void (*timetravel_handler)(int, int, void *,
528						 struct time_travel_event *))
529{
530	return _um_request_irq(irq, fd, type, handler, irqflags,
531			       devname, dev_id, timetravel_handler);
532}
533EXPORT_SYMBOL(um_request_irq_tt);
534
535void sigio_run_timetravel_handlers(void)
536{
537	_sigio_handler(NULL, true);
538}
539#endif
540
541#ifdef CONFIG_PM_SLEEP
542void um_irqs_suspend(void)
543{
544	struct irq_entry *entry;
545	unsigned long flags;
546
547	irqs_suspended = true;
548
549	spin_lock_irqsave(&irq_lock, flags);
550	list_for_each_entry(entry, &active_fds, list) {
551		enum um_irq_type t;
552		bool clear = true;
553
554		for (t = 0; t < NUM_IRQ_TYPES; t++) {
555			if (!entry->reg[t].events)
556				continue;
557
558			/*
559			 * For the SIGIO_WRITE_IRQ, which is used to handle the
560			 * SIGIO workaround thread, we need special handling:
561			 * enable wake for it itself, but below we tell it about
562			 * any FDs that should be suspended.
563			 */
564			if (entry->reg[t].wakeup ||
565			    entry->reg[t].irq == SIGIO_WRITE_IRQ
566#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
567			    || entry->reg[t].timetravel_handler
568#endif
569			    ) {
570				clear = false;
571				break;
572			}
573		}
574
575		if (clear) {
576			entry->suspended = true;
577			os_clear_fd_async(entry->fd);
578			entry->sigio_workaround =
579				!__ignore_sigio_fd(entry->fd);
580		}
581	}
582	spin_unlock_irqrestore(&irq_lock, flags);
583}
584
585void um_irqs_resume(void)
586{
587	struct irq_entry *entry;
588	unsigned long flags;
589
590
591	spin_lock_irqsave(&irq_lock, flags);
592	list_for_each_entry(entry, &active_fds, list) {
593		if (entry->suspended) {
594			int err = os_set_fd_async(entry->fd);
595
596			WARN(err < 0, "os_set_fd_async returned %d\n", err);
597			entry->suspended = false;
598
599			if (entry->sigio_workaround) {
600				err = __add_sigio_fd(entry->fd);
601				WARN(err < 0, "add_sigio_returned %d\n", err);
602			}
603		}
604	}
605	spin_unlock_irqrestore(&irq_lock, flags);
606
607	irqs_suspended = false;
608	send_sigio_to_self();
609}
610
611static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
612{
613	struct irq_entry *entry;
614	unsigned long flags;
615
616	spin_lock_irqsave(&irq_lock, flags);
617	list_for_each_entry(entry, &active_fds, list) {
618		enum um_irq_type t;
619
620		for (t = 0; t < NUM_IRQ_TYPES; t++) {
621			if (!entry->reg[t].events)
622				continue;
623
624			if (entry->reg[t].irq != d->irq)
625				continue;
626			entry->reg[t].wakeup = on;
627			goto unlock;
628		}
629	}
630unlock:
631	spin_unlock_irqrestore(&irq_lock, flags);
632	return 0;
633}
634#else
635#define normal_irq_set_wake NULL
636#endif
637
638/*
639 * irq_chip must define at least enable/disable and ack when
640 * the edge handler is used.
641 */
642static void dummy(struct irq_data *d)
643{
644}
645
646/* This is used for everything other than the timer. */
647static struct irq_chip normal_irq_type = {
648	.name = "SIGIO",
649	.irq_disable = dummy,
650	.irq_enable = dummy,
651	.irq_ack = dummy,
652	.irq_mask = dummy,
653	.irq_unmask = dummy,
654	.irq_set_wake = normal_irq_set_wake,
655};
656
657static struct irq_chip alarm_irq_type = {
658	.name = "SIGALRM",
659	.irq_disable = dummy,
660	.irq_enable = dummy,
661	.irq_ack = dummy,
662	.irq_mask = dummy,
663	.irq_unmask = dummy,
664};
665
666void __init init_IRQ(void)
667{
668	int i;
669
670	irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);
671
672	for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
673		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
674	/* Initialize EPOLL Loop */
675	os_setup_epoll();
676}