v3.5.6
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include "linux/cpumask.h"
#include "linux/hardirq.h"
#include "linux/interrupt.h"
#include "linux/kernel_stat.h"
#include "linux/module.h"
#include "linux/sched.h"
#include "linux/seq_file.h"
#include "linux/slab.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified: IRQ handlers won't change it.
 * If an IRQ source has vanished, it is freed by free_irqs just before
 * sigio_handler returns.  free_irqs processes a separate list of IRQs
 * to free, with its own locking, and comes back here, taking irq_lock,
 * to remove the list elements.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;
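
/*
 * Note: active_fds is a singly linked list, and last_irq_ptr always
 * points at its terminating next pointer (initially &active_fds
 * itself), which is what lets activate_fd() append a new entry in
 * O(1):
 *
 *	*last_irq_ptr = new_fd;
 *	last_irq_ptr = &new_fd->next;
 */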

extern void free_irqs(void);

void sigio_handler(int sig, struct uml_pt_regs *regs)
{
        struct irq_fd *irq_fd;
        int n;

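        /*
         * Keep polling until no descriptor has events pending; -EINTR
         * from os_waiting_for_events just means the wait was
         * interrupted, so retry.  Each fd that reported events gets
         * its IRQ dispatched through do_IRQ().
         */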
        if (smp_sigio_handler())
                return;

        while (1) {
                n = os_waiting_for_events(active_fds);
                if (n <= 0) {
                        if (n == -EINTR)
                                continue;
                        else
                                break;
                }

                for (irq_fd = active_fds; irq_fd != NULL;
                     irq_fd = irq_fd->next) {
                        if (irq_fd->current_events != 0) {
                                irq_fd->current_events = 0;
                                do_IRQ(irq_fd->irq, regs);
                        }
                }
        }

        free_irqs();
}


static DEFINE_SPINLOCK(irq_lock);

static int activate_fd(int irq, int fd, int type, void *dev_id)
{
        struct pollfd *tmp_pfd;
        struct irq_fd *new_fd, *irq_fd;
        unsigned long flags;
        int events, err, n;

        err = os_set_fd_async(fd);
        if (err < 0)
                goto out;

        err = -ENOMEM;
        new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
        if (new_fd == NULL)
                goto out;

        if (type == IRQ_READ)
                events = UM_POLLIN | UM_POLLPRI;
        else
                events = UM_POLLOUT;
        *new_fd = ((struct irq_fd) { .next		= NULL,
                                     .id		= dev_id,
                                     .fd		= fd,
                                     .type		= type,
                                     .irq		= irq,
                                     .events		= events,
                                     .current_events	= 0 });

        err = -EBUSY;
        spin_lock_irqsave(&irq_lock, flags);
        for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
                if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
                        printk(KERN_ERR "Registering fd %d twice\n", fd);
                        printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
                        printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
                               dev_id);
                        goto out_unlock;
                }
        }

        if (type == IRQ_WRITE)
                fd = -1;

        tmp_pfd = NULL;
        n = 0;

        while (1) {
                n = os_create_pollfd(fd, events, tmp_pfd, n);
                if (n == 0)
                        break;

                /*
                 * n > 0 means the new pollfd could not be added to the
                 * current pollfds array: tmp_pfd is either NULL or too
                 * small, and at least n bytes are needed.
                 *
                 * We have to drop the lock here in order to call
                 * kmalloc, which might sleep.  If something else comes
                 * in meanwhile and changes the pollfds array so that
                 * the new pollfd still doesn't fit, we free tmp_pfd
                 * and try again.
                 */
                spin_unlock_irqrestore(&irq_lock, flags);
                kfree(tmp_pfd);

                tmp_pfd = kmalloc(n, GFP_KERNEL);
                if (tmp_pfd == NULL)
                        goto out_kfree;

                spin_lock_irqsave(&irq_lock, flags);
        }

        *last_irq_ptr = new_fd;
        last_irq_ptr = &new_fd->next;

        spin_unlock_irqrestore(&irq_lock, flags);

        /*
         * This calls activate_fd, so it has to be outside the critical
         * section.
         */
        maybe_sigio_broken(fd, (type == IRQ_READ));

        return 0;

 out_unlock:
        spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
        kfree(new_fd);
 out:
        return err;
}

static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
        unsigned long flags;

        spin_lock_irqsave(&irq_lock, flags);
        os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
        spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
        int irq;
        void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
        struct irq_and_dev *data = d;

        return ((irq->irq == data->irq) && (irq->id == data->dev));
}

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
        struct irq_and_dev data = ((struct irq_and_dev) { .irq  = irq,
                                                          .dev  = dev });

        free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
        return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
        free_irq_by_cb(same_fd, &fd);
}

/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
        struct irq_fd *irq;
        int i = 0;
        int fdi;

        for (irq = active_fds; irq != NULL; irq = irq->next) {
                if ((irq->fd == fd) && (irq->irq == irqnum))
                        break;
                i++;
        }
        if (irq == NULL) {
                printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
                       fd);
                goto out;
        }
        fdi = os_get_pollfd(i);
        if ((fdi != -1) && (fdi != fd)) {
                printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
                       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
                       fdi, fd);
                irq = NULL;
                goto out;
        }
        *index_out = i;
 out:
        return irq;
}
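/*
 * Re-arm polling for fd: restore its slot in the pollfds array
 * (cleared by deactivate_fd) and ask for SIGIO on it again.
 */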
void reactivate_fd(int fd, int irqnum)
{
        struct irq_fd *irq;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&irq_lock, flags);
        irq = find_irq_by_fd(fd, irqnum, &i);
        if (irq == NULL) {
                spin_unlock_irqrestore(&irq_lock, flags);
                return;
        }
        os_set_pollfd(i, irq->fd);
        spin_unlock_irqrestore(&irq_lock, flags);

        add_sigio_fd(fd);
}

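/*
 * Stop polling fd: clear its slot in the pollfds array and stop
 * listening for SIGIO on it.
 */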
void deactivate_fd(int fd, int irqnum)
{
        struct irq_fd *irq;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&irq_lock, flags);
        irq = find_irq_by_fd(fd, irqnum, &i);
        if (irq == NULL) {
                spin_unlock_irqrestore(&irq_lock, flags);
                return;
        }

        os_set_pollfd(i, -1);
        spin_unlock_irqrestore(&irq_lock, flags);

        ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
        struct irq_fd *irq;
        int err;

        for (irq = active_fds; irq != NULL; irq = irq->next) {
                err = os_clear_fd_async(irq->fd);
                if (err)
                        return err;
        }
        /* If a signal is already queued, ignore it after unblocking */
        os_set_ioignore();

        return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
        irq_enter();
        generic_handle_irq(irq);
        irq_exit();
        set_irq_regs(old_regs);
        return 1;
}

void um_free_irq(unsigned int irq, void *dev)
{
        free_irq_by_irq_and_dev(irq, dev);
        free_irq(irq, dev);
}
EXPORT_SYMBOL(um_free_irq);

int um_request_irq(unsigned int irq, int fd, int type,
                   irq_handler_t handler,
                   unsigned long irqflags, const char *devname,
                   void *dev_id)
{
        int err;

        if (fd != -1) {
                err = activate_fd(irq, fd, type, dev_id);
                if (err)
                        return err;
        }

        return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
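
/*
 * Illustrative usage (hypothetical driver, not from this file): a UML
 * driver that owns an open host descriptor would typically register
 * it with something like
 *
 *	err = um_request_irq(MY_IRQ, fd, IRQ_READ, my_interrupt,
 *			     0, "my-driver", dev);
 *
 * where MY_IRQ, my_interrupt, and dev stand in for the driver's own
 * IRQ number, handler, and device pointer.  Passing fd == -1 skips
 * activate_fd entirely, making the call behave like plain
 * request_irq.
 */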

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
        .name = "SIGIO",
        .irq_disable = dummy,
        .irq_enable = dummy,
        .irq_ack = dummy,
};

static struct irq_chip SIGVTALRM_irq_type = {
        .name = "SIGVTALRM",
        .irq_disable = dummy,
        .irq_enable = dummy,
        .irq_ack = dummy,
};

void __init init_IRQ(void)
{
        int i;

        irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type,
                                 handle_edge_irq);

        for (i = 1; i < NR_IRQS; i++)
                irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */
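
/*
 * Illustrative race (hypothetical mask values): the outer handler A
 * calls to_irq_stack with *mask_out == 0x1; xchg returns 0, so no
 * setup is in progress and A starts copying the thread_info.  A
 * nested handler B then arrives with 0x2; its xchg returns 0x1
 * (non-zero), so B loops until pending_mask stably holds 0x3 and
 * returns, leaving its signal queued.  When A finishes setup, its
 * final xchg(&pending_mask, 0) returns 0x3, so *mask_out tells A to
 * handle B's signal as well.
 */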

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
        struct thread_info *ti;
        unsigned long mask, old;
        int nested;

        mask = xchg(&pending_mask, *mask_out);
        if (mask != 0) {
                /*
                 * If any interrupts come in at this point, we want to
                 * make sure that their bits aren't lost by our
                 * putting our bit in.  So, this loop accumulates bits
                 * until xchg returns the same value that we put in.
                 * When that happens, there were no new interrupts,
                 * and pending_mask contains a bit for each interrupt
                 * that came in.
                 */
                old = *mask_out;
                do {
                        old |= mask;
                        mask = xchg(&pending_mask, old);
                } while (mask != old);
                return 1;
        }

        ti = current_thread_info();
        nested = (ti->real_thread != NULL);
        if (!nested) {
                struct task_struct *task;
                struct thread_info *tti;

                task = cpu_tasks[ti->cpu].task;
                tti = task_thread_info(task);

                *ti = *tti;
                ti->real_thread = tti;
                task->stack = ti;
        }

        mask = xchg(&pending_mask, 0);
        *mask_out |= mask | nested;
        return 0;
}

unsigned long from_irq_stack(int nested)
{
        struct thread_info *ti, *to;
        unsigned long mask;

        ti = current_thread_info();

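        /*
         * Setting bit 0 in pending_mask marks the copy-back as in
         * progress, so any handler arriving now queues itself instead
         * of running; the sentinel bit is stripped from the returned
         * mask below.
         */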
        pending_mask = 1;

        to = ti->real_thread;
        current->stack = to;
        ti->real_thread = NULL;
        *to = *ti;

        mask = xchg(&pending_mask, 0);
        return mask & ~1;
}