v4.17
  1/*
  2 * Copyright (C) 2017 - Cambridge Greys Ltd
  3 * Copyright (C) 2011 - 2014 Cisco Systems Inc
  4 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  5 * Licensed under the GPL
  6 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
  7 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
  8 */
  9
 10#include <linux/cpumask.h>
 11#include <linux/hardirq.h>
 12#include <linux/interrupt.h>
 13#include <linux/kernel_stat.h>
 14#include <linux/module.h>
 15#include <linux/sched.h>
 16#include <linux/seq_file.h>
 17#include <linux/slab.h>
 18#include <as-layout.h>
 19#include <kern_util.h>
 20#include <os.h>
 21#include <irq_user.h>
 22
 23
  24/* When epoll triggers we do not know why it did so;
  25 * we can also have different IRQs for read and write.
  26 * This is why we keep a small irq_fd array for each fd -
  27 * one entry per IRQ type.
  28 */
 29
 30struct irq_entry {
 31	struct irq_entry *next;
 32	int fd;
 33	struct irq_fd *irq_array[MAX_IRQ_TYPE + 1];
 34};
 35
 36static struct irq_entry *active_fds;
 37
 38static DEFINE_SPINLOCK(irq_lock);
 39
 40static void irq_io_loop(struct irq_fd *irq, struct uml_pt_regs *regs)
 41{
 42/*
 43 * irq->active guards against reentry
 44 * irq->pending accumulates pending requests
 45 * if pending is raised the irq_handler is re-run
 46 * until pending is cleared
 47 */
 48	if (irq->active) {
 49		irq->active = false;
 50		do {
 51			irq->pending = false;
 52			do_IRQ(irq->irq, regs);
 53		} while (irq->pending && (!irq->purge));
 54		if (!irq->purge)
 55			irq->active = true;
 56	} else {
 57		irq->pending = true;
 58	}
 59}
 60
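The active/pending pair documented in irq_io_loop() is a small re-entrancy guard: while one invocation is running the handler, later triggers only set pending, and the running invocation keeps looping until no new request has arrived. Below is a minimal standalone sketch of the same pattern, with the purge handling omitted and hypothetical names (handler_busy, handler_pending, handle_event) standing in for irq->active, irq->pending and do_IRQ():

/* Illustrative sketch only - not part of this file. */
static bool handler_busy;
static bool handler_pending;

static void handle_event(void)
{
	/* process one batch of work */
}

static void dispatch(void)
{
	if (handler_busy) {
		/* a run is already in progress; ask it to go around again */
		handler_pending = true;
		return;
	}
	handler_busy = true;
	do {
		handler_pending = false;
		handle_event();
	} while (handler_pending);
	handler_busy = false;
}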
 61void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 62{
 63	struct irq_entry *irq_entry;
 64	struct irq_fd *irq;
 65
 66	int n, i, j;
 67
 68	while (1) {
  69		/* This is now lockless - epoll keeps back-references to the irqs
  70		 * which have triggered it, so there is no need to walk the irq
 71		 * list and lock it every time. We avoid locking by turning off
 72		 * IO for a specific fd by executing os_del_epoll_fd(fd) before
 73		 * we do any changes to the actual data structures
 74		 */
 75		n = os_waiting_for_events_epoll();
 76
 77		if (n <= 0) {
 78			if (n == -EINTR)
 79				continue;
 80			else
 81				break;
 82		}
 83
 84		for (i = 0; i < n ; i++) {
 85			/* Epoll back reference is the entry with 3 irq_fd
 86			 * leaves - one for each irq type.
 87			 */
 88			irq_entry = (struct irq_entry *)
 89				os_epoll_get_data_pointer(i);
 90			for (j = 0; j < MAX_IRQ_TYPE ; j++) {
 91				irq = irq_entry->irq_array[j];
 92				if (irq == NULL)
 93					continue;
 94				if (os_epoll_triggered(i, irq->events) > 0)
 95					irq_io_loop(irq, regs);
 96				if (irq->purge) {
 97					irq_entry->irq_array[j] = NULL;
 98					kfree(irq);
 99				}
100			}
101		}
102	}
103}
104
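sigio_handler() can stay lockless because each epoll event carries the pointer that was stored alongside the fd when it was registered, and epoll hands that pointer back untouched. A hedged sketch of how such a back-reference is typically attached on the host side (the real helpers live in the UML host-side os code; example_add_fd is an illustrative name):

#include <sys/epoll.h>

/* Illustrative only: stash a back-reference in epoll_event.data.ptr so
 * epoll_wait() returns it with every event on this fd. */
static int example_add_fd(int epoll_fd, int fd, unsigned int events, void *back_ref)
{
	struct epoll_event ev = {
		.events = events,
		.data = { .ptr = back_ref },
	};

	return epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &ev);
}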
105static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
106{
107	int i;
108	int events = 0;
109	struct irq_fd *irq;
110
111	for (i = 0; i < MAX_IRQ_TYPE ; i++) {
112		irq = irq_entry->irq_array[i];
113		if (irq != NULL)
114			events = irq->events | events;
115	}
116	if (events > 0) {
117	/* os_add_epoll will call os_mod_epoll if this already exists */
118		return os_add_epoll_fd(events, irq_entry->fd, irq_entry);
119	}
120	/* No events - delete */
121	return os_del_epoll_fd(irq_entry->fd);
122}
123
124
125
126static int activate_fd(int irq, int fd, int type, void *dev_id)
127{
128	struct irq_fd *new_fd;
129	struct irq_entry *irq_entry;
130	int i, err, events;
131	unsigned long flags;
132
133	err = os_set_fd_async(fd);
134	if (err < 0)
135		goto out;
136
137	spin_lock_irqsave(&irq_lock, flags);
138
139	/* Check if we have an entry for this fd */
140
141	err = -EBUSY;
142	for (irq_entry = active_fds;
143		irq_entry != NULL; irq_entry = irq_entry->next) {
144		if (irq_entry->fd == fd)
145			break;
146	}
147
148	if (irq_entry == NULL) {
149		/* This needs to be atomic as it may be called from an
150		 * IRQ context.
151		 */
152		irq_entry = kmalloc(sizeof(struct irq_entry), GFP_ATOMIC);
153		if (irq_entry == NULL) {
154			printk(KERN_ERR
155				"Failed to allocate new IRQ entry\n");
156			goto out_unlock;
157		}
158		irq_entry->fd = fd;
159		for (i = 0; i < MAX_IRQ_TYPE; i++)
160			irq_entry->irq_array[i] = NULL;
161		irq_entry->next = active_fds;
162		active_fds = irq_entry;
163	}
164
165	/* Check if we are trying to re-register an interrupt for a
166	 * particular fd
167	 */
168
169	if (irq_entry->irq_array[type] != NULL) {
170		printk(KERN_ERR
171			"Trying to reregister IRQ %d FD %d TYPE %d ID %p\n",
172			irq, fd, type, dev_id
173		);
174		goto out_unlock;
175	} else {
176		/* New entry for this fd */
177
178		err = -ENOMEM;
179		new_fd = kmalloc(sizeof(struct irq_fd), GFP_ATOMIC);
180		if (new_fd == NULL)
181			goto out_unlock;
182
183		events = os_event_mask(type);
184
185		*new_fd = ((struct irq_fd) {
186			.id		= dev_id,
187			.irq		= irq,
188			.type		= type,
189			.events		= events,
190			.active		= true,
191			.pending	= false,
192			.purge		= false
193		});
194		/* Turn off any IO on this fd - allows us to
195		 * avoid locking the IRQ loop
196		 */
197		os_del_epoll_fd(irq_entry->fd);
198		irq_entry->irq_array[type] = new_fd;
199	}
200
201	/* Turn back IO on with the correct (new) IO event mask */
202	assign_epoll_events_to_irq(irq_entry);
203	spin_unlock_irqrestore(&irq_lock, flags);
204	maybe_sigio_broken(fd, (type != IRQ_NONE));
205
206	return 0;
207out_unlock:
208	spin_unlock_irqrestore(&irq_lock, flags);
209out:
210	return err;
211}
212
213/*
214 * Walk the IRQ list and dispose of any unused entries.
215 * Should be done under irq_lock.
216 */
217
218static void garbage_collect_irq_entries(void)
219{
220	int i;
221	bool reap;
222	struct irq_entry *walk;
223	struct irq_entry *previous = NULL;
224	struct irq_entry *to_free;
225
226	if (active_fds == NULL)
227		return;
228	walk = active_fds;
229	while (walk != NULL) {
230		reap = true;
231		for (i = 0; i < MAX_IRQ_TYPE ; i++) {
232			if (walk->irq_array[i] != NULL) {
233				reap = false;
234				break;
235			}
236		}
237		if (reap) {
238			if (previous == NULL)
239				active_fds = walk->next;
240			else
241				previous->next = walk->next;
242			to_free = walk;
243		} else {
244			to_free = NULL;
245		}
246		walk = walk->next;
247		if (to_free != NULL)
248			kfree(to_free);
249	}
250}
251
252/*
253 * Walk the IRQ list and get the descriptor for our FD
254 */
255
256static struct irq_entry *get_irq_entry_by_fd(int fd)
257{
258	struct irq_entry *walk = active_fds;
259
260	while (walk != NULL) {
261		if (walk->fd == fd)
262			return walk;
263		walk = walk->next;
264	}
265	return NULL;
266}
267
268
269/*
270 * Walk the IRQ list and dispose of an entry for a specific
271 * device, fd and number. Note - if sharing an IRQ for read
272 * and write for the same FD it will be disposed of in either case.
273 * If this behaviour is undesirable use different IRQ ids.
274 */
275
276#define IGNORE_IRQ 1
277#define IGNORE_DEV (1<<1)
278
279static void do_free_by_irq_and_dev(
280	struct irq_entry *irq_entry,
281	unsigned int irq,
282	void *dev,
283	int flags
284)
285{
286	int i;
287	struct irq_fd *to_free;
288
289	for (i = 0; i < MAX_IRQ_TYPE ; i++) {
290		if (irq_entry->irq_array[i] != NULL) {
291			if (
292			((flags & IGNORE_IRQ) ||
293				(irq_entry->irq_array[i]->irq == irq)) &&
294			((flags & IGNORE_DEV) ||
295				(irq_entry->irq_array[i]->id == dev))
296			) {
297				/* Turn off any IO on this fd - allows us to
298				 * avoid locking the IRQ loop
299				 */
300				os_del_epoll_fd(irq_entry->fd);
301				to_free = irq_entry->irq_array[i];
302				irq_entry->irq_array[i] = NULL;
303				assign_epoll_events_to_irq(irq_entry);
304				if (to_free->active)
305					to_free->purge = true;
306				else
307					kfree(to_free);
308			}
309		}
310	}
311}
312
313void free_irq_by_fd(int fd)
314{
315	struct irq_entry *to_free;
316	unsigned long flags;
317
318	spin_lock_irqsave(&irq_lock, flags);
319	to_free = get_irq_entry_by_fd(fd);
320	if (to_free != NULL) {
321		do_free_by_irq_and_dev(
322			to_free,
323			-1,
324			NULL,
325			IGNORE_IRQ | IGNORE_DEV
326		);
327	}
328	garbage_collect_irq_entries();
329	spin_unlock_irqrestore(&irq_lock, flags);
330}
331EXPORT_SYMBOL(free_irq_by_fd);
332
333static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
334{
335	struct irq_entry *to_free;
336	unsigned long flags;
337
338	spin_lock_irqsave(&irq_lock, flags);
339	to_free = active_fds;
340	while (to_free != NULL) {
341		do_free_by_irq_and_dev(
342			to_free,
343			irq,
344			dev,
345			0
346		);
347		to_free = to_free->next;
348	}
349	garbage_collect_irq_entries();
350	spin_unlock_irqrestore(&irq_lock, flags);
351}
352
353
354void reactivate_fd(int fd, int irqnum)
355{
356	/** NOP - we do auto-EOI now **/
357}
358
359void deactivate_fd(int fd, int irqnum)
360{
361	struct irq_entry *to_free;
362	unsigned long flags;
363
364	os_del_epoll_fd(fd);
365	spin_lock_irqsave(&irq_lock, flags);
366	to_free = get_irq_entry_by_fd(fd);
367	if (to_free != NULL) {
368		do_free_by_irq_and_dev(
369			to_free,
370			irqnum,
371			NULL,
372			IGNORE_DEV
373		);
374	}
375	garbage_collect_irq_entries();
376	spin_unlock_irqrestore(&irq_lock, flags);
377	ignore_sigio_fd(fd);
378}
379EXPORT_SYMBOL(deactivate_fd);
380
381/*
382 * Called just before shutdown in order to provide a clean exec
383 * environment in case the system is rebooting.  No locking because
384 * that would cause a pointless shutdown hang if something hadn't
385 * released the lock.
386 */
387int deactivate_all_fds(void)
388{
389	unsigned long flags;
390	struct irq_entry *to_free;
391
392	spin_lock_irqsave(&irq_lock, flags);
393	/* Stop IO. The IRQ loop has no lock so this is our
394	 * only way of making sure we are safe to dispose
395	 * of all IRQ handlers
396	 */
397	os_set_ioignore();
398	to_free = active_fds;
399	while (to_free != NULL) {
400		do_free_by_irq_and_dev(
401			to_free,
402			-1,
403			NULL,
404			IGNORE_IRQ | IGNORE_DEV
405		);
406		to_free = to_free->next;
407	}
408	garbage_collect_irq_entries();
409	spin_unlock_irqrestore(&irq_lock, flags);
410	os_close_epoll_fd();
411	return 0;
412}
413
414/*
415 * do_IRQ handles all normal device IRQs (the special
416 * SMP cross-CPU interrupts have their own specific
417 * handlers).
418 */
419unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
420{
421	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
422	irq_enter();
423	generic_handle_irq(irq);
424	irq_exit();
425	set_irq_regs(old_regs);
426	return 1;
427}
428
429void um_free_irq(unsigned int irq, void *dev)
430{
431	free_irq_by_irq_and_dev(irq, dev);
432	free_irq(irq, dev);
433}
434EXPORT_SYMBOL(um_free_irq);
435
436int um_request_irq(unsigned int irq, int fd, int type,
437		   irq_handler_t handler,
438		   unsigned long irqflags, const char * devname,
439		   void *dev_id)
440{
441	int err;
442
443	if (fd != -1) {
444		err = activate_fd(irq, fd, type, dev_id);
445		if (err)
446			return err;
447	}
448
449	return request_irq(irq, handler, irqflags, devname, dev_id);
450}
451
452EXPORT_SYMBOL(um_request_irq);
453EXPORT_SYMBOL(reactivate_fd);
454
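um_request_irq() above is the registration path UML drivers use: when fd is not -1 it first wires the host file descriptor into the epoll/IRQ machinery via activate_fd(), then falls through to the generic request_irq(). A minimal usage sketch, with hypothetical driver names and an assumed IRQ number, host fd and device pointer:

static irqreturn_t my_handler(int irq, void *dev_id)
{
	/* drain the host fd that backs this interrupt, then return */
	return IRQ_HANDLED;
}

static int my_driver_init(unsigned int my_irq, int host_fd, void *my_dev)
{
	int err;

	/* IRQ_READ: raise my_irq whenever host_fd becomes readable */
	err = um_request_irq(my_irq, host_fd, IRQ_READ, my_handler,
			     IRQF_SHARED, "my-device", my_dev);
	if (err)
		return err;
	return 0;
}

Teardown would go through um_free_irq(my_irq, my_dev), which removes the fd-to-IRQ bindings and then calls free_irq().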
455/*
456 * irq_chip must define at least enable/disable and ack when
457 * the edge handler is used.
458 */
459static void dummy(struct irq_data *d)
460{
461}
462
463/* This is used for everything other than the timer. */
464static struct irq_chip normal_irq_type = {
465	.name = "SIGIO",
466	.irq_disable = dummy,
467	.irq_enable = dummy,
468	.irq_ack = dummy,
469	.irq_mask = dummy,
470	.irq_unmask = dummy,
471};
472
473static struct irq_chip SIGVTALRM_irq_type = {
474	.name = "SIGVTALRM",
475	.irq_disable = dummy,
476	.irq_enable = dummy,
477	.irq_ack = dummy,
478	.irq_mask = dummy,
479	.irq_unmask = dummy,
480};
481
482void __init init_IRQ(void)
483{
484	int i;
485
486	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
487
488
489	for (i = 1; i < NR_IRQS; i++)
490		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
491	/* Initialize EPOLL Loop */
492	os_setup_epoll();
493}
494
495/*
496 * IRQ stack entry and exit:
497 *
498 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
499 * and switch over to the IRQ stack after some preparation.  We use
500 * sigaltstack to receive signals on a separate stack from the start.
501 * These two functions make sure the rest of the kernel won't be too
502 * upset by being on a different stack.  The IRQ stack has a
503 * thread_info structure at the bottom so that current et al continue
504 * to work.
505 *
506 * to_irq_stack copies the current task's thread_info to the IRQ stack
507 * thread_info and sets the task's stack to point to the IRQ stack.
508 *
509 * from_irq_stack copies the thread_info struct back (flags may have
510 * been modified) and resets the task's stack pointer.
511 *
512 * Tricky bits -
513 *
514 * What happens when two signals race each other?  UML doesn't block
515 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
516 * could arrive while a previous one is still setting up the
517 * thread_info.
518 *
519 * There are three cases -
520 *     The first interrupt on the stack - sets up the thread_info and
521 * handles the interrupt
522 *     A nested interrupt interrupting the copying of the thread_info -
523 * can't handle the interrupt, as the stack is in an unknown state
524 *     A nested interrupt not interrupting the copying of the
525 * thread_info - doesn't do any setup, just handles the interrupt
526 *
527 * The first job is to figure out whether we interrupted stack setup.
528 * This is done by xchging the signal mask with thread_info->pending.
529 * If the value that comes back is zero, then there is no setup in
530 * progress, and the interrupt can be handled.  If the value is
531 * non-zero, then there is stack setup in progress.  In order to have
532 * the interrupt handled, we leave our signal in the mask, and it will
533 * be handled by the upper handler after it has set up the stack.
534 *
535 * Next is to figure out whether we are the outer handler or a nested
536 * one.  As part of setting up the stack, thread_info->real_thread is
537 * set to non-NULL (and is reset to NULL on exit).  This is the
538 * nesting indicator.  If it is non-NULL, then the stack is already
539 * set up and the handler can run.
540 */
541
542static unsigned long pending_mask;
543
544unsigned long to_irq_stack(unsigned long *mask_out)
545{
546	struct thread_info *ti;
547	unsigned long mask, old;
548	int nested;
549
550	mask = xchg(&pending_mask, *mask_out);
551	if (mask != 0) {
552		/*
553		 * If any interrupts come in at this point, we want to
554		 * make sure that their bits aren't lost by our
555		 * putting our bit in.  So, this loop accumulates bits
556		 * until xchg returns the same value that we put in.
557		 * When that happens, there were no new interrupts,
558		 * and pending_mask contains a bit for each interrupt
559		 * that came in.
560		 */
561		old = *mask_out;
562		do {
563			old |= mask;
564			mask = xchg(&pending_mask, old);
565		} while (mask != old);
566		return 1;
567	}
568
569	ti = current_thread_info();
570	nested = (ti->real_thread != NULL);
571	if (!nested) {
572		struct task_struct *task;
573		struct thread_info *tti;
574
575		task = cpu_tasks[ti->cpu].task;
576		tti = task_thread_info(task);
577
578		*ti = *tti;
579		ti->real_thread = tti;
580		task->stack = ti;
581	}
582
583	mask = xchg(&pending_mask, 0);
584	*mask_out |= mask | nested;
585	return 0;
586}
587
588unsigned long from_irq_stack(int nested)
589{
590	struct thread_info *ti, *to;
591	unsigned long mask;
592
593	ti = current_thread_info();
594
595	pending_mask = 1;
596
597	to = ti->real_thread;
598	current->stack = to;
599	ti->real_thread = NULL;
600	*to = *ti;
601
602	mask = xchg(&pending_mask, 0);
603	return mask & ~1;
604}
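The pending_mask handshake described in the comment above to_irq_stack() can be isolated: each signal handler publishes its bit with an atomic exchange; a zero return means no stack setup was in progress, while a non-zero return means the handler must keep merging bits back in until the exchange returns exactly what it last wrote, so that no concurrent signal's bit is lost. A standalone, userspace-flavoured sketch of that loop, using a GCC atomic builtin in place of the kernel's xchg() and illustrative names:

/* Illustrative only: example_pending_mask and accumulate() stand in for
 * pending_mask and the first half of to_irq_stack(). */
static unsigned long example_pending_mask;

static int accumulate(unsigned long my_bit)
{
	unsigned long mask, old;

	mask = __atomic_exchange_n(&example_pending_mask, my_bit, __ATOMIC_SEQ_CST);
	if (mask == 0)
		return 0;	/* no setup in progress: safe to handle now */

	/* keep folding in any bits that arrive until an exchange returns
	 * exactly what we last stored */
	old = my_bit;
	do {
		old |= mask;
		mask = __atomic_exchange_n(&example_pending_mask, old, __ATOMIC_SEQ_CST);
	} while (mask != old);
	return 1;	/* leave our bit for the outer handler to process */
}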
605
v3.1
  1/*
  2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3 * Licensed under the GPL
  4 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
  5 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
  6 */
  7
  8#include "linux/cpumask.h"
  9#include "linux/hardirq.h"
 10#include "linux/interrupt.h"
 11#include "linux/kernel_stat.h"
 12#include "linux/module.h"
 13#include "linux/sched.h"
 14#include "linux/seq_file.h"
 15#include "linux/slab.h"
 16#include "as-layout.h"
 17#include "kern_util.h"
 18#include "os.h"
 19
 20/*
 21 * This list is accessed under irq_lock, except in sigio_handler,
 22 * where it is safe from being modified.  IRQ handlers won't change it -
 23 * if an IRQ source has vanished, it will be freed by free_irqs just
 24 * before returning from sigio_handler.  That will process a separate
 25 * list of irqs to free, with its own locking, coming back here to
 26 * remove list elements, taking the irq_lock to do so.
 27 */
 28static struct irq_fd *active_fds = NULL;
 29static struct irq_fd **last_irq_ptr = &active_fds;
 30
 31extern void free_irqs(void);
 32
 33void sigio_handler(int sig, struct uml_pt_regs *regs)
 34{
 35	struct irq_fd *irq_fd;
 36	int n;
 37
 38	if (smp_sigio_handler())
 39		return;
 40
 41	while (1) {
 42		n = os_waiting_for_events(active_fds);
 43		if (n <= 0) {
 44			if (n == -EINTR)
 45				continue;
 46			else break;
 47		}
 48
 49		for (irq_fd = active_fds; irq_fd != NULL;
 50		     irq_fd = irq_fd->next) {
 51			if (irq_fd->current_events != 0) {
 52				irq_fd->current_events = 0;
 53				do_IRQ(irq_fd->irq, regs);
 54			}
 55		}
 56	}
 57
 58	free_irqs();
 59}
 60
 61static DEFINE_SPINLOCK(irq_lock);
 62
 63static int activate_fd(int irq, int fd, int type, void *dev_id)
 64{
 65	struct pollfd *tmp_pfd;
 66	struct irq_fd *new_fd, *irq_fd;
 67	unsigned long flags;
 68	int events, err, n;
 69
 70	err = os_set_fd_async(fd);
 71	if (err < 0)
 72		goto out;
 73
 74	err = -ENOMEM;
 75	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
 76	if (new_fd == NULL)
 77		goto out;
 78
 79	if (type == IRQ_READ)
 80		events = UM_POLLIN | UM_POLLPRI;
 81	else events = UM_POLLOUT;
 82	*new_fd = ((struct irq_fd) { .next  		= NULL,
 83				     .id 		= dev_id,
 84				     .fd 		= fd,
 85				     .type 		= type,
 86				     .irq 		= irq,
 87				     .events 		= events,
 88				     .current_events 	= 0 } );
 89
 90	err = -EBUSY;
 91	spin_lock_irqsave(&irq_lock, flags);
 92	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
 93		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
 94			printk(KERN_ERR "Registering fd %d twice\n", fd);
 95			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
 96			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
 97			       dev_id);
 98			goto out_unlock;
 99		}
100	}
101
102	if (type == IRQ_WRITE)
103		fd = -1;
104
105	tmp_pfd = NULL;
106	n = 0;
107
108	while (1) {
109		n = os_create_pollfd(fd, events, tmp_pfd, n);
110		if (n == 0)
111			break;
112
113		/*
114		 * n > 0
 115		 * It means we couldn't put the new pollfd into the current
 116		 * pollfds array, and tmp_pfd is NULL or too small for the new
 117		 * pollfds array.  The needed size is at least n.
118		 *
119		 * Here we have to drop the lock in order to call
120		 * kmalloc, which might sleep.
121		 * If something else came in and changed the pollfds array
 122		 * so that we still cannot put the new pollfd struct into it,
 123		 * we free the buffer tmp_pfd and try again.
124		 */
125		spin_unlock_irqrestore(&irq_lock, flags);
126		kfree(tmp_pfd);
127
128		tmp_pfd = kmalloc(n, GFP_KERNEL);
129		if (tmp_pfd == NULL)
130			goto out_kfree;
131
132		spin_lock_irqsave(&irq_lock, flags);
133	}
134
135	*last_irq_ptr = new_fd;
136	last_irq_ptr = &new_fd->next;
137
138	spin_unlock_irqrestore(&irq_lock, flags);
139
140	/*
141	 * This calls activate_fd, so it has to be outside the critical
142	 * section.
143	 */
144	maybe_sigio_broken(fd, (type == IRQ_READ));
145
146	return 0;
147
148 out_unlock:
149	spin_unlock_irqrestore(&irq_lock, flags);
150 out_kfree:
151	kfree(new_fd);
152 out:
153	return err;
154}
155
156static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
157{
158	unsigned long flags;
159
160	spin_lock_irqsave(&irq_lock, flags);
161	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
162	spin_unlock_irqrestore(&irq_lock, flags);
163}
164
165struct irq_and_dev {
166	int irq;
167	void *dev;
168};
169
170static int same_irq_and_dev(struct irq_fd *irq, void *d)
171{
172	struct irq_and_dev *data = d;
173
174	return ((irq->irq == data->irq) && (irq->id == data->dev));
175}
176
177static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
178{
179	struct irq_and_dev data = ((struct irq_and_dev) { .irq  = irq,
180							  .dev  = dev });
181
182	free_irq_by_cb(same_irq_and_dev, &data);
183}
184
185static int same_fd(struct irq_fd *irq, void *fd)
186{
187	return (irq->fd == *((int *)fd));
188}
189
190void free_irq_by_fd(int fd)
191{
192	free_irq_by_cb(same_fd, &fd);
193}
194
195/* Must be called with irq_lock held */
196static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
197{
198	struct irq_fd *irq;
199	int i = 0;
200	int fdi;
201
202	for (irq = active_fds; irq != NULL; irq = irq->next) {
203		if ((irq->fd == fd) && (irq->irq == irqnum))
204			break;
205		i++;
206	}
207	if (irq == NULL) {
208		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
209		       fd);
210		goto out;
211	}
212	fdi = os_get_pollfd(i);
213	if ((fdi != -1) && (fdi != fd)) {
214		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
215		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
216		       fdi, fd);
217		irq = NULL;
218		goto out;
219	}
220	*index_out = i;
221 out:
222	return irq;
223}
224
225void reactivate_fd(int fd, int irqnum)
226{
227	struct irq_fd *irq;
228	unsigned long flags;
229	int i;
230
231	spin_lock_irqsave(&irq_lock, flags);
232	irq = find_irq_by_fd(fd, irqnum, &i);
233	if (irq == NULL) {
234		spin_unlock_irqrestore(&irq_lock, flags);
235		return;
236	}
237	os_set_pollfd(i, irq->fd);
238	spin_unlock_irqrestore(&irq_lock, flags);
239
240	add_sigio_fd(fd);
241}
242
243void deactivate_fd(int fd, int irqnum)
244{
245	struct irq_fd *irq;
246	unsigned long flags;
247	int i;
248
249	spin_lock_irqsave(&irq_lock, flags);
250	irq = find_irq_by_fd(fd, irqnum, &i);
251	if (irq == NULL) {
252		spin_unlock_irqrestore(&irq_lock, flags);
253		return;
254	}
255
256	os_set_pollfd(i, -1);
257	spin_unlock_irqrestore(&irq_lock, flags);
258
259	ignore_sigio_fd(fd);
260}
261
262/*
263 * Called just before shutdown in order to provide a clean exec
264 * environment in case the system is rebooting.  No locking because
265 * that would cause a pointless shutdown hang if something hadn't
266 * released the lock.
267 */
268int deactivate_all_fds(void)
269{
270	struct irq_fd *irq;
271	int err;
272
273	for (irq = active_fds; irq != NULL; irq = irq->next) {
274		err = os_clear_fd_async(irq->fd);
275		if (err)
276			return err;
277	}
278	/* If a signal is already queued, ignore it after unblocking */
279	os_set_ioignore();
280
281	return 0;
282}
283
284/*
285 * do_IRQ handles all normal device IRQs (the special
286 * SMP cross-CPU interrupts have their own specific
287 * handlers).
288 */
289unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
290{
291	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
292	irq_enter();
293	generic_handle_irq(irq);
294	irq_exit();
295	set_irq_regs(old_regs);
296	return 1;
297}
298
299int um_request_irq(unsigned int irq, int fd, int type,
300		   irq_handler_t handler,
301		   unsigned long irqflags, const char * devname,
302		   void *dev_id)
303{
304	int err;
305
306	if (fd != -1) {
307		err = activate_fd(irq, fd, type, dev_id);
308		if (err)
309			return err;
310	}
311
312	return request_irq(irq, handler, irqflags, devname, dev_id);
313}
314
315EXPORT_SYMBOL(um_request_irq);
316EXPORT_SYMBOL(reactivate_fd);
317
318/*
319 * irq_chip must define at least enable/disable and ack when
320 * the edge handler is used.
321 */
322static void dummy(struct irq_data *d)
323{
324}
325
326/* This is used for everything other than the timer. */
327static struct irq_chip normal_irq_type = {
328	.name = "SIGIO",
329	.release = free_irq_by_irq_and_dev,
330	.irq_disable = dummy,
331	.irq_enable = dummy,
332	.irq_ack = dummy,
333};
334
335static struct irq_chip SIGVTALRM_irq_type = {
336	.name = "SIGVTALRM",
337	.release = free_irq_by_irq_and_dev,
338	.irq_disable = dummy,
339	.irq_enable = dummy,
340	.irq_ack = dummy,
341};
342
343void __init init_IRQ(void)
344{
345	int i;
346
347	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
348
349	for (i = 1; i < NR_IRQS; i++)
350		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
351}
352
353/*
354 * IRQ stack entry and exit:
355 *
356 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
357 * and switch over to the IRQ stack after some preparation.  We use
358 * sigaltstack to receive signals on a separate stack from the start.
359 * These two functions make sure the rest of the kernel won't be too
360 * upset by being on a different stack.  The IRQ stack has a
361 * thread_info structure at the bottom so that current et al continue
362 * to work.
363 *
364 * to_irq_stack copies the current task's thread_info to the IRQ stack
365 * thread_info and sets the task's stack to point to the IRQ stack.
366 *
367 * from_irq_stack copies the thread_info struct back (flags may have
368 * been modified) and resets the task's stack pointer.
369 *
370 * Tricky bits -
371 *
372 * What happens when two signals race each other?  UML doesn't block
373 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
374 * could arrive while a previous one is still setting up the
375 * thread_info.
376 *
377 * There are three cases -
378 *     The first interrupt on the stack - sets up the thread_info and
379 * handles the interrupt
380 *     A nested interrupt interrupting the copying of the thread_info -
381 * can't handle the interrupt, as the stack is in an unknown state
382 *     A nested interrupt not interrupting the copying of the
383 * thread_info - doesn't do any setup, just handles the interrupt
384 *
385 * The first job is to figure out whether we interrupted stack setup.
386 * This is done by xchging the signal mask with thread_info->pending.
387 * If the value that comes back is zero, then there is no setup in
388 * progress, and the interrupt can be handled.  If the value is
389 * non-zero, then there is stack setup in progress.  In order to have
390 * the interrupt handled, we leave our signal in the mask, and it will
391 * be handled by the upper handler after it has set up the stack.
392 *
393 * Next is to figure out whether we are the outer handler or a nested
394 * one.  As part of setting up the stack, thread_info->real_thread is
395 * set to non-NULL (and is reset to NULL on exit).  This is the
396 * nesting indicator.  If it is non-NULL, then the stack is already
397 * set up and the handler can run.
398 */
399
400static unsigned long pending_mask;
401
402unsigned long to_irq_stack(unsigned long *mask_out)
403{
404	struct thread_info *ti;
405	unsigned long mask, old;
406	int nested;
407
408	mask = xchg(&pending_mask, *mask_out);
409	if (mask != 0) {
410		/*
411		 * If any interrupts come in at this point, we want to
412		 * make sure that their bits aren't lost by our
413		 * putting our bit in.  So, this loop accumulates bits
414		 * until xchg returns the same value that we put in.
415		 * When that happens, there were no new interrupts,
416		 * and pending_mask contains a bit for each interrupt
417		 * that came in.
418		 */
419		old = *mask_out;
420		do {
421			old |= mask;
422			mask = xchg(&pending_mask, old);
423		} while (mask != old);
424		return 1;
425	}
426
427	ti = current_thread_info();
428	nested = (ti->real_thread != NULL);
429	if (!nested) {
430		struct task_struct *task;
431		struct thread_info *tti;
432
433		task = cpu_tasks[ti->cpu].task;
434		tti = task_thread_info(task);
435
436		*ti = *tti;
437		ti->real_thread = tti;
438		task->stack = ti;
439	}
440
441	mask = xchg(&pending_mask, 0);
442	*mask_out |= mask | nested;
443	return 0;
444}
445
446unsigned long from_irq_stack(int nested)
447{
448	struct thread_info *ti, *to;
449	unsigned long mask;
450
451	ti = current_thread_info();
452
453	pending_mask = 1;
454
455	to = ti->real_thread;
456	current->stack = to;
457	ti->real_thread = NULL;
458	*to = *ti;
459
460	mask = xchg(&pending_mask, 0);
461	return mask & ~1;
462}
463