v4.17 (arch/um/kernel/irq.c)
/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>

/* When epoll triggers we do not know why it did so;
 * we can also have different IRQs for read and write.
 * This is why we keep a small irq_fd array for each fd -
 * one entry per IRQ type.
 */

struct irq_entry {
	struct irq_entry *next;
	int fd;
	struct irq_fd *irq_array[MAX_IRQ_TYPE + 1];
};

static struct irq_entry *active_fds;

static DEFINE_SPINLOCK(irq_lock);

static void irq_io_loop(struct irq_fd *irq, struct uml_pt_regs *regs)
{
/*
 * irq->active guards against reentry
 * irq->pending accumulates pending requests
 * if pending is raised the irq_handler is re-run
 * until pending is cleared
 */
	if (irq->active) {
		irq->active = false;
		do {
			irq->pending = false;
			do_IRQ(irq->irq, regs);
		} while (irq->pending && (!irq->purge));
		if (!irq->purge)
			irq->active = true;
	} else {
		irq->pending = true;
	}
}
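
/*
 * Illustrative trace of the guard above (editor's note, not in the
 * original source): suppose IRQ event A is being handled when event B
 * triggers on the same irq_fd.
 *   A: active -> false, pending -> false, do_IRQ() runs
 *   B: reentrant call sees active == false, so it only sets pending
 *   A: the while loop sees pending, clears it and re-runs do_IRQ() for B
 *   A: loop exits with pending false; active -> true (unless purged)
 */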

void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	struct irq_entry *irq_entry;
	struct irq_fd *irq;

	int n, i, j;

	while (1) {
		/* This is now lockless - epoll keeps back-references to the
		 * irqs which have triggered it, so there is no need to walk
		 * the irq list and lock it every time. We avoid locking by
		 * turning off IO for a specific fd by executing
		 * os_del_epoll_fd(fd) before we do any changes to the actual
		 * data structures.
		 */
		n = os_waiting_for_events_epoll();

		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (i = 0; i < n ; i++) {
			/* Epoll back reference is the entry with 3 irq_fd
			 * leaves - one for each irq type.
			 */
			irq_entry = (struct irq_entry *)
				os_epoll_get_data_pointer(i);
			for (j = 0; j < MAX_IRQ_TYPE ; j++) {
				irq = irq_entry->irq_array[j];
				if (irq == NULL)
					continue;
				if (os_epoll_triggered(i, irq->events) > 0)
					irq_io_loop(irq, regs);
				if (irq->purge) {
					irq_entry->irq_array[j] = NULL;
					kfree(irq);
				}
			}
		}
	}
}

static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
{
	int i;
	int events = 0;
	struct irq_fd *irq;

	for (i = 0; i < MAX_IRQ_TYPE ; i++) {
		irq = irq_entry->irq_array[i];
		if (irq != NULL)
			events = irq->events | events;
	}
	if (events > 0) {
	/* os_add_epoll will call os_mod_epoll if this already exists */
		return os_add_epoll_fd(events, irq_entry->fd, irq_entry);
	}
	/* No events - delete */
	return os_del_epoll_fd(irq_entry->fd);
}

static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct irq_fd *new_fd;
	struct irq_entry *irq_entry;
	int i, err, events;
	unsigned long flags;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	spin_lock_irqsave(&irq_lock, flags);

	/* Check if we have an entry for this fd */

	err = -EBUSY;
	for (irq_entry = active_fds;
		irq_entry != NULL; irq_entry = irq_entry->next) {
		if (irq_entry->fd == fd)
			break;
	}

	if (irq_entry == NULL) {
		/* This needs to be atomic as it may be called from an
		 * IRQ context.
		 */
		irq_entry = kmalloc(sizeof(struct irq_entry), GFP_ATOMIC);
		if (irq_entry == NULL) {
			printk(KERN_ERR
				"Failed to allocate new IRQ entry\n");
			goto out_unlock;
		}
		irq_entry->fd = fd;
		for (i = 0; i < MAX_IRQ_TYPE; i++)
			irq_entry->irq_array[i] = NULL;
		irq_entry->next = active_fds;
		active_fds = irq_entry;
	}

	/* Check if we are trying to re-register an interrupt for a
	 * particular fd
	 */

	if (irq_entry->irq_array[type] != NULL) {
		printk(KERN_ERR
			"Trying to reregister IRQ %d FD %d TYPE %d ID %p\n",
			irq, fd, type, dev_id
		);
		goto out_unlock;
	} else {
		/* New entry for this fd */

		err = -ENOMEM;
		new_fd = kmalloc(sizeof(struct irq_fd), GFP_ATOMIC);
		if (new_fd == NULL)
			goto out_unlock;

		events = os_event_mask(type);

		*new_fd = ((struct irq_fd) {
			.id		= dev_id,
			.irq		= irq,
			.type		= type,
			.events		= events,
			.active		= true,
			.pending	= false,
			.purge		= false
		});
		/* Turn off any IO on this fd - allows us to
		 * avoid locking the IRQ loop
		 */
		os_del_epoll_fd(irq_entry->fd);
		irq_entry->irq_array[type] = new_fd;
	}

	/* Turn back IO on with the correct (new) IO event mask */
	assign_epoll_events_to_irq(irq_entry);
	spin_unlock_irqrestore(&irq_lock, flags);
	maybe_sigio_broken(fd, (type != IRQ_NONE));

	return 0;
out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
out:
	return err;
}

/*
 * Walk the IRQ list and dispose of any unused entries.
 * Should be done under irq_lock.
 */

static void garbage_collect_irq_entries(void)
{
	int i;
	bool reap;
	struct irq_entry *walk;
	struct irq_entry *previous = NULL;
	struct irq_entry *to_free;

	if (active_fds == NULL)
		return;
	walk = active_fds;
	while (walk != NULL) {
		reap = true;
		for (i = 0; i < MAX_IRQ_TYPE ; i++) {
			if (walk->irq_array[i] != NULL) {
				reap = false;
				break;
			}
		}
		if (reap) {
			if (previous == NULL)
				active_fds = walk->next;
			else
				previous->next = walk->next;
			to_free = walk;
		} else {
			to_free = NULL;
		}
		walk = walk->next;
		if (to_free != NULL)
			kfree(to_free);
	}
}

/*
 * Walk the IRQ list and get the descriptor for our FD
 */

static struct irq_entry *get_irq_entry_by_fd(int fd)
{
	struct irq_entry *walk = active_fds;

	while (walk != NULL) {
		if (walk->fd == fd)
			return walk;
		walk = walk->next;
	}
	return NULL;
}

/*
 * Walk the IRQ list and dispose of an entry for a specific
 * device, fd and number. Note - if sharing an IRQ for read
 * and write for the same FD, it will be disposed of in either case.
 * If this behaviour is undesirable use different IRQ ids.
 */

#define IGNORE_IRQ 1
#define IGNORE_DEV (1<<1)

static void do_free_by_irq_and_dev(
	struct irq_entry *irq_entry,
	unsigned int irq,
	void *dev,
	int flags
)
{
	int i;
	struct irq_fd *to_free;

	for (i = 0; i < MAX_IRQ_TYPE ; i++) {
		if (irq_entry->irq_array[i] != NULL) {
			if (
			((flags & IGNORE_IRQ) ||
				(irq_entry->irq_array[i]->irq == irq)) &&
			((flags & IGNORE_DEV) ||
				(irq_entry->irq_array[i]->id == dev))
			) {
				/* Turn off any IO on this fd - allows us to
				 * avoid locking the IRQ loop
				 */
				os_del_epoll_fd(irq_entry->fd);
				to_free = irq_entry->irq_array[i];
				irq_entry->irq_array[i] = NULL;
				assign_epoll_events_to_irq(irq_entry);
				if (to_free->active)
					to_free->purge = true;
				else
					kfree(to_free);
			}
		}
	}
}

void free_irq_by_fd(int fd)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	if (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			-1,
			NULL,
			IGNORE_IRQ | IGNORE_DEV
		);
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
}
EXPORT_SYMBOL(free_irq_by_fd);

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = active_fds;
	while (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			irq,
			dev,
			0
		);
		to_free = to_free->next;
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
}

void reactivate_fd(int fd, int irqnum)
{
	/* NOP - we do auto-EOI now */
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_entry *to_free;
	unsigned long flags;

	os_del_epoll_fd(fd);
	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	if (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			irqnum,
			NULL,
			IGNORE_DEV
		);
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	unsigned long flags;
	struct irq_entry *to_free;

	spin_lock_irqsave(&irq_lock, flags);
	/* Stop IO. The IRQ loop has no lock so this is our
	 * only way of making sure we are safe to dispose
	 * of all IRQ handlers
	 */
	os_set_ioignore();
	to_free = active_fds;
	while (to_free != NULL) {
		do_free_by_irq_and_dev(
			to_free,
			-1,
			NULL,
			IGNORE_IRQ | IGNORE_DEV
		);
		to_free = to_free->next;
	}
	garbage_collect_irq_entries();
	spin_unlock_irqrestore(&irq_lock, flags);
	os_close_epoll_fd();
	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

void um_free_irq(unsigned int irq, void *dev)
{
	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
}
EXPORT_SYMBOL(um_free_irq);

int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
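
/*
 * Hedged usage sketch (editor's addition, not part of this file): how a
 * UML driver might wire a host file descriptor into this layer with the
 * v4.17 API above. EXAMPLE_IRQ, example_intr() and the "example" name are
 * hypothetical; um_request_irq() and IRQ_READ come from this tree.
 */
#if 0	/* illustration only */
static irqreturn_t example_intr(int irq, void *dev_id)
{
	/* read from the host fd here; epoll re-arms via the SIGIO path */
	return IRQ_HANDLED;
}

static int example_setup(int fd, void *dev)
{
	/* registers fd for read events and installs the handler */
	return um_request_irq(EXAMPLE_IRQ, fd, IRQ_READ, example_intr,
			      0, "example", dev);
}
#endif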

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

static struct irq_chip SIGVTALRM_irq_type = {
	.name = "SIGVTALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);

	for (i = 1; i < NR_IRQS; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
	/* Initialize EPOLL Loop */
	os_setup_epoll();
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_NODEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with thread_info->pending.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}
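
/*
 * Illustrative trace (editor's note, not in the original source): two
 * signals racing through to_irq_stack().  The outer handler's first
 * xchg() returns 0, so it owns stack setup and copies the thread_info.
 * A nested signal arriving during the copy gets a non-zero mask back,
 * ORs its own bit into pending_mask and returns 1, deferring its
 * interrupt.  The final xchg(&pending_mask, 0) above then hands every
 * accumulated bit back to the outer handler via *mask_out.
 */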

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}

v6.2 (arch/um/kernel/irq.c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Ltd
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <irq_user.h>
#include <irq_kern.h>
#include <linux/time-internal.h>


extern void free_irqs(void);

/* When epoll triggers we do not know why it did so;
 * we can also have different IRQs for read and write.
 * This is why we keep a small irq_reg array for each fd -
 * one entry per IRQ type.
 */
struct irq_reg {
	void *id;
	int irq;
	/* it's cheaper to store this than to query it */
	int events;
	bool active;
	bool pending;
	bool wakeup;
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	bool pending_on_resume;
	void (*timetravel_handler)(int, int, void *,
				   struct time_travel_event *);
	struct time_travel_event event;
#endif
};

struct irq_entry {
	struct list_head list;
	int fd;
	struct irq_reg reg[NUM_IRQ_TYPES];
	bool suspended;
	bool sigio_workaround;
};

static DEFINE_SPINLOCK(irq_lock);
static LIST_HEAD(active_fds);
static DECLARE_BITMAP(irqs_allocated, UM_LAST_SIGNAL_IRQ);
static bool irqs_suspended;

static void irq_io_loop(struct irq_reg *irq, struct uml_pt_regs *regs)
{
/*
 * irq->active guards against reentry
 * irq->pending accumulates pending requests
 * if pending is raised the irq_handler is re-run
 * until pending is cleared
 */
	if (irq->active) {
		irq->active = false;

		do {
			irq->pending = false;
			do_IRQ(irq->irq, regs);
		} while (irq->pending);

		irq->active = true;
	} else {
		irq->pending = true;
	}
}

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
static void irq_event_handler(struct time_travel_event *ev)
{
	struct irq_reg *reg = container_of(ev, struct irq_reg, event);

	/* do nothing if suspended - just to cause a wakeup */
	if (irqs_suspended)
		return;

	generic_handle_irq(reg->irq);
}

static bool irq_do_timetravel_handler(struct irq_entry *entry,
				      enum um_irq_type t)
{
	struct irq_reg *reg = &entry->reg[t];

	if (!reg->timetravel_handler)
		return false;

	/*
	 * Handle all messages - we might get multiple even while
	 * interrupts are already suspended, due to suspend order
	 * etc. Note that time_travel_add_irq_event() will not add
	 * an event twice, if it's pending already "first wins".
	 */
	reg->timetravel_handler(reg->irq, entry->fd, reg->id, &reg->event);

	if (!reg->event.pending)
		return false;

	if (irqs_suspended)
		reg->pending_on_resume = true;
	return true;
}
#else
static bool irq_do_timetravel_handler(struct irq_entry *entry,
				      enum um_irq_type t)
{
	return false;
}
#endif

static void sigio_reg_handler(int idx, struct irq_entry *entry, enum um_irq_type t,
			      struct uml_pt_regs *regs,
			      bool timetravel_handlers_only)
{
	struct irq_reg *reg = &entry->reg[t];

	if (!reg->events)
		return;

	if (os_epoll_triggered(idx, reg->events) <= 0)
		return;

	if (irq_do_timetravel_handler(entry, t))
		return;

	/*
	 * If we're called to only run time-travel handlers then don't
	 * actually proceed but mark sigio as pending (if applicable).
	 * For suspend/resume, timetravel_handlers_only may be true
	 * despite time-travel not being configured and used.
	 */
	if (timetravel_handlers_only) {
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
		mark_sigio_pending();
#endif
		return;
	}

	irq_io_loop(reg, regs);
}

static void _sigio_handler(struct uml_pt_regs *regs,
			   bool timetravel_handlers_only)
{
	struct irq_entry *irq_entry;
	int n, i;

	if (timetravel_handlers_only && !um_irq_timetravel_handler_used())
		return;

	while (1) {
		/* This is now lockless - epoll keeps back-references to the
		 * irqs which have triggered it, so there is no need to walk
		 * the irq list and lock it every time. We avoid locking by
		 * turning off IO for a specific fd by executing
		 * os_del_epoll_fd(fd) before we do any changes to the actual
		 * data structures.
		 */
		n = os_waiting_for_events_epoll();

		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (i = 0; i < n ; i++) {
			enum um_irq_type t;

			irq_entry = os_epoll_get_data_pointer(i);

			for (t = 0; t < NUM_IRQ_TYPES; t++)
				sigio_reg_handler(i, irq_entry, t, regs,
						  timetravel_handlers_only);
		}
	}

	if (!timetravel_handlers_only)
		free_irqs();
}

void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	_sigio_handler(regs, irqs_suspended);
}

static struct irq_entry *get_irq_entry_by_fd(int fd)
{
	struct irq_entry *walk;

	lockdep_assert_held(&irq_lock);

	list_for_each_entry(walk, &active_fds, list) {
		if (walk->fd == fd)
			return walk;
	}

	return NULL;
}

static void free_irq_entry(struct irq_entry *to_free, bool remove)
{
	if (!to_free)
		return;

	if (remove)
		os_del_epoll_fd(to_free->fd);
	list_del(&to_free->list);
	kfree(to_free);
}

static bool update_irq_entry(struct irq_entry *entry)
{
	enum um_irq_type i;
	int events = 0;

	for (i = 0; i < NUM_IRQ_TYPES; i++)
		events |= entry->reg[i].events;

	if (events) {
		/* will modify (instead of add) if needed */
		os_add_epoll_fd(events, entry->fd, entry);
		return true;
	}

	os_del_epoll_fd(entry->fd);
	return false;
}

static void update_or_free_irq_entry(struct irq_entry *entry)
{
	if (!update_irq_entry(entry))
		free_irq_entry(entry, false);
}

static int activate_fd(int irq, int fd, enum um_irq_type type, void *dev_id,
		       void (*timetravel_handler)(int, int, void *,
						  struct time_travel_event *))
{
	struct irq_entry *irq_entry;
	int err, events = os_event_mask(type);
	unsigned long flags;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	spin_lock_irqsave(&irq_lock, flags);
	irq_entry = get_irq_entry_by_fd(fd);
	if (irq_entry) {
		/* cannot register the same FD twice with the same type */
		if (WARN_ON(irq_entry->reg[type].events)) {
			err = -EALREADY;
			goto out_unlock;
		}

		/* temporarily disable to avoid IRQ-side locking */
		os_del_epoll_fd(fd);
	} else {
		irq_entry = kzalloc(sizeof(*irq_entry), GFP_ATOMIC);
		if (!irq_entry) {
			err = -ENOMEM;
			goto out_unlock;
		}
		irq_entry->fd = fd;
		list_add_tail(&irq_entry->list, &active_fds);
		maybe_sigio_broken(fd);
	}

	irq_entry->reg[type].id = dev_id;
	irq_entry->reg[type].irq = irq;
	irq_entry->reg[type].active = true;
	irq_entry->reg[type].events = events;

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	if (um_irq_timetravel_handler_used()) {
		irq_entry->reg[type].timetravel_handler = timetravel_handler;
		irq_entry->reg[type].event.fn = irq_event_handler;
	}
#endif

	WARN_ON(!update_irq_entry(irq_entry));
	spin_unlock_irqrestore(&irq_lock, flags);

	return 0;
out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
out:
	return err;
}

/*
 * Remove the entry or entries for a specific FD. If you
 * don't want to remove all the possible entries, use
 * um_free_irq() or deactivate_fd() instead.
 */
void free_irq_by_fd(int fd)
{
	struct irq_entry *to_free;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	to_free = get_irq_entry_by_fd(fd);
	free_irq_entry(to_free, true);
	spin_unlock_irqrestore(&irq_lock, flags);
}
EXPORT_SYMBOL(free_irq_by_fd);

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type i;

		for (i = 0; i < NUM_IRQ_TYPES; i++) {
			struct irq_reg *reg = &entry->reg[i];

			if (!reg->events)
				continue;
			if (reg->irq != irq)
				continue;
			if (reg->id != dev)
				continue;

			os_del_epoll_fd(entry->fd);
			reg->events = 0;
			update_or_free_irq_entry(entry);
			goto out;
		}
	}
out:
	spin_unlock_irqrestore(&irq_lock, flags);
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_entry *entry;
	unsigned long flags;
	enum um_irq_type i;

	os_del_epoll_fd(fd);

	spin_lock_irqsave(&irq_lock, flags);
	entry = get_irq_entry_by_fd(fd);
	if (!entry)
		goto out;

	for (i = 0; i < NUM_IRQ_TYPES; i++) {
		if (!entry->reg[i].events)
			continue;
		if (entry->reg[i].irq == irqnum)
			entry->reg[i].events = 0;
	}

	update_or_free_irq_entry(entry);
out:
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_entry *entry;

	/* Stop IO. The IRQ loop has no lock so this is our
	 * only way of making sure we are safe to dispose
	 * of all IRQ handlers
	 */
	os_set_ioignore();

	/* we can no longer call kfree() here so just deactivate */
	list_for_each_entry(entry, &active_fds, list)
		os_del_epoll_fd(entry->fd);
	os_close_epoll_fd();
	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

void um_free_irq(int irq, void *dev)
{
	if (WARN(irq < 0 || irq > UM_LAST_SIGNAL_IRQ,
		 "freeing invalid irq %d", irq))
		return;

	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
	clear_bit(irq, irqs_allocated);
}
EXPORT_SYMBOL(um_free_irq);

static int
_um_request_irq(int irq, int fd, enum um_irq_type type,
		irq_handler_t handler, unsigned long irqflags,
		const char *devname, void *dev_id,
		void (*timetravel_handler)(int, int, void *,
					   struct time_travel_event *))
{
	int err;

	if (irq == UM_IRQ_ALLOC) {
		int i;

		for (i = UM_FIRST_DYN_IRQ; i < NR_IRQS; i++) {
			if (!test_and_set_bit(i, irqs_allocated)) {
				irq = i;
				break;
			}
		}
	}

	if (irq < 0)
		return -ENOSPC;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id, timetravel_handler);
		if (err)
			goto error;
	}

	err = request_irq(irq, handler, irqflags, devname, dev_id);
	if (err < 0)
		goto error;

	return irq;
error:
	clear_bit(irq, irqs_allocated);
	return err;
}

int um_request_irq(int irq, int fd, enum um_irq_type type,
		   irq_handler_t handler, unsigned long irqflags,
		   const char *devname, void *dev_id)
{
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, NULL);
}
EXPORT_SYMBOL(um_request_irq);
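
/*
 * Hedged usage sketch (editor's addition, not part of this file): with the
 * v6.2 API a caller may pass UM_IRQ_ALLOC and get a dynamically allocated
 * IRQ number back on success. example_intr() and the "example" name are
 * hypothetical; UM_IRQ_ALLOC, IRQ_READ and um_request_irq() come from
 * this tree.
 */
#if 0	/* illustration only */
static irqreturn_t example_intr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_setup(int fd, void *dev)
{
	int irq = um_request_irq(UM_IRQ_ALLOC, fd, IRQ_READ, example_intr,
				 0, "example", dev);

	if (irq < 0)
		return irq;
	/* keep irq around for um_free_irq(irq, dev) at teardown */
	return 0;
}
#endif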

#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
int um_request_irq_tt(int irq, int fd, enum um_irq_type type,
		      irq_handler_t handler, unsigned long irqflags,
		      const char *devname, void *dev_id,
		      void (*timetravel_handler)(int, int, void *,
						 struct time_travel_event *))
{
	return _um_request_irq(irq, fd, type, handler, irqflags,
			       devname, dev_id, timetravel_handler);
}
EXPORT_SYMBOL(um_request_irq_tt);

void sigio_run_timetravel_handlers(void)
{
	_sigio_handler(NULL, true);
}
#endif

#ifdef CONFIG_PM_SLEEP
void um_irqs_suspend(void)
{
	struct irq_entry *entry;
	unsigned long flags;

	irqs_suspended = true;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;
		bool clear = true;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)
				continue;

			/*
			 * For the SIGIO_WRITE_IRQ, which is used to handle the
			 * SIGIO workaround thread, we need special handling:
			 * enable wake for it itself, but below we tell it about
			 * any FDs that should be suspended.
			 */
			if (entry->reg[t].wakeup ||
			    entry->reg[t].irq == SIGIO_WRITE_IRQ
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
			    || entry->reg[t].timetravel_handler
#endif
			    ) {
				clear = false;
				break;
			}
		}

		if (clear) {
			entry->suspended = true;
			os_clear_fd_async(entry->fd);
			entry->sigio_workaround =
				!__ignore_sigio_fd(entry->fd);
		}
	}
	spin_unlock_irqrestore(&irq_lock, flags);
}

void um_irqs_resume(void)
{
	struct irq_entry *entry;
	unsigned long flags;

	local_irq_save(flags);
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	/*
	 * We don't need to lock anything here since we're in resume
	 * and nothing else is running, but have disabled IRQs so we
	 * don't try anything else with the interrupt list from there.
	 */
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			struct irq_reg *reg = &entry->reg[t];

			if (reg->pending_on_resume) {
				irq_enter();
				generic_handle_irq(reg->irq);
				irq_exit();
				reg->pending_on_resume = false;
			}
		}
	}
#endif

	spin_lock(&irq_lock);
	list_for_each_entry(entry, &active_fds, list) {
		if (entry->suspended) {
			int err = os_set_fd_async(entry->fd);

			WARN(err < 0, "os_set_fd_async returned %d\n", err);
			entry->suspended = false;

			if (entry->sigio_workaround) {
				err = __add_sigio_fd(entry->fd);
				WARN(err < 0, "add_sigio_returned %d\n", err);
			}
		}
	}
	spin_unlock_irqrestore(&irq_lock, flags);

	irqs_suspended = false;
	send_sigio_to_self();
}

static int normal_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	list_for_each_entry(entry, &active_fds, list) {
		enum um_irq_type t;

		for (t = 0; t < NUM_IRQ_TYPES; t++) {
			if (!entry->reg[t].events)
				continue;

			if (entry->reg[t].irq != d->irq)
				continue;
			entry->reg[t].wakeup = on;
			goto unlock;
		}
	}
unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
	return 0;
}
#else
#define normal_irq_set_wake NULL
#endif

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
	.irq_set_wake = normal_irq_set_wake,
};

static struct irq_chip alarm_irq_type = {
	.name = "SIGALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &alarm_irq_type, handle_edge_irq);

	for (i = 1; i < UM_LAST_SIGNAL_IRQ; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
	/* Initialize EPOLL Loop */
	os_setup_epoll();
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_NODEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with thread_info->pending.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}