1/*
2 * linux/kernel/irq/chip.c
3 *
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6 *
7 * This file contains the core interrupt handling code, for irq-chip
8 * based architectures.
9 *
10 * Detailed information is available in Documentation/DocBook/genericirq
11 */
12
13#include <linux/irq.h>
14#include <linux/msi.h>
15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/kernel_stat.h>
18#include <linux/irqdomain.h>
19
20#include <trace/events/irq.h>
21
22#include "internals.h"
23
24static irqreturn_t bad_chained_irq(int irq, void *dev_id)
25{
26 WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
27 return IRQ_NONE;
28}
29
30/*
31 * Chained handlers should never call the action handler on their IRQ. This
32 * default action will emit a warning if that happens.
33 */
34struct irqaction chained_action = {
35 .handler = bad_chained_irq,
36};
37
38/**
39 * irq_set_chip - set the irq chip for an irq
40 * @irq: irq number
41 * @chip: pointer to irq chip description structure
42 */
43int irq_set_chip(unsigned int irq, struct irq_chip *chip)
44{
45 unsigned long flags;
46 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
47
48 if (!desc)
49 return -EINVAL;
50
51 if (!chip)
52 chip = &no_irq_chip;
53
54 desc->irq_data.chip = chip;
55 irq_put_desc_unlock(desc, flags);
56 /*
57 * For !CONFIG_SPARSE_IRQ make the irq show up in
58 * allocated_irqs.
59 */
60 irq_mark_irq(irq);
61 return 0;
62}
63EXPORT_SYMBOL(irq_set_chip);
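
/*
 * Illustrative sketch, not part of the upstream file: an interrupt
 * controller driver usually combines irq_set_chip() with a flow handler
 * via irq_set_chip_and_handler(), typically from its irqdomain ->map()
 * callback. All foo_* names below are hypothetical stubs.
 */
static void foo_mask(struct irq_data *d)	{ /* set bit in mask register */ }
static void foo_unmask(struct irq_data *d)	{ /* clear bit in mask register */ }

static struct irq_chip foo_level_chip = {
	.name		= "FOO",
	.irq_mask	= foo_mask,
	.irq_unmask	= foo_unmask,
};

static int __maybe_unused foo_irq_map(struct irq_domain *d, unsigned int virq,
				      irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &foo_level_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}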
64
65/**
66 * irq_set_irq_type - set the irq trigger type for an irq
67 * @irq: irq number
68 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
69 */
70int irq_set_irq_type(unsigned int irq, unsigned int type)
71{
72 unsigned long flags;
73 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
74 int ret = 0;
75
76 if (!desc)
77 return -EINVAL;
78
79 ret = __irq_set_trigger(desc, type);
80 irq_put_desc_busunlock(desc, flags);
81 return ret;
82}
83EXPORT_SYMBOL(irq_set_irq_type);
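
/*
 * Illustrative example, not part of the upstream file: a consumer that does
 * not (or cannot) pass IRQF_TRIGGER_* flags to request_irq() can select the
 * trigger explicitly beforehand, e.g.:
 *
 *	irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *
 * Most drivers simply let request_irq() configure the type through the
 * IRQF_TRIGGER_* flags instead.
 */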
84
85/**
86 * irq_set_handler_data - set irq handler data for an irq
87 * @irq: Interrupt number
88 * @data: Pointer to interrupt specific data
89 *
90 * Set the hardware irq controller data for an irq
91 */
92int irq_set_handler_data(unsigned int irq, void *data)
93{
94 unsigned long flags;
95 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
96
97 if (!desc)
98 return -EINVAL;
99 desc->irq_common_data.handler_data = data;
100 irq_put_desc_unlock(desc, flags);
101 return 0;
102}
103EXPORT_SYMBOL(irq_set_handler_data);
104
105/**
106 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
107 * @irq_base: Interrupt number base
108 * @irq_offset: Interrupt number offset
109 * @entry: Pointer to MSI descriptor data
110 *
111 * Set the MSI descriptor entry for an irq at offset
112 */
113int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
114 struct msi_desc *entry)
115{
116 unsigned long flags;
117 struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
118
119 if (!desc)
120 return -EINVAL;
121 desc->irq_common_data.msi_desc = entry;
122 if (entry && !irq_offset)
123 entry->irq = irq_base;
124 irq_put_desc_unlock(desc, flags);
125 return 0;
126}
127
128/**
129 * irq_set_msi_desc - set MSI descriptor data for an irq
130 * @irq: Interrupt number
131 * @entry: Pointer to MSI descriptor data
132 *
133 * Set the MSI descriptor entry for an irq
134 */
135int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
136{
137 return irq_set_msi_desc_off(irq, 0, entry);
138}
139
140/**
141 * irq_set_chip_data - set irq chip data for an irq
142 * @irq: Interrupt number
143 * @data: Pointer to chip specific data
144 *
145 * Set the hardware irq chip data for an irq
146 */
147int irq_set_chip_data(unsigned int irq, void *data)
148{
149 unsigned long flags;
150 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
151
152 if (!desc)
153 return -EINVAL;
154 desc->irq_data.chip_data = data;
155 irq_put_desc_unlock(desc, flags);
156 return 0;
157}
158EXPORT_SYMBOL(irq_set_chip_data);
159
160struct irq_data *irq_get_irq_data(unsigned int irq)
161{
162 struct irq_desc *desc = irq_to_desc(irq);
163
164 return desc ? &desc->irq_data : NULL;
165}
166EXPORT_SYMBOL_GPL(irq_get_irq_data);
167
168static void irq_state_clr_disabled(struct irq_desc *desc)
169{
170 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
171}
172
173static void irq_state_set_disabled(struct irq_desc *desc)
174{
175 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
176}
177
178static void irq_state_clr_masked(struct irq_desc *desc)
179{
180 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
181}
182
183static void irq_state_set_masked(struct irq_desc *desc)
184{
185 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
186}
187
188int irq_startup(struct irq_desc *desc, bool resend)
189{
190 int ret = 0;
191
192 irq_state_clr_disabled(desc);
193 desc->depth = 0;
194
195 irq_domain_activate_irq(&desc->irq_data);
196 if (desc->irq_data.chip->irq_startup) {
197 ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
198 irq_state_clr_masked(desc);
199 } else {
200 irq_enable(desc);
201 }
202 if (resend)
203 check_irq_resend(desc);
204 return ret;
205}
206
207void irq_shutdown(struct irq_desc *desc)
208{
209 irq_state_set_disabled(desc);
210 desc->depth = 1;
211 if (desc->irq_data.chip->irq_shutdown)
212 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
213 else if (desc->irq_data.chip->irq_disable)
214 desc->irq_data.chip->irq_disable(&desc->irq_data);
215 else
216 desc->irq_data.chip->irq_mask(&desc->irq_data);
217 irq_domain_deactivate_irq(&desc->irq_data);
218 irq_state_set_masked(desc);
219}
220
221void irq_enable(struct irq_desc *desc)
222{
223 irq_state_clr_disabled(desc);
224 if (desc->irq_data.chip->irq_enable)
225 desc->irq_data.chip->irq_enable(&desc->irq_data);
226 else
227 desc->irq_data.chip->irq_unmask(&desc->irq_data);
228 irq_state_clr_masked(desc);
229}
230
231/**
232 * irq_disable - Mark interrupt disabled
233 * @desc: irq descriptor which should be disabled
234 *
235 * If the chip does not implement the irq_disable callback, we
236 * use a lazy disable approach. That means we mark the interrupt
237 * disabled, but leave the hardware unmasked. That's an
238 * optimization because we avoid the hardware access for the
239 * common case where no interrupt happens after we marked it
240 * disabled. If an interrupt happens, then the interrupt flow
241 * handler masks the line at the hardware level and marks it
242 * pending.
243 *
244 * If the interrupt chip does not implement the irq_disable callback,
245 * a driver can disable the lazy approach for a particular irq line by
246 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
247 * be used for devices which cannot disable the interrupt at the
248 * device level under certain circumstances and have to use
249 * disable_irq[_nosync] instead.
250 */
251void irq_disable(struct irq_desc *desc)
252{
253 irq_state_set_disabled(desc);
254 if (desc->irq_data.chip->irq_disable) {
255 desc->irq_data.chip->irq_disable(&desc->irq_data);
256 irq_state_set_masked(desc);
257 } else if (irq_settings_disable_unlazy(desc)) {
258 mask_irq(desc);
259 }
260}
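
/*
 * Illustrative example, not part of the upstream file: a driver whose device
 * keeps asserting the line while it is disabled, and which therefore cannot
 * live with the lazy approach described above, opts out per interrupt with:
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *
 * The line is then masked at the chip right away by disable_irq[_nosync]()
 * instead of lazily on the next interrupt.
 */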
261
262void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
263{
264 if (desc->irq_data.chip->irq_enable)
265 desc->irq_data.chip->irq_enable(&desc->irq_data);
266 else
267 desc->irq_data.chip->irq_unmask(&desc->irq_data);
268 cpumask_set_cpu(cpu, desc->percpu_enabled);
269}
270
271void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
272{
273 if (desc->irq_data.chip->irq_disable)
274 desc->irq_data.chip->irq_disable(&desc->irq_data);
275 else
276 desc->irq_data.chip->irq_mask(&desc->irq_data);
277 cpumask_clear_cpu(cpu, desc->percpu_enabled);
278}
279
280static inline void mask_ack_irq(struct irq_desc *desc)
281{
282 if (desc->irq_data.chip->irq_mask_ack)
283 desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
284 else {
285 desc->irq_data.chip->irq_mask(&desc->irq_data);
286 if (desc->irq_data.chip->irq_ack)
287 desc->irq_data.chip->irq_ack(&desc->irq_data);
288 }
289 irq_state_set_masked(desc);
290}
291
292void mask_irq(struct irq_desc *desc)
293{
294 if (desc->irq_data.chip->irq_mask) {
295 desc->irq_data.chip->irq_mask(&desc->irq_data);
296 irq_state_set_masked(desc);
297 }
298}
299
300void unmask_irq(struct irq_desc *desc)
301{
302 if (desc->irq_data.chip->irq_unmask) {
303 desc->irq_data.chip->irq_unmask(&desc->irq_data);
304 irq_state_clr_masked(desc);
305 }
306}
307
308void unmask_threaded_irq(struct irq_desc *desc)
309{
310 struct irq_chip *chip = desc->irq_data.chip;
311
312 if (chip->flags & IRQCHIP_EOI_THREADED)
313 chip->irq_eoi(&desc->irq_data);
314
315 if (chip->irq_unmask) {
316 chip->irq_unmask(&desc->irq_data);
317 irq_state_clr_masked(desc);
318 }
319}
320
321/*
322 * handle_nested_irq - Handle a nested irq from an irq thread
323 * @irq: the interrupt number
324 *
325 * Handle interrupts which are nested into a threaded interrupt
326 * handler. The handler function is called inside the calling
327 * thread's context.
328 */
329void handle_nested_irq(unsigned int irq)
330{
331 struct irq_desc *desc = irq_to_desc(irq);
332 struct irqaction *action;
333 irqreturn_t action_ret;
334
335 might_sleep();
336
337 raw_spin_lock_irq(&desc->lock);
338
339 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
340
341 action = desc->action;
342 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
343 desc->istate |= IRQS_PENDING;
344 goto out_unlock;
345 }
346
347 kstat_incr_irqs_this_cpu(desc);
348 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
349 raw_spin_unlock_irq(&desc->lock);
350
351 action_ret = action->thread_fn(action->irq, action->dev_id);
352 if (!noirqdebug)
353 note_interrupt(desc, action_ret);
354
355 raw_spin_lock_irq(&desc->lock);
356 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
357
358out_unlock:
359 raw_spin_unlock_irq(&desc->lock);
360}
361EXPORT_SYMBOL_GPL(handle_nested_irq);
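
/*
 * Illustrative sketch, not part of the upstream file: a slow-bus device such
 * as an I2C GPIO expander demultiplexes its child interrupts from a threaded
 * handler and dispatches them with handle_nested_irq(). The bar_* names and
 * the pending-bit retrieval are hypothetical.
 */
struct bar_expander {
	struct irq_domain *domain;
	/* regmap, locks, ... */
};

static irqreturn_t __maybe_unused bar_expander_thread(int irq, void *dev_id)
{
	struct bar_expander *bar = dev_id;
	unsigned long pending = 0;	/* placeholder: read status over I2C */
	unsigned int bit;

	for_each_set_bit(bit, &pending, 16)
		handle_nested_irq(irq_find_mapping(bar->domain, bit));

	return IRQ_HANDLED;
}
/*
 * The parent line would be requested with request_threaded_irq(parent, NULL,
 * bar_expander_thread, IRQF_ONESHOT, ...), and each child irq marked with
 * irq_set_nested_thread(virq, true) when it is mapped.
 */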
362
363static bool irq_check_poll(struct irq_desc *desc)
364{
365 if (!(desc->istate & IRQS_POLL_INPROGRESS))
366 return false;
367 return irq_wait_for_poll(desc);
368}
369
370static bool irq_may_run(struct irq_desc *desc)
371{
372 unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
373
374 /*
375 * If the interrupt is not in progress and is not an armed
376 * wakeup interrupt, proceed.
377 */
378 if (!irqd_has_set(&desc->irq_data, mask))
379 return true;
380
381 /*
382 * If the interrupt is an armed wakeup source, mark it pending
383 * and suspended, disable it and notify the pm core about the
384 * event.
385 */
386 if (irq_pm_check_wakeup(desc))
387 return false;
388
389 /*
390 * Handle a potential concurrent poll on a different core.
391 */
392 return irq_check_poll(desc);
393}
394
395/**
396 * handle_simple_irq - Simple and software-decoded IRQs.
397 * @desc: the interrupt description structure for this irq
398 *
399 * Simple interrupts are either sent from a demultiplexing interrupt
400 * handler or come from hardware, where no interrupt hardware control
401 * is necessary.
402 *
403 * Note: The caller is expected to handle the ack, clear, mask and
404 * unmask issues if necessary.
405 */
406void handle_simple_irq(struct irq_desc *desc)
407{
408 raw_spin_lock(&desc->lock);
409
410 if (!irq_may_run(desc))
411 goto out_unlock;
412
413 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
414
415 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
416 desc->istate |= IRQS_PENDING;
417 goto out_unlock;
418 }
419
420 kstat_incr_irqs_this_cpu(desc);
421 handle_irq_event(desc);
422
423out_unlock:
424 raw_spin_unlock(&desc->lock);
425}
426EXPORT_SYMBOL_GPL(handle_simple_irq);
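
/*
 * Illustrative sketch, not part of the upstream file: purely software
 * decoded interrupts that need no hardware control are typically set up
 * with dummy_irq_chip and this flow handler; the demultiplexer then raises
 * them with generic_handle_irq(). The map callback name is hypothetical.
 */
static int __maybe_unused simple_demux_map(struct irq_domain *d,
					   unsigned int virq,
					   irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
	irq_set_noprobe(virq);
	return 0;
}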
427
428/**
429 * handle_untracked_irq - Simple and software-decoded IRQs.
430 * @desc: the interrupt description structure for this irq
431 *
432 * Untracked interrupts are sent from a demultiplexing interrupt
433 * handler when the demultiplexer does not know which device in its
434 * multiplexed irq domain generated the interrupt. IRQs handled
435 * through here are not subjected to stats tracking, randomness, or
436 * spurious interrupt detection.
437 *
438 * Note: Like handle_simple_irq, the caller is expected to handle
439 * the ack, clear, mask and unmask issues if necessary.
440 */
441void handle_untracked_irq(struct irq_desc *desc)
442{
443 unsigned int flags = 0;
444
445 raw_spin_lock(&desc->lock);
446
447 if (!irq_may_run(desc))
448 goto out_unlock;
449
450 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
451
452 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
453 desc->istate |= IRQS_PENDING;
454 goto out_unlock;
455 }
456
457 desc->istate &= ~IRQS_PENDING;
458 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
459 raw_spin_unlock(&desc->lock);
460
461 __handle_irq_event_percpu(desc, &flags);
462
463 raw_spin_lock(&desc->lock);
464 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
465
466out_unlock:
467 raw_spin_unlock(&desc->lock);
468}
469EXPORT_SYMBOL_GPL(handle_untracked_irq);
470
471/*
472 * Called unconditionally from handle_level_irq() and only for oneshot
473 * interrupts from handle_fasteoi_irq()
474 */
475static void cond_unmask_irq(struct irq_desc *desc)
476{
477 /*
478 * We need to unmask in the following cases:
479 * - Standard level irq (IRQF_ONESHOT is not set)
480 * - Oneshot irq which did not wake the thread (caused by a
481 * spurious interrupt or a primary handler handling it
482 * completely).
483 */
484 if (!irqd_irq_disabled(&desc->irq_data) &&
485 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
486 unmask_irq(desc);
487}
488
489/**
490 * handle_level_irq - Level type irq handler
491 * @desc: the interrupt description structure for this irq
492 *
493 * Level type interrupts are active as long as the hardware line has
494 * the active level. This may require masking the interrupt and unmasking
495 * it after the associated handler has acknowledged the device, so the
496 * interrupt line is back to inactive.
497 */
498void handle_level_irq(struct irq_desc *desc)
499{
500 raw_spin_lock(&desc->lock);
501 mask_ack_irq(desc);
502
503 if (!irq_may_run(desc))
504 goto out_unlock;
505
506 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
507
508 /*
509 * If it's disabled or no action is available
510 * keep it masked and get out of here
511 */
512 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
513 desc->istate |= IRQS_PENDING;
514 goto out_unlock;
515 }
516
517 kstat_incr_irqs_this_cpu(desc);
518 handle_irq_event(desc);
519
520 cond_unmask_irq(desc);
521
522out_unlock:
523 raw_spin_unlock(&desc->lock);
524}
525EXPORT_SYMBOL_GPL(handle_level_irq);
526
527#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
528static inline void preflow_handler(struct irq_desc *desc)
529{
530 if (desc->preflow_handler)
531 desc->preflow_handler(&desc->irq_data);
532}
533#else
534static inline void preflow_handler(struct irq_desc *desc) { }
535#endif
536
537static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
538{
539 if (!(desc->istate & IRQS_ONESHOT)) {
540 chip->irq_eoi(&desc->irq_data);
541 return;
542 }
543 /*
544 * We need to unmask in the following cases:
545 * - Oneshot irq which did not wake the thread (caused by a
546 * spurious interrupt or a primary handler handling it
547 * completely).
548 */
549 if (!irqd_irq_disabled(&desc->irq_data) &&
550 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
551 chip->irq_eoi(&desc->irq_data);
552 unmask_irq(desc);
553 } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
554 chip->irq_eoi(&desc->irq_data);
555 }
556}
557
558/**
559 * handle_fasteoi_irq - irq handler for transparent controllers
560 * @desc: the interrupt description structure for this irq
561 *
562 * Only a single callback will be issued to the chip: an ->eoi()
563 * call when the interrupt has been serviced. This enables support
564 * for modern forms of interrupt handlers, which handle the flow
565 * details in hardware, transparently.
566 */
567void handle_fasteoi_irq(struct irq_desc *desc)
568{
569 struct irq_chip *chip = desc->irq_data.chip;
570
571 raw_spin_lock(&desc->lock);
572
573 if (!irq_may_run(desc))
574 goto out;
575
576 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
577
578 /*
579 * If it's disabled or no action is available
580 * then mask it and get out of here:
581 */
582 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
583 desc->istate |= IRQS_PENDING;
584 mask_irq(desc);
585 goto out;
586 }
587
588 kstat_incr_irqs_this_cpu(desc);
589 if (desc->istate & IRQS_ONESHOT)
590 mask_irq(desc);
591
592 preflow_handler(desc);
593 handle_irq_event(desc);
594
595 cond_unmask_eoi_irq(desc, chip);
596
597 raw_spin_unlock(&desc->lock);
598 return;
599out:
600 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
601 chip->irq_eoi(&desc->irq_data);
602 raw_spin_unlock(&desc->lock);
603}
604EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
605
606/**
607 * handle_edge_irq - edge type IRQ handler
608 * @desc: the interrupt description structure for this irq
609 *
610 * The interrupt occurs on the falling and/or rising edge of a hardware
611 * signal. The occurrence is latched into the irq controller hardware
612 * and must be acked in order to be re-enabled. After the ack another
613 * interrupt can happen on the same source even before the first one
614 * is handled by the associated event handler. If this happens it
615 * might be necessary to disable (mask) the interrupt depending on the
616 * controller hardware. This requires re-enabling the interrupt inside
617 * of the loop which handles the interrupts which have arrived while
618 * the handler was running. If all pending interrupts are handled, the
619 * loop is left.
620 */
621void handle_edge_irq(struct irq_desc *desc)
622{
623 raw_spin_lock(&desc->lock);
624
625 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
626
627 if (!irq_may_run(desc)) {
628 desc->istate |= IRQS_PENDING;
629 mask_ack_irq(desc);
630 goto out_unlock;
631 }
632
633 /*
634 * If it's disabled or no action is available then mask it and get
635 * out of here.
636 */
637 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
638 desc->istate |= IRQS_PENDING;
639 mask_ack_irq(desc);
640 goto out_unlock;
641 }
642
643 kstat_incr_irqs_this_cpu(desc);
644
645 /* Start handling the irq */
646 desc->irq_data.chip->irq_ack(&desc->irq_data);
647
648 do {
649 if (unlikely(!desc->action)) {
650 mask_irq(desc);
651 goto out_unlock;
652 }
653
654 /*
655 * When another irq arrived while we were handling
656 * one, we could have masked the irq.
657 * Re-enable it, if it was not disabled in the meantime.
658 */
659 if (unlikely(desc->istate & IRQS_PENDING)) {
660 if (!irqd_irq_disabled(&desc->irq_data) &&
661 irqd_irq_masked(&desc->irq_data))
662 unmask_irq(desc);
663 }
664
665 handle_irq_event(desc);
666
667 } while ((desc->istate & IRQS_PENDING) &&
668 !irqd_irq_disabled(&desc->irq_data));
669
670out_unlock:
671 raw_spin_unlock(&desc->lock);
672}
673EXPORT_SYMBOL(handle_edge_irq);
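
/*
 * Illustrative sketch, not part of the upstream file: this flow handler
 * invokes the chip's ->irq_ack() unconditionally, so an edge-type chip must
 * provide it in addition to mask/unmask. The baz_* callbacks are
 * hypothetical stubs.
 */
static void baz_ack(struct irq_data *d)	{ /* clear the latched status bit */ }
static void baz_mask(struct irq_data *d)	{ /* set bit in mask register */ }
static void baz_unmask(struct irq_data *d)	{ /* clear bit in mask register */ }

static struct irq_chip baz_edge_chip __maybe_unused = {
	.name		= "BAZ",
	.irq_ack	= baz_ack,
	.irq_mask	= baz_mask,
	.irq_unmask	= baz_unmask,
};
/* Installed with irq_set_chip_and_handler(virq, &baz_edge_chip, handle_edge_irq). */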
674
675#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
676/**
677 * handle_edge_eoi_irq - edge eoi type IRQ handler
678 * @desc: the interrupt description structure for this irq
679 *
680 * Similar to handle_edge_irq above, but using eoi and w/o the
681 * mask/unmask logic.
682 */
683void handle_edge_eoi_irq(struct irq_desc *desc)
684{
685 struct irq_chip *chip = irq_desc_get_chip(desc);
686
687 raw_spin_lock(&desc->lock);
688
689 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
690
691 if (!irq_may_run(desc)) {
692 desc->istate |= IRQS_PENDING;
693 goto out_eoi;
694 }
695
696 /*
697 * If it's disabled or no action is available then mark it pending and
698 * get out of here.
699 */
700 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
701 desc->istate |= IRQS_PENDING;
702 goto out_eoi;
703 }
704
705 kstat_incr_irqs_this_cpu(desc);
706
707 do {
708 if (unlikely(!desc->action))
709 goto out_eoi;
710
711 handle_irq_event(desc);
712
713 } while ((desc->istate & IRQS_PENDING) &&
714 !irqd_irq_disabled(&desc->irq_data));
715
716out_eoi:
717 chip->irq_eoi(&desc->irq_data);
718 raw_spin_unlock(&desc->lock);
719}
720#endif
721
722/**
723 * handle_percpu_irq - Per CPU local irq handler
724 * @desc: the interrupt description structure for this irq
725 *
726 * Per CPU interrupts on SMP machines without locking requirements
727 */
728void handle_percpu_irq(struct irq_desc *desc)
729{
730 struct irq_chip *chip = irq_desc_get_chip(desc);
731
732 kstat_incr_irqs_this_cpu(desc);
733
734 if (chip->irq_ack)
735 chip->irq_ack(&desc->irq_data);
736
737 handle_irq_event_percpu(desc);
738
739 if (chip->irq_eoi)
740 chip->irq_eoi(&desc->irq_data);
741}
742
743/**
744 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
745 * @desc: the interrupt description structure for this irq
746 *
747 * Per CPU interrupts on SMP machines without locking requirements. Same as
748 * handle_percpu_irq() above but with the following extras:
749 *
750 * action->percpu_dev_id is a pointer to percpu variables which
751 * contain the real device id for the cpu on which this handler is
752 * called
753 */
754void handle_percpu_devid_irq(struct irq_desc *desc)
755{
756 struct irq_chip *chip = irq_desc_get_chip(desc);
757 struct irqaction *action = desc->action;
758 unsigned int irq = irq_desc_get_irq(desc);
759 irqreturn_t res;
760
761 kstat_incr_irqs_this_cpu(desc);
762
763 if (chip->irq_ack)
764 chip->irq_ack(&desc->irq_data);
765
766 if (likely(action)) {
767 trace_irq_handler_entry(irq, action);
768 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
769 trace_irq_handler_exit(irq, action, res);
770 } else {
771 unsigned int cpu = smp_processor_id();
772 bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
773
774 if (enabled)
775 irq_percpu_disable(desc, cpu);
776
777 pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
778 enabled ? " and unmasked" : "", irq, cpu);
779 }
780
781 if (chip->irq_eoi)
782 chip->irq_eoi(&desc->irq_data);
783}
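
/*
 * Illustrative sketch, not part of the upstream file: per-CPU device-id
 * interrupts (e.g. an architected per-cpu timer) are requested once with a
 * percpu dev_id and then enabled on each CPU that should receive them. The
 * qux_* names are hypothetical.
 */
static DEFINE_PER_CPU(int, qux_dev);

static irqreturn_t qux_handler(int irq, void *dev_id)
{
	int *my_dev = dev_id;	/* this CPU's instance of qux_dev */

	(void)my_dev;
	return IRQ_HANDLED;
}

static void __maybe_unused qux_setup(unsigned int irq)
{
	if (!request_percpu_irq(irq, qux_handler, "qux", &qux_dev))
		enable_percpu_irq(irq, IRQ_TYPE_NONE);	/* enables the calling CPU */
}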
784
785static void
786__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
787 int is_chained, const char *name)
788{
789 if (!handle) {
790 handle = handle_bad_irq;
791 } else {
792 struct irq_data *irq_data = &desc->irq_data;
793#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
794 /*
795 * With hierarchical domains we might run into a
796 * situation where the outermost chip is not yet set
797 * up, but the inner chips are there. Instead of
798 * bailing we install the handler, but obviously we
799 * cannot enable/startup the interrupt at this point.
800 */
801 while (irq_data) {
802 if (irq_data->chip != &no_irq_chip)
803 break;
804 /*
805 * Bail out if the outer chip is not set up
806 * and the interrupt is supposed to be started
807 * right away.
808 */
809 if (WARN_ON(is_chained))
810 return;
811 /* Try the parent */
812 irq_data = irq_data->parent_data;
813 }
814#endif
815 if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
816 return;
817 }
818
819 /* Uninstall? */
820 if (handle == handle_bad_irq) {
821 if (desc->irq_data.chip != &no_irq_chip)
822 mask_ack_irq(desc);
823 irq_state_set_disabled(desc);
824 if (is_chained)
825 desc->action = NULL;
826 desc->depth = 1;
827 }
828 desc->handle_irq = handle;
829 desc->name = name;
830
831 if (handle != handle_bad_irq && is_chained) {
832 unsigned int type = irqd_get_trigger_type(&desc->irq_data);
833
834 /*
835 * We're about to start this interrupt immediately,
836 * hence the need to set the trigger configuration.
837 * But the .set_type callback may have overridden the
838 * flow handler, ignoring that we're dealing with a
839 * chained interrupt. Reset it immediately because we
840 * do know better.
841 */
842 if (type != IRQ_TYPE_NONE) {
843 __irq_set_trigger(desc, type);
844 desc->handle_irq = handle;
845 }
846
847 irq_settings_set_noprobe(desc);
848 irq_settings_set_norequest(desc);
849 irq_settings_set_nothread(desc);
850 desc->action = &chained_action;
851 irq_startup(desc, true);
852 }
853}
854
855void
856__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
857 const char *name)
858{
859 unsigned long flags;
860 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
861
862 if (!desc)
863 return;
864
865 __irq_do_set_handler(desc, handle, is_chained, name);
866 irq_put_desc_busunlock(desc, flags);
867}
868EXPORT_SYMBOL_GPL(__irq_set_handler);
869
870void
871irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
872 void *data)
873{
874 unsigned long flags;
875 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
876
877 if (!desc)
878 return;
879
880 __irq_do_set_handler(desc, handle, 1, NULL);
881 desc->irq_common_data.handler_data = data;
882
883 irq_put_desc_busunlock(desc, flags);
884}
885EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
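
/*
 * Illustrative sketch, not part of the upstream file: a GPIO bank demux
 * driver hooks its parent line with a chained flow handler and retrieves the
 * data set here via irq_desc_get_handler_data(). The foo_gpio_* names and
 * the pending-register read are hypothetical; a real driver would normally
 * bracket the loop with chained_irq_enter()/chained_irq_exit() from
 * <linux/irqchip/chained_irq.h>.
 */
struct foo_gpio {
	struct irq_domain *domain;
	/* register base, lock, ... */
};

static void foo_gpio_demux(struct irq_desc *desc)
{
	struct foo_gpio *bank = irq_desc_get_handler_data(desc);
	unsigned long pending = 0;	/* placeholder: read bank status register */
	unsigned int bit;

	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(irq_find_mapping(bank->domain, bit));
}

static void __maybe_unused foo_gpio_hook_parent(struct foo_gpio *bank,
						unsigned int parent_irq)
{
	irq_set_chained_handler_and_data(parent_irq, foo_gpio_demux, bank);
}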
886
887void
888irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
889 irq_flow_handler_t handle, const char *name)
890{
891 irq_set_chip(irq, chip);
892 __irq_set_handler(irq, handle, 0, name);
893}
894EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
895
896void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
897{
898 unsigned long flags;
899 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
900
901 if (!desc)
902 return;
903 irq_settings_clr_and_set(desc, clr, set);
904
905 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
906 IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
907 if (irq_settings_has_no_balance_set(desc))
908 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
909 if (irq_settings_is_per_cpu(desc))
910 irqd_set(&desc->irq_data, IRQD_PER_CPU);
911 if (irq_settings_can_move_pcntxt(desc))
912 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
913 if (irq_settings_is_level(desc))
914 irqd_set(&desc->irq_data, IRQD_LEVEL);
915
916 irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
917
918 irq_put_desc_unlock(desc, flags);
919}
920EXPORT_SYMBOL_GPL(irq_modify_status);
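
/*
 * Illustrative example, not part of the upstream file: controller drivers
 * use this to adjust per-irq settings, e.g. to keep an internal line away
 * from autoprobing and from request_irq():
 *
 *	irq_modify_status(virq, 0, IRQ_NOPROBE | IRQ_NOREQUEST);
 *
 * Swapping the clr/set arguments lifts the restrictions again.
 */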
921
922/**
923 * irq_cpu_online - Invoke all irq_cpu_online functions.
924 *
925 * Iterate through all irqs and invoke the chip.irq_cpu_online()
926 * for each.
927 */
928void irq_cpu_online(void)
929{
930 struct irq_desc *desc;
931 struct irq_chip *chip;
932 unsigned long flags;
933 unsigned int irq;
934
935 for_each_active_irq(irq) {
936 desc = irq_to_desc(irq);
937 if (!desc)
938 continue;
939
940 raw_spin_lock_irqsave(&desc->lock, flags);
941
942 chip = irq_data_get_irq_chip(&desc->irq_data);
943 if (chip && chip->irq_cpu_online &&
944 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
945 !irqd_irq_disabled(&desc->irq_data)))
946 chip->irq_cpu_online(&desc->irq_data);
947
948 raw_spin_unlock_irqrestore(&desc->lock, flags);
949 }
950}
951
952/**
953 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
954 *
955 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
956 * for each.
957 */
958void irq_cpu_offline(void)
959{
960 struct irq_desc *desc;
961 struct irq_chip *chip;
962 unsigned long flags;
963 unsigned int irq;
964
965 for_each_active_irq(irq) {
966 desc = irq_to_desc(irq);
967 if (!desc)
968 continue;
969
970 raw_spin_lock_irqsave(&desc->lock, flags);
971
972 chip = irq_data_get_irq_chip(&desc->irq_data);
973 if (chip && chip->irq_cpu_offline &&
974 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
975 !irqd_irq_disabled(&desc->irq_data)))
976 chip->irq_cpu_offline(&desc->irq_data);
977
978 raw_spin_unlock_irqrestore(&desc->lock, flags);
979 }
980}
981
982#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
983/**
984 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
985 * NULL)
986 * @data: Pointer to interrupt specific data
987 */
988void irq_chip_enable_parent(struct irq_data *data)
989{
990 data = data->parent_data;
991 if (data->chip->irq_enable)
992 data->chip->irq_enable(data);
993 else
994 data->chip->irq_unmask(data);
995}
996
997/**
998 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
999 * NULL)
1000 * @data: Pointer to interrupt specific data
1001 */
1002void irq_chip_disable_parent(struct irq_data *data)
1003{
1004 data = data->parent_data;
1005 if (data->chip->irq_disable)
1006 data->chip->irq_disable(data);
1007 else
1008 data->chip->irq_mask(data);
1009}
1010
1011/**
1012 * irq_chip_ack_parent - Acknowledge the parent interrupt
1013 * @data: Pointer to interrupt specific data
1014 */
1015void irq_chip_ack_parent(struct irq_data *data)
1016{
1017 data = data->parent_data;
1018 data->chip->irq_ack(data);
1019}
1020EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
1021
1022/**
1023 * irq_chip_mask_parent - Mask the parent interrupt
1024 * @data: Pointer to interrupt specific data
1025 */
1026void irq_chip_mask_parent(struct irq_data *data)
1027{
1028 data = data->parent_data;
1029 data->chip->irq_mask(data);
1030}
1031EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
1032
1033/**
1034 * irq_chip_unmask_parent - Unmask the parent interrupt
1035 * @data: Pointer to interrupt specific data
1036 */
1037void irq_chip_unmask_parent(struct irq_data *data)
1038{
1039 data = data->parent_data;
1040 data->chip->irq_unmask(data);
1041}
1042EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
1043
1044/**
1045 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1046 * @data: Pointer to interrupt specific data
1047 */
1048void irq_chip_eoi_parent(struct irq_data *data)
1049{
1050 data = data->parent_data;
1051 data->chip->irq_eoi(data);
1052}
1053EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
1054
1055/**
1056 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1057 * @data: Pointer to interrupt specific data
1058 * @dest: The affinity mask to set
1059 * @force: Flag to enforce setting (disable online checks)
1060 *
1061 * Conditional, as the underlying parent chip might not implement it.
1062 */
1063int irq_chip_set_affinity_parent(struct irq_data *data,
1064 const struct cpumask *dest, bool force)
1065{
1066 data = data->parent_data;
1067 if (data->chip->irq_set_affinity)
1068 return data->chip->irq_set_affinity(data, dest, force);
1069
1070 return -ENOSYS;
1071}
1072
1073/**
1074 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1075 * @data: Pointer to interrupt specific data
1076 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1077 *
1078 * Conditional, as the underlying parent chip might not implement it.
1079 */
1080int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
1081{
1082 data = data->parent_data;
1083
1084 if (data->chip->irq_set_type)
1085 return data->chip->irq_set_type(data, type);
1086
1087 return -ENOSYS;
1088}
1089EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
1090
1091/**
1092 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1093 * @data: Pointer to interrupt specific data
1094 *
1095 * Iterate through the domain hierarchy of the interrupt and check
1096 * whether a hw retrigger function exists. If yes, invoke it.
1097 */
1098int irq_chip_retrigger_hierarchy(struct irq_data *data)
1099{
1100 for (data = data->parent_data; data; data = data->parent_data)
1101 if (data->chip && data->chip->irq_retrigger)
1102 return data->chip->irq_retrigger(data);
1103
1104 return 0;
1105}
1106
1107/**
1108 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1109 * @data: Pointer to interrupt specific data
1110 * @vcpu_info: The vcpu affinity information
1111 */
1112int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1113{
1114 data = data->parent_data;
1115 if (data->chip->irq_set_vcpu_affinity)
1116 return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
1117
1118 return -ENOSYS;
1119}
1120
1121/**
1122 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1123 * @data: Pointer to interrupt specific data
1124 * @on: Whether to set or reset the wake-up capability of this irq
1125 *
1126 * Conditional, as the underlying parent chip might not implement it.
1127 */
1128int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1129{
1130 data = data->parent_data;
1131 if (data->chip->irq_set_wake)
1132 return data->chip->irq_set_wake(data, on);
1133
1134 return -ENOSYS;
1135}
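
/*
 * Illustrative sketch, not part of the upstream file: in a hierarchical
 * setup a child chip that has no control of its own for an operation simply
 * forwards it to the parent with the helpers above, e.g.:
 */
static struct irq_chip foo_msi_child_chip __maybe_unused = {
	.name			= "FOO-MSI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
};
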
1136#endif
1137
1138/**
1139 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
1140 * @data: Pointer to interrupt specific data
1141 * @msg: Pointer to the MSI message
1142 *
1143 * For hierarchical domains we find the first chip in the hierarchy
1144 * which implements the irq_compose_msi_msg callback. For
1145 * non-hierarchical domains we use the top level chip.
1146 */
1147int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1148{
1149 struct irq_data *pos = NULL;
1150
1151#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1152 for (; data; data = data->parent_data)
1153#endif
1154 if (data->chip && data->chip->irq_compose_msi_msg)
1155 pos = data;
1156 if (!pos)
1157 return -ENOSYS;
1158
1159 pos->chip->irq_compose_msi_msg(pos, msg);
1160
1161 return 0;
1162}
1163
1164/**
1165 * irq_chip_pm_get - Enable power for an IRQ chip
1166 * @data: Pointer to interrupt specific data
1167 *
1168 * Enable the power to the IRQ chip referenced by the interrupt data
1169 * structure.
1170 */
1171int irq_chip_pm_get(struct irq_data *data)
1172{
1173 int retval;
1174
1175 if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
1176 retval = pm_runtime_get_sync(data->chip->parent_device);
1177 if (retval < 0) {
1178 pm_runtime_put_noidle(data->chip->parent_device);
1179 return retval;
1180 }
1181 }
1182
1183 return 0;
1184}
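
/*
 * Illustrative example, not part of the upstream file: an irqchip that lives
 * in a runtime-PM managed device opts into this by setting parent_device;
 * the core then brackets request and release of its interrupts with
 * irq_chip_pm_get()/irq_chip_pm_put(). The init helper below is hypothetical.
 */
static void __maybe_unused foo_irqchip_init_pm(struct irq_chip *chip,
					       struct device *dev)
{
	chip->parent_device = dev;	/* powered up while its irqs are in use */
}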
1185
1186/**
1187 * irq_chip_pm_put - Disable power for an IRQ chip
1188 * @data: Pointer to interrupt specific data
1189 *
1190 * Disable the power to the IRQ chip referenced by the interrupt data
1191 * structure. Note that power will only be disabled once this
1192 * function has been called for all IRQs that have called irq_chip_pm_get().
1193 */
1194int irq_chip_pm_put(struct irq_data *data)
1195{
1196 int retval = 0;
1197
1198 if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
1199 retval = pm_runtime_put(data->chip->parent_device);
1200
1201 return (retval < 0) ? retval : 0;
1202}