/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
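
/*
 * Example (illustrative sketch, not part of the original file): a
 * hypothetical controller driver binding its irq_chip to a linear
 * range of irqs at probe time. foo_chip, foo_mask/foo_unmask and the
 * FOO_* constants are all invented for illustration.
 *
 *	static struct irq_chip foo_chip = {
 *		.name		= "FOO",
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *	};
 *
 *	for (i = 0; i < FOO_NR_IRQS; i++)
 *		irq_set_chip(foo_irq_base + i, &foo_chip);
 */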

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
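
/*
 * Example (sketch, not from this file): a consumer that needs a
 * rising-edge trigger configures the line before requesting it;
 * "irq" and "ret" are locals in the hypothetical caller.
 *
 *	ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	if (ret)
 *		return ret;
 */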

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
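
/*
 * Example (sketch): chip_data is the usual way for irq_chip callbacks
 * to reach per-controller state. struct foo_gc, its fields and the
 * FOO_MASK_SET register are invented for illustration.
 *
 *	irq_set_chip_data(irq, gc);
 *
 *	static void foo_mask(struct irq_data *d)
 *	{
 *		struct foo_gc *gc = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->irq - gc->irq_base), gc->base + FOO_MASK_SET);
 *	}
 */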

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	if (chip->irq_unmask) {
		chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
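
/*
 * Example (sketch): an I2C GPIO expander whose own threaded handler
 * demultiplexes its child interrupts. The children were set up with
 * irq_set_nested_thread(), so their thread_fn handlers run here, in
 * the parent's thread context. struct foo_exp and its fields are
 * invented for illustration.
 *
 *	static irqreturn_t foo_exp_thread(int irq, void *data)
 *	{
 *		struct foo_exp *exp = data;
 *		unsigned long pending = foo_exp_read_status(exp);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, exp->nr_irqs)
 *			handle_nested_irq(exp->irq_base + bit);
 *		return IRQ_HANDLED;
 *	}
 */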

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
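
/*
 * Example (sketch): a chained demultiplex handler whose children were
 * installed with handle_simple_irq. As the note above says, the demux
 * handler does the hardware ack itself. struct foo_mux and the FOO_*
 * registers are invented for illustration.
 *
 *	static void foo_demux(unsigned int irq, struct irq_desc *desc)
 *	{
 *		struct foo_mux *mux = irq_desc_get_handler_data(desc);
 *		u32 pending = readl(mux->base + FOO_STATUS);
 *
 *		while (pending) {
 *			int bit = __ffs(pending);
 *
 *			writel(BIT(bit), mux->base + FOO_ACK);
 *			generic_handle_irq(mux->irq_base + bit);
 *			pending &= ~BIT(bit);
 *		}
 *	}
 */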

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it is disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
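
/*
 * Example (sketch): installing the level flow handler for a line whose
 * chip provides irq_mask/irq_unmask (and optionally irq_ack, used by
 * mask_ack_irq() above). foo_chip is the invented chip from earlier.
 *
 *	irq_set_chip_and_handler_name(irq, &foo_chip, handle_level_irq,
 *				      "level");
 */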

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it is disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
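
/*
 * Example (sketch): a GIC-style chip where the only flow callback the
 * core needs beyond mask/unmask is ->irq_eoi(); handle_fasteoi_irq is
 * then the natural flow handler. ->irq_eoi() is mandatory here, since
 * the handler calls it on every path. All names are invented.
 *
 *	static struct irq_chip foo_eoi_chip = {
 *		.name		= "FOO-EOI",
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *		.irq_eoi	= foo_eoi,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &foo_eoi_chip, handle_fasteoi_irq);
 */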

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * The interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires reenabling the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it is disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
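
/*
 * Example (sketch): an edge chip must provide ->irq_ack(), which this
 * handler invokes unconditionally once it commits to handling the
 * interrupt. A typical pairing, with foo_edge_chip invented:
 *
 *	irq_set_chip_and_handler(irq, &foo_edge_chip, handle_edge_irq);
 *	irq_set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
 */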

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq() above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it is disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
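
/*
 * Example (sketch): per-cpu-devid interrupts are marked with
 * irq_set_percpu_devid() and then requested once, system-wide, with
 * request_percpu_irq(); each CPU enables its own copy afterwards.
 * foo_timer_handler and percpu_foo_evt are invented names.
 *
 *	irq_set_percpu_devid(irq);
 *	err = request_percpu_irq(irq, foo_timer_handler, "foo_timer",
 *				 &percpu_foo_evt);
 *	if (!err)
 *		enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */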

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);
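
/*
 * Example (sketch): drivers normally reach this through a wrapper such
 * as irq_set_chained_handler(), which passes is_chained == 1. Since a
 * chained handler is started up immediately, any handler data has to
 * be installed first:
 *
 *	irq_set_handler_data(parent_irq, mux);
 *	irq_set_chained_handler(parent_irq, foo_demux);
 */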

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
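
/*
 * Example (sketch): clearing one status flag while setting another in
 * a single call, here making an irq requestable but excluding it from
 * autoprobing:
 *
 *	irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);
 */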

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}