// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip based
 * architectures. Detailed information is available in
 * Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call an action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
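
/*
 * Illustrative sketch (not part of the kernel sources): a minimal irqchip
 * driver would typically pair irq_set_chip() with a flow handler from its
 * domain map callback. The demo_* names below are assumptions made for
 * this example only.
 *
 *	static struct irq_chip demo_chip = {
 *		.name		= "DEMO",
 *		.irq_mask	= demo_mask,
 *		.irq_unmask	= demo_unmask,
 *	};
 *
 *	static int demo_map(struct irq_domain *d, unsigned int irq,
 *			    irq_hw_number_t hw)
 *	{
 *		irq_set_chip(irq, &demo_chip);
 *		irq_set_handler(irq, handle_level_irq);
 *		return 0;
 *	}
 */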

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq: irq number
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
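
/*
 * Illustrative usage (an assumption, not taken from this file): board or
 * driver setup code may configure the trigger before the line is
 * requested, e.g.:
 *
 *	irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 */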

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq: Interrupt number
 * @data: Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base: Interrupt number base
 * @irq_offset: Interrupt number offset
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq: Interrupt number
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq: Interrupt number
 * @data: Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
				irq_setup_affinity(desc);
			ret = __irq_startup(desc);
			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
				irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc, false);

	return ret;
}

int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
	if (WARN_ON(irq_activate(desc)))
		return 0;
	return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
}


void irq_shutdown_and_deactivate(struct irq_desc *desc)
{
	irq_shutdown(desc);
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc: irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
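
/*
 * Illustrative usage (assumption): a driver whose device cannot gate the
 * interrupt at the source can opt out of the lazy disable approach
 * described above before requesting the line. demo_* names are
 * hypothetical:
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *	ret = request_irq(irq, demo_handler, 0, "demo", demo);
 */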

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!irq_settings_no_debug(desc))
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
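
/*
 * Sketch (illustrative only): an I2C GPIO expander typically demultiplexes
 * its child interrupts from a threaded handler, because reading the status
 * register may sleep. The demo_* names and the struct fields are
 * assumptions made for this example.
 *
 *	static irqreturn_t demo_expander_thread(int irq, void *data)
 *	{
 *		struct demo_expander *chip = data;
 *		unsigned long pending = demo_read_pending(chip);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, chip->ngpio)
 *			handle_nested_irq(irq_find_mapping(chip->domain, bit));
 *
 *		return pending ? IRQ_HANDLED : IRQ_NONE;
 *	}
 */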

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
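
/*
 * Sketch (illustrative, not from this file): children set up with
 * handle_simple_irq() are typically fed from a demultiplexing flow
 * handler which performs the hardware ack itself, matching the note
 * above. The demo_* register names and struct fields are assumptions.
 *
 *	static void demo_demux(struct irq_desc *desc)
 *	{
 *		struct demo *d = irq_desc_get_handler_data(desc);
 *		unsigned long status = readl(d->base + DEMO_STATUS);
 *		int bit;
 *
 *		for_each_set_bit(bit, &status, 32)
 *			generic_handle_irq(irq_find_mapping(d->domain, bit));
 *
 *		writel(status, d->base + DEMO_ACK);
 *	}
 */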

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc: the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle
 * the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
 * @desc: the interrupt description structure for this irq
 *
 * A simple NMI-safe handler, considering the restrictions
 * from request_nmi.
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	/*
	 * NMIs cannot be shared, there is only one action.
	 */
	res = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc: the interrupt description structure for this irq
 *
 * The interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires reenabling the interrupt inside
 * the loop which handles the interrupts that arrived while the
 * handler was running. Once all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available, then mask it
	 * and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc: the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available, then mask it
	 * and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called.
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
 *				     dev ids
 * @desc: the interrupt description structure for this irq
 *
 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
 * as a percpu pointer.
 */
void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_activate_and_startup(desc, IRQ_RESEND);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
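
/*
 * Illustrative sketch (assumption): a cascaded interrupt controller
 * installs its demux as a chained handler, so the flow handler (here the
 * hypothetical demo_demux) runs directly from the parent's flow and no
 * irqaction is ever executed for the parent line:
 *
 *	irq_set_chained_handler_and_data(parent_irq, demo_demux, demo);
 */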

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	trigger = irqd_get_trigger_type(&desc->irq_data);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	tmp = irq_settings_get_trigger_mask(desc);
	if (tmp != IRQ_TYPE_NONE)
		trigger = tmp;

	irqd_set(&desc->irq_data, trigger);

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
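
/*
 * Illustrative usage (assumption): a per-CPU interrupt could be marked
 * per-cpu and excluded from auto-probing like this:
 *
 *	irq_modify_status(irq, 0, IRQ_PER_CPU | IRQ_NOPROBE);
 */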

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 * handle_fasteoi_ack_irq - irq handler for edge hierarchy
 *			    stacked on transparent controllers
 *
 * @desc: the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_ack() function
 * called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 * handle_fasteoi_mask_irq - irq handler for level hierarchy
 *			     stacked on transparent controllers
 *
 * @desc: the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_mask_ack() function
 * called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

/**
 * irq_chip_set_parent_state - set the state of a parent interrupt.
 *
 * @data: Pointer to interrupt specific data
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * Conditional success; returns 0 if the underlying irqchip does not
 * implement it.
 */
int irq_chip_set_parent_state(struct irq_data *data,
			      enum irqchip_irq_state which,
			      bool val)
{
	data = data->parent_data;

	if (!data || !data->chip->irq_set_irqchip_state)
		return 0;

	return data->chip->irq_set_irqchip_state(data, which, val);
}
EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);

/**
 * irq_chip_get_parent_state - get the state of a parent interrupt.
 *
 * @data: Pointer to interrupt specific data
 * @which: one of IRQCHIP_STATE_* the caller wants to know
 * @state: a pointer to a boolean where the state is to be stored
 *
 * Conditional success; returns 0 if the underlying irqchip does not
 * implement it.
 */
int irq_chip_get_parent_state(struct irq_data *data,
			      enum irqchip_irq_state which,
			      bool *state)
{
	data = data->parent_data;

	if (!data || !data->chip->irq_get_irqchip_state)
		return 0;

	return data->chip->irq_get_irqchip_state(data, which, state);
}
EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);

/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 *			    NULL)
 * @data: Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 *			     NULL)
 * @data: Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_mask_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @dest: The affinity mask to set
 * @force: Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
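
/*
 * Sketch (illustrative): a stacked irqchip commonly wires these *_parent
 * helpers straight into its struct irq_chip, delegating flow control to
 * the parent domain. The chip below is an assumption for illustration.
 *
 *	static struct irq_chip demo_stacked_chip = {
 *		.name			= "DEMO-STACKED",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */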

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data: Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @vcpu_info: The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @on: Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;

	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);

/**
 * irq_chip_request_resources_parent - Request resources on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
int irq_chip_request_resources_parent(struct irq_data *data)
{
	data = data->parent_data;

	if (data->chip->irq_request_resources)
		return data->chip->irq_request_resources(data);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);

/**
 * irq_chip_release_resources_parent - Release resources on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_release_resources_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_release_resources)
		data->chip->irq_release_resources(data);
}
EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
#endif

/**
 * irq_chip_compose_msi_msg - Compose msi message for an irq chip
 * @data: Pointer to interrupt specific data
 * @msg: Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos;

	for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) {
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	}

	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);
	return 0;
}
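
/*
 * Illustrative usage (assumption): a caller programming a device's MSI
 * registers could retrieve the address/data pair like this, where
 * demo_write_msi() is hypothetical:
 *
 *	struct msi_msg msg;
 *
 *	if (!irq_chip_compose_msi_msg(irq_get_irq_data(irq), &msg))
 *		demo_write_msi(dev, msg.address_hi, msg.address_lo, msg.data);
 */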

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data: Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data: Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function
 * has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}
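
/*
 * Illustrative pairing (assumption): callers such as the request path
 * bracket an interrupt's lifetime with these helpers so the chip's
 * parent device is only powered down after the last user is gone:
 *
 *	ret = irq_chip_pm_get(&desc->irq_data);
 *	if (ret)
 *		return ret;
 *	...
 *	irq_chip_pm_put(&desc->irq_data);
 */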
1/*
2 * linux/kernel/irq/chip.c
3 *
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6 *
7 * This file contains the core interrupt handling code, for irq-chip
8 * based architectures.
9 *
10 * Detailed information is available in Documentation/DocBook/genericirq
11 */
12
13#include <linux/irq.h>
14#include <linux/msi.h>
15#include <linux/module.h>
16#include <linux/interrupt.h>
17#include <linux/kernel_stat.h>
18
19#include "internals.h"
20
21/**
22 * irq_set_chip - set the irq chip for an irq
23 * @irq: irq number
24 * @chip: pointer to irq chip description structure
25 */
26int irq_set_chip(unsigned int irq, struct irq_chip *chip)
27{
28 unsigned long flags;
29 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
30
31 if (!desc)
32 return -EINVAL;
33
34 if (!chip)
35 chip = &no_irq_chip;
36
37 desc->irq_data.chip = chip;
38 irq_put_desc_unlock(desc, flags);
39 /*
40 * For !CONFIG_SPARSE_IRQ make the irq show up in
41 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
42 * already marked, and this call is harmless.
43 */
44 irq_reserve_irq(irq);
45 return 0;
46}
47EXPORT_SYMBOL(irq_set_chip);
48
49/**
50 * irq_set_type - set the irq trigger type for an irq
51 * @irq: irq number
52 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
53 */
54int irq_set_irq_type(unsigned int irq, unsigned int type)
55{
56 unsigned long flags;
57 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
58 int ret = 0;
59
60 if (!desc)
61 return -EINVAL;
62
63 type &= IRQ_TYPE_SENSE_MASK;
64 if (type != IRQ_TYPE_NONE)
65 ret = __irq_set_trigger(desc, irq, type);
66 irq_put_desc_busunlock(desc, flags);
67 return ret;
68}
69EXPORT_SYMBOL(irq_set_irq_type);
70
71/**
72 * irq_set_handler_data - set irq handler data for an irq
73 * @irq: Interrupt number
74 * @data: Pointer to interrupt specific data
75 *
76 * Set the hardware irq controller data for an irq
77 */
78int irq_set_handler_data(unsigned int irq, void *data)
79{
80 unsigned long flags;
81 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
82
83 if (!desc)
84 return -EINVAL;
85 desc->irq_data.handler_data = data;
86 irq_put_desc_unlock(desc, flags);
87 return 0;
88}
89EXPORT_SYMBOL(irq_set_handler_data);
90
91/**
92 * irq_set_msi_desc - set MSI descriptor data for an irq
93 * @irq: Interrupt number
94 * @entry: Pointer to MSI descriptor data
95 *
96 * Set the MSI descriptor entry for an irq
97 */
98int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
99{
100 unsigned long flags;
101 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
102
103 if (!desc)
104 return -EINVAL;
105 desc->irq_data.msi_desc = entry;
106 if (entry)
107 entry->irq = irq;
108 irq_put_desc_unlock(desc, flags);
109 return 0;
110}
111
112/**
113 * irq_set_chip_data - set irq chip data for an irq
114 * @irq: Interrupt number
115 * @data: Pointer to chip specific data
116 *
117 * Set the hardware irq chip data for an irq
118 */
119int irq_set_chip_data(unsigned int irq, void *data)
120{
121 unsigned long flags;
122 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
123
124 if (!desc)
125 return -EINVAL;
126 desc->irq_data.chip_data = data;
127 irq_put_desc_unlock(desc, flags);
128 return 0;
129}
130EXPORT_SYMBOL(irq_set_chip_data);
131
132struct irq_data *irq_get_irq_data(unsigned int irq)
133{
134 struct irq_desc *desc = irq_to_desc(irq);
135
136 return desc ? &desc->irq_data : NULL;
137}
138EXPORT_SYMBOL_GPL(irq_get_irq_data);
139
140static void irq_state_clr_disabled(struct irq_desc *desc)
141{
142 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
143}
144
145static void irq_state_set_disabled(struct irq_desc *desc)
146{
147 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
148}
149
150static void irq_state_clr_masked(struct irq_desc *desc)
151{
152 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
153}
154
155static void irq_state_set_masked(struct irq_desc *desc)
156{
157 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
158}
159
160int irq_startup(struct irq_desc *desc)
161{
162 irq_state_clr_disabled(desc);
163 desc->depth = 0;
164
165 if (desc->irq_data.chip->irq_startup) {
166 int ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
167 irq_state_clr_masked(desc);
168 return ret;
169 }
170
171 irq_enable(desc);
172 return 0;
173}
174
175void irq_shutdown(struct irq_desc *desc)
176{
177 irq_state_set_disabled(desc);
178 desc->depth = 1;
179 if (desc->irq_data.chip->irq_shutdown)
180 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
181 else if (desc->irq_data.chip->irq_disable)
182 desc->irq_data.chip->irq_disable(&desc->irq_data);
183 else
184 desc->irq_data.chip->irq_mask(&desc->irq_data);
185 irq_state_set_masked(desc);
186}
187
188void irq_enable(struct irq_desc *desc)
189{
190 irq_state_clr_disabled(desc);
191 if (desc->irq_data.chip->irq_enable)
192 desc->irq_data.chip->irq_enable(&desc->irq_data);
193 else
194 desc->irq_data.chip->irq_unmask(&desc->irq_data);
195 irq_state_clr_masked(desc);
196}
197
198void irq_disable(struct irq_desc *desc)
199{
200 irq_state_set_disabled(desc);
201 if (desc->irq_data.chip->irq_disable) {
202 desc->irq_data.chip->irq_disable(&desc->irq_data);
203 irq_state_set_masked(desc);
204 }
205}
206
207static inline void mask_ack_irq(struct irq_desc *desc)
208{
209 if (desc->irq_data.chip->irq_mask_ack)
210 desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
211 else {
212 desc->irq_data.chip->irq_mask(&desc->irq_data);
213 if (desc->irq_data.chip->irq_ack)
214 desc->irq_data.chip->irq_ack(&desc->irq_data);
215 }
216 irq_state_set_masked(desc);
217}
218
219void mask_irq(struct irq_desc *desc)
220{
221 if (desc->irq_data.chip->irq_mask) {
222 desc->irq_data.chip->irq_mask(&desc->irq_data);
223 irq_state_set_masked(desc);
224 }
225}
226
227void unmask_irq(struct irq_desc *desc)
228{
229 if (desc->irq_data.chip->irq_unmask) {
230 desc->irq_data.chip->irq_unmask(&desc->irq_data);
231 irq_state_clr_masked(desc);
232 }
233}
234
235/*
236 * handle_nested_irq - Handle a nested irq from a irq thread
237 * @irq: the interrupt number
238 *
239 * Handle interrupts which are nested into a threaded interrupt
240 * handler. The handler function is called inside the calling
241 * threads context.
242 */
243void handle_nested_irq(unsigned int irq)
244{
245 struct irq_desc *desc = irq_to_desc(irq);
246 struct irqaction *action;
247 irqreturn_t action_ret;
248
249 might_sleep();
250
251 raw_spin_lock_irq(&desc->lock);
252
253 kstat_incr_irqs_this_cpu(irq, desc);
254
255 action = desc->action;
256 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
257 goto out_unlock;
258
259 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
260 raw_spin_unlock_irq(&desc->lock);
261
262 action_ret = action->thread_fn(action->irq, action->dev_id);
263 if (!noirqdebug)
264 note_interrupt(irq, desc, action_ret);
265
266 raw_spin_lock_irq(&desc->lock);
267 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
268
269out_unlock:
270 raw_spin_unlock_irq(&desc->lock);
271}
272EXPORT_SYMBOL_GPL(handle_nested_irq);
273
274static bool irq_check_poll(struct irq_desc *desc)
275{
276 if (!(desc->istate & IRQS_POLL_INPROGRESS))
277 return false;
278 return irq_wait_for_poll(desc);
279}
280
281/**
282 * handle_simple_irq - Simple and software-decoded IRQs.
283 * @irq: the interrupt number
284 * @desc: the interrupt description structure for this irq
285 *
286 * Simple interrupts are either sent from a demultiplexing interrupt
287 * handler or come from hardware, where no interrupt hardware control
288 * is necessary.
289 *
290 * Note: The caller is expected to handle the ack, clear, mask and
291 * unmask issues if necessary.
292 */
293void
294handle_simple_irq(unsigned int irq, struct irq_desc *desc)
295{
296 raw_spin_lock(&desc->lock);
297
298 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
299 if (!irq_check_poll(desc))
300 goto out_unlock;
301
302 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
303 kstat_incr_irqs_this_cpu(irq, desc);
304
305 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
306 goto out_unlock;
307
308 handle_irq_event(desc);
309
310out_unlock:
311 raw_spin_unlock(&desc->lock);
312}
313EXPORT_SYMBOL_GPL(handle_simple_irq);
314
315/**
316 * handle_level_irq - Level type irq handler
317 * @irq: the interrupt number
318 * @desc: the interrupt description structure for this irq
319 *
320 * Level type interrupts are active as long as the hardware line has
321 * the active level. This may require to mask the interrupt and unmask
322 * it after the associated handler has acknowledged the device, so the
323 * interrupt line is back to inactive.
324 */
325void
326handle_level_irq(unsigned int irq, struct irq_desc *desc)
327{
328 raw_spin_lock(&desc->lock);
329 mask_ack_irq(desc);
330
331 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
332 if (!irq_check_poll(desc))
333 goto out_unlock;
334
335 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
336 kstat_incr_irqs_this_cpu(irq, desc);
337
338 /*
339 * If its disabled or no action available
340 * keep it masked and get out of here
341 */
342 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
343 goto out_unlock;
344
345 handle_irq_event(desc);
346
347 if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT))
348 unmask_irq(desc);
349out_unlock:
350 raw_spin_unlock(&desc->lock);
351}
352EXPORT_SYMBOL_GPL(handle_level_irq);
353
354#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
355static inline void preflow_handler(struct irq_desc *desc)
356{
357 if (desc->preflow_handler)
358 desc->preflow_handler(&desc->irq_data);
359}
360#else
361static inline void preflow_handler(struct irq_desc *desc) { }
362#endif
363
364/**
365 * handle_fasteoi_irq - irq handler for transparent controllers
366 * @irq: the interrupt number
367 * @desc: the interrupt description structure for this irq
368 *
369 * Only a single callback will be issued to the chip: an ->eoi()
370 * call when the interrupt has been serviced. This enables support
371 * for modern forms of interrupt handlers, which handle the flow
372 * details in hardware, transparently.
373 */
374void
375handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
376{
377 raw_spin_lock(&desc->lock);
378
379 if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
380 if (!irq_check_poll(desc))
381 goto out;
382
383 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
384 kstat_incr_irqs_this_cpu(irq, desc);
385
386 /*
387 * If its disabled or no action available
388 * then mask it and get out of here:
389 */
390 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
391 desc->istate |= IRQS_PENDING;
392 mask_irq(desc);
393 goto out;
394 }
395
396 if (desc->istate & IRQS_ONESHOT)
397 mask_irq(desc);
398
399 preflow_handler(desc);
400 handle_irq_event(desc);
401
402out_eoi:
403 desc->irq_data.chip->irq_eoi(&desc->irq_data);
404out_unlock:
405 raw_spin_unlock(&desc->lock);
406 return;
407out:
408 if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
409 goto out_eoi;
410 goto out_unlock;
411}
412
413/**
414 * handle_edge_irq - edge type IRQ handler
415 * @irq: the interrupt number
416 * @desc: the interrupt description structure for this irq
417 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts that arrived while the
 * handler was running. When all pending interrupts are handled, the
 * loop is left.
428 */
429void
430handle_edge_irq(unsigned int irq, struct irq_desc *desc)
431{
432 raw_spin_lock(&desc->lock);
433
434 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
435 /*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and get out.
439 */
440 if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
441 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
442 if (!irq_check_poll(desc)) {
443 desc->istate |= IRQS_PENDING;
444 mask_ack_irq(desc);
445 goto out_unlock;
446 }
447 }
448 kstat_incr_irqs_this_cpu(irq, desc);
449
450 /* Start handling the irq */
451 desc->irq_data.chip->irq_ack(&desc->irq_data);
452
453 do {
454 if (unlikely(!desc->action)) {
455 mask_irq(desc);
456 goto out_unlock;
457 }
458
459 /*
460 * When another irq arrived while we were handling
461 * one, we could have masked the irq.
462 * Renable it, if it was not disabled in meantime.
463 */
464 if (unlikely(desc->istate & IRQS_PENDING)) {
465 if (!irqd_irq_disabled(&desc->irq_data) &&
466 irqd_irq_masked(&desc->irq_data))
467 unmask_irq(desc);
468 }
469
470 handle_irq_event(desc);
471
472 } while ((desc->istate & IRQS_PENDING) &&
473 !irqd_irq_disabled(&desc->irq_data));
474
475out_unlock:
476 raw_spin_unlock(&desc->lock);
477}
478
479#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
480/**
481 * handle_edge_eoi_irq - edge eoi type IRQ handler
482 * @irq: the interrupt number
483 * @desc: the interrupt description structure for this irq
484 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
487 */
488void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
489{
490 struct irq_chip *chip = irq_desc_get_chip(desc);
491
492 raw_spin_lock(&desc->lock);
493
494 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
495 /*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and get out.
499 */
500 if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
501 irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
502 if (!irq_check_poll(desc)) {
503 desc->istate |= IRQS_PENDING;
504 goto out_eoi;
505 }
506 }
507 kstat_incr_irqs_this_cpu(irq, desc);
508
509 do {
510 if (unlikely(!desc->action))
511 goto out_eoi;
512
513 handle_irq_event(desc);
514
515 } while ((desc->istate & IRQS_PENDING) &&
516 !irqd_irq_disabled(&desc->irq_data));
517
518out_eoi:
519 chip->irq_eoi(&desc->irq_data);
520 raw_spin_unlock(&desc->lock);
521}
522#endif
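
/*
 * Usage sketch (hypothetical driver code, only relevant with
 * CONFIG_IRQ_EDGE_EOI_HANDLER=y): here the chip merely needs
 * ->irq_eoi(), which the flow above issues exactly once per
 * invocation:
 *
 *	static struct irq_chip demo_edge_eoi_chip = {
 *		.name		= "demo-edge-eoi",
 *		.irq_eoi	= demo_eoi,
 *	};
 *
 *	irq_set_chip_and_handler_name(virq, &demo_edge_eoi_chip,
 *				      handle_edge_eoi_irq, "edge-eoi");
 */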
523
524/**
525 * handle_percpu_irq - Per CPU local irq handler
526 * @irq: the interrupt number
527 * @desc: the interrupt description structure for this irq
528 *
 * Per-CPU interrupts on SMP machines without locking requirements.
530 */
531void
532handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
533{
534 struct irq_chip *chip = irq_desc_get_chip(desc);
535
536 kstat_incr_irqs_this_cpu(irq, desc);
537
538 if (chip->irq_ack)
539 chip->irq_ack(&desc->irq_data);
540
541 handle_irq_event_percpu(desc, desc->action);
542
543 if (chip->irq_eoi)
544 chip->irq_eoi(&desc->irq_data);
545}
546
547void
548__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
549 const char *name)
550{
551 unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
553
554 if (!desc)
555 return;
556
557 if (!handle) {
558 handle = handle_bad_irq;
559 } else {
560 if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
561 goto out;
562 }
563
564 /* Uninstall? */
565 if (handle == handle_bad_irq) {
566 if (desc->irq_data.chip != &no_irq_chip)
567 mask_ack_irq(desc);
568 irq_state_set_disabled(desc);
569 desc->depth = 1;
570 }
571 desc->handle_irq = handle;
572 desc->name = name;
573
574 if (handle != handle_bad_irq && is_chained) {
575 irq_settings_set_noprobe(desc);
576 irq_settings_set_norequest(desc);
577 irq_settings_set_nothread(desc);
578 irq_startup(desc);
579 }
580out:
581 irq_put_desc_busunlock(desc, flags);
582}
583EXPORT_SYMBOL_GPL(__irq_set_handler);
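
/*
 * Usage sketch: most callers use the wrappers in <linux/irq.h>.
 * irq_set_chained_handler() ends up here with is_chained = 1, which
 * also marks the irq noprobe/norequest/nothread and starts it up. The
 * parent irq and demux handler below are hypothetical:
 *
 *	irq_set_chained_handler(parent_irq, demo_demux_handler);
 */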
584
585void
586irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
587 irq_flow_handler_t handle, const char *name)
588{
589 irq_set_chip(irq, chip);
590 __irq_set_handler(irq, handle, 0, name);
591}
592
593void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
594{
595 unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
597
598 if (!desc)
599 return;
600 irq_settings_clr_and_set(desc, clr, set);
601
602 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
603 IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
604 if (irq_settings_has_no_balance_set(desc))
605 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
606 if (irq_settings_is_per_cpu(desc))
607 irqd_set(&desc->irq_data, IRQD_PER_CPU);
608 if (irq_settings_can_move_pcntxt(desc))
609 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
610 if (irq_settings_is_level(desc))
611 irqd_set(&desc->irq_data, IRQD_LEVEL);
612
613 irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
614
615 irq_put_desc_unlock(desc, flags);
616}
617EXPORT_SYMBOL_GPL(irq_modify_status);
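
/*
 * Usage sketch: the irq_set_status_flags() and irq_clear_status_flags()
 * helpers in <linux/irq.h> are thin wrappers around this function,
 * e.g. to keep a line away from autoprobing and irq balancing:
 *
 *	irq_set_status_flags(virq, IRQ_NOPROBE | IRQ_NO_BALANCING);
 */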
618
619/**
620 * irq_cpu_online - Invoke all irq_cpu_online functions.
621 *
 * Iterate through all irqs and invoke the chip's ->irq_cpu_online()
 * callback for each.
624 */
625void irq_cpu_online(void)
626{
627 struct irq_desc *desc;
628 struct irq_chip *chip;
629 unsigned long flags;
630 unsigned int irq;
631
632 for_each_active_irq(irq) {
633 desc = irq_to_desc(irq);
634 if (!desc)
635 continue;
636
637 raw_spin_lock_irqsave(&desc->lock, flags);
638
639 chip = irq_data_get_irq_chip(&desc->irq_data);
640 if (chip && chip->irq_cpu_online &&
641 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
642 !irqd_irq_disabled(&desc->irq_data)))
643 chip->irq_cpu_online(&desc->irq_data);
644
645 raw_spin_unlock_irqrestore(&desc->lock, flags);
646 }
647}
648
649/**
650 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
651 *
 * Iterate through all irqs and invoke the chip's ->irq_cpu_offline()
 * callback for each.
654 */
655void irq_cpu_offline(void)
656{
657 struct irq_desc *desc;
658 struct irq_chip *chip;
659 unsigned long flags;
660 unsigned int irq;
661
662 for_each_active_irq(irq) {
663 desc = irq_to_desc(irq);
664 if (!desc)
665 continue;
666
667 raw_spin_lock_irqsave(&desc->lock, flags);
668
669 chip = irq_data_get_irq_chip(&desc->irq_data);
670 if (chip && chip->irq_cpu_offline &&
671 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
672 !irqd_irq_disabled(&desc->irq_data)))
673 chip->irq_cpu_offline(&desc->irq_data);
674
675 raw_spin_unlock_irqrestore(&desc->lock, flags);
676 }
677}