/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
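
/*
 * Usage note (illustrative, not part of this file): forced threading is
 * selected from the kernel command line, no driver change is required.
 * For example, booting with
 *
 *	linux ... threadirqs
 *
 * makes handlers that are not marked IRQF_NO_THREAD run from per-irq
 * kernel threads, which is mainly useful for latency debugging.
 */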

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
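
/*
 * Example (a minimal sketch, not part of the kernel): a driver teardown
 * path that quiesces its device before waiting for handlers. The names
 * "my_dev" and "my_hw_mask_irqs" are hypothetical.
 *
 *	static void my_dev_quiesce(struct my_dev *dev)
 *	{
 *		my_hw_mask_irqs(dev);		// device stops raising the irq
 *		synchronize_irq(dev->irq);	// waits for hardirq + threaded parts
 *		// no handler instance can still be touching dev state here
 *	}
 *
 * Note the deadlock rule from the kerneldoc above: the caller must not
 * hold any resource the handler might need.
 */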

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if the affinity of an irq can be set from user space
 * @irq: Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
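
/*
 * Example (illustrative sketch): a multi-queue driver spreading its per
 * queue vectors across CPUs via the hint. "nr_queues" and "vec[]" are
 * hypothetical.
 *
 *	for (i = 0; i < nr_queues; i++)
 *		irq_set_affinity_hint(vec[i], cpumask_of(i % num_online_cpus()));
 *
 * The hint is exported through /proc/irq/<n>/affinity_hint, where user
 * space tools such as irqbalance can pick it up; as the code above shows,
 * the call also applies the mask as the initial affinity.
 */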

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *	    notification. Function pointers must be initialised;
 *	    the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
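
/*
 * Example (a minimal sketch under assumed names): registering a notifier
 * so a driver can re-point per-CPU resources when the irq moves.
 *
 *	static void my_notify(struct irq_affinity_notify *notify,
 *			      const cpumask_t *mask)
 *	{
 *		// runs from a workqueue, i.e. process context
 *	}
 *
 *	static void my_release(struct kref *ref) { }
 *
 *	static struct irq_affinity_notify my_an = {
 *		.notify  = my_notify,
 *		.release = my_release,
 *	};
 *
 *	irq_set_affinity_notifier(irq, &my_an);	// enable
 *	...
 *	irq_set_affinity_notifier(irq, NULL);	// disable before free_irq()
 */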

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = irq_desc_get_node(desc);

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq_desc_get_irq(d));
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	chip = irq_data_get_irq_chip(data);
	if (chip && chip->irq_set_vcpu_affinity)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
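
/*
 * Example (illustrative): optimistically stopping an irq from atomic
 * context. Because a threaded handler may still be running, the return
 * value must be checked; "my_poll_device" is hypothetical.
 *
 *	if (disable_hardirq(irq))
 *		my_poll_device();	// safe: no hardirq handler runs now
 *	enable_irq(irq);
 */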

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/*
	 * wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
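
/*
 * Example (a minimal sketch): pairing wake enable/disable in a driver's
 * suspend/resume hooks. "dev", "dev->irq" and "dev->wakeup_enabled" are
 * hypothetical.
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		if (dev->wakeup_enabled)
 *			irq_set_irq_wake(dev->irq, 1);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		if (dev->wakeup_enabled)
 *			irq_set_irq_wake(dev->irq, 0);	// must balance the enable
 *		return 0;
 *	}
 */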

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (desc->irq_common_data.affinity)
		cpumask_copy(mask, desc->irq_common_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq: Interrupt line
 * @dev_id: Device identity for which the thread should be woken
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
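
/*
 * Example (illustrative): a driver can kick its threaded handler by hand
 * instead of returning IRQ_WAKE_THREAD from the primary handler, e.g.
 * from a timer or recovery path. "dev" is the dev_id the action was
 * requested with; the name is hypothetical.
 *
 *	irq_wake_thread(dev->irq, dev);
 */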

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler != irq_default_primary_handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_oneshot to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}

	if (!shared) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_mask;
		}

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		irq_shutdown(desc);
		irq_release_resources(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	kfree(__free_irq(irq, dev_id));
}
EXPORT_SYMBOL(free_irq);
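
/*
 * Example (a minimal sketch): the usual shared-irq teardown order.
 * "my_hw_disable_irq" and "dev" are hypothetical; on a shared line the
 * device must stop asserting the interrupt before the handler is removed.
 *
 *	my_hw_disable_irq(dev);		// quiesce the card first
 *	free_irq(dev->irq, dev);	// same dev_id as passed to request_irq()
 */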

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	       If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
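
/*
 * Example (a minimal sketch, hypothetical names): the canonical split
 * handler pair for a slow (e.g. I2C-attached) device.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!my_irq_is_mine(dev))	// shared line: check ownership
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;		// defer the slow work
 *	}
 *
 *	static irqreturn_t my_slow_work(int irq, void *dev_id)
 *	{
 *		// may sleep: talk to the bus, then
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, my_quick_check, my_slow_work,
 *				   IRQF_ONESHOT, "my-dev", dev);
 *
 * With handler == NULL the default primary handler is used and
 * IRQF_ONESHOT is then mandatory, as enforced in __setup_irq() above.
 */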

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
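
/*
 * Example (illustrative): callers that cannot know whether their parent
 * irqchip is nested/threaded (gpio expanders are the typical case) let
 * the core choose. "pin", "my_handler" and "dev" are hypothetical.
 *
 *	ret = request_any_context_irq(gpio_to_irq(pin), my_handler,
 *				      IRQF_TRIGGER_FALLING, "my-button", dev);
 *	if (ret < 0)
 *		return ret;
 *	// on success ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED
 */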
1762
1763void enable_percpu_irq(unsigned int irq, unsigned int type)
1764{
1765 unsigned int cpu = smp_processor_id();
1766 unsigned long flags;
1767 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1768
1769 if (!desc)
1770 return;
1771
1772 /*
1773 * If the trigger type is not specified by the caller, then
1774 * use the default for this interrupt.
1775 */
1776 type &= IRQ_TYPE_SENSE_MASK;
1777 if (type == IRQ_TYPE_NONE)
1778 type = irqd_get_trigger_type(&desc->irq_data);
1779
1780 if (type != IRQ_TYPE_NONE) {
1781 int ret;
1782
1783 ret = __irq_set_trigger(desc, type);
1784
1785 if (ret) {
1786 WARN(1, "failed to set type for IRQ%d\n", irq);
1787 goto out;
1788 }
1789 }
1790
1791 irq_percpu_enable(desc, cpu);
1792out:
1793 irq_put_desc_unlock(desc, flags);
1794}
1795EXPORT_SYMBOL_GPL(enable_percpu_irq);
1796
1797/**
1798 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1799 * @irq: Linux irq number to check for
1800 *
1801 * Must be called from a non migratable context. Returns the enable
1802 * state of a per cpu interrupt on the current cpu.
1803 */
1804bool irq_percpu_is_enabled(unsigned int irq)
1805{
1806 unsigned int cpu = smp_processor_id();
1807 struct irq_desc *desc;
1808 unsigned long flags;
1809 bool is_enabled;
1810
1811 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1812 if (!desc)
1813 return false;
1814
1815 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1816 irq_put_desc_unlock(desc, flags);
1817
1818 return is_enabled;
1819}
1820EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1821
1822void disable_percpu_irq(unsigned int irq)
1823{
1824 unsigned int cpu = smp_processor_id();
1825 unsigned long flags;
1826 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1827
1828 if (!desc)
1829 return;
1830
1831 irq_percpu_disable(desc, cpu);
1832 irq_put_desc_unlock(desc, flags);
1833}
1834EXPORT_SYMBOL_GPL(disable_percpu_irq);
1835
1836/*
1837 * Internal function to unregister a percpu irqaction.
1838 */
1839static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1840{
1841 struct irq_desc *desc = irq_to_desc(irq);
1842 struct irqaction *action;
1843 unsigned long flags;
1844
1845 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1846
1847 if (!desc)
1848 return NULL;
1849
1850 raw_spin_lock_irqsave(&desc->lock, flags);
1851
1852 action = desc->action;
1853 if (!action || action->percpu_dev_id != dev_id) {
1854 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1855 goto bad;
1856 }
1857
1858 if (!cpumask_empty(desc->percpu_enabled)) {
1859 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1860 irq, cpumask_first(desc->percpu_enabled));
1861 goto bad;
1862 }
1863
1864 /* Found it - now remove it from the list of entries: */
1865 desc->action = NULL;
1866
1867 raw_spin_unlock_irqrestore(&desc->lock, flags);
1868
1869 unregister_handler_proc(irq, action);
1870
1871 irq_chip_pm_put(&desc->irq_data);
1872 module_put(desc->owner);
1873 return action;
1874
1875bad:
1876 raw_spin_unlock_irqrestore(&desc->lock, flags);
1877 return NULL;
1878}
1879
1880/**
1881 * remove_percpu_irq - free a per-cpu interrupt
1882 * @irq: Interrupt line to free
1883 * @act: irqaction for the interrupt
1884 *
1885 * Used to remove interrupts statically setup by the early boot process.
1886 */
1887void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1888{
1889 struct irq_desc *desc = irq_to_desc(irq);
1890
1891 if (desc && irq_settings_is_per_cpu_devid(desc))
1892 __free_percpu_irq(irq, act->percpu_dev_id);
1893}
1894
1895/**
1896 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
1897 * @irq: Interrupt line to free
1898 * @dev_id: Device identity to free
1899 *
1900 * Remove a percpu interrupt handler. The handler is removed, but
1901 * the interrupt line is not disabled. This must be done on each
1902 * CPU before calling this function. The function does not return
1903 * until any executing interrupts for this IRQ have completed.
1904 *
1905 * This function must not be called from interrupt context.
1906 */
1907void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1908{
1909 struct irq_desc *desc = irq_to_desc(irq);
1910
1911 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1912 return;
1913
1914 chip_bus_lock(desc);
1915 kfree(__free_percpu_irq(irq, dev_id));
1916 chip_bus_sync_unlock(desc);
1917}
1918EXPORT_SYMBOL_GPL(free_percpu_irq);
1919
1920/**
1921 * setup_percpu_irq - setup a per-cpu interrupt
1922 * @irq: Interrupt line to setup
1923 * @act: irqaction for the interrupt
1924 *
1925 * Used to statically setup per-cpu interrupts in the early boot process.
1926 */
1927int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1928{
1929 struct irq_desc *desc = irq_to_desc(irq);
1930 int retval;
1931
1932 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1933 return -EINVAL;
1934
1935 retval = irq_chip_pm_get(&desc->irq_data);
1936 if (retval < 0)
1937 return retval;
1938
1939 chip_bus_lock(desc);
1940 retval = __setup_irq(irq, desc, act);
1941 chip_bus_sync_unlock(desc);
1942
1943 if (retval)
1944 irq_chip_pm_put(&desc->irq_data);
1945
1946 return retval;
1947}
1948
1949/**
1950 * request_percpu_irq - allocate a percpu interrupt line
1951 * @irq: Interrupt line to allocate
1952 * @handler: Function to be called when the IRQ occurs.
1953 * @devname: An ASCII name for the claiming device
1954 * @dev_id: A percpu cookie passed back to the handler function
1955 *
1956 * This call allocates interrupt resources and enables the
1957 * interrupt on the local CPU. If the interrupt is supposed to be
1958 * enabled on other CPUs, it has to be done on each CPU using
1959 * enable_percpu_irq().
1960 *
1961 * Dev_id must be globally unique. It is a per-cpu variable, and
1962 * the handler gets called with the interrupted CPU's instance of
1963 * that variable.
1964 */
1965int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1966 const char *devname, void __percpu *dev_id)
1967{
1968 struct irqaction *action;
1969 struct irq_desc *desc;
1970 int retval;
1971
1972 if (!dev_id)
1973 return -EINVAL;
1974
1975 desc = irq_to_desc(irq);
1976 if (!desc || !irq_settings_can_request(desc) ||
1977 !irq_settings_is_per_cpu_devid(desc))
1978 return -EINVAL;
1979
1980 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1981 if (!action)
1982 return -ENOMEM;
1983
1984 action->handler = handler;
1985 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1986 action->name = devname;
1987 action->percpu_dev_id = dev_id;
1988
1989 retval = irq_chip_pm_get(&desc->irq_data);
1990 if (retval < 0) {
1991 kfree(action);
1992 return retval;
1993 }
1994
1995 chip_bus_lock(desc);
1996 retval = __setup_irq(irq, desc, action);
1997 chip_bus_sync_unlock(desc);
1998
1999 if (retval) {
2000 irq_chip_pm_put(&desc->irq_data);
2001 kfree(action);
2002 }
2003
2004 return retval;
2005}
2006EXPORT_SYMBOL_GPL(request_percpu_irq);
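
/*
 * Usage sketch (hypothetical driver, not part of this file): @dev_id is a
 * per-cpu variable, so each invocation of the handler receives the
 * interrupted CPU's instance of it. Enabling the interrupt on secondary
 * CPUs is the caller's job, e.g. from CPU hotplug callbacks.
 *
 *	static DEFINE_PER_CPU(struct my_state, my_state);
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		struct my_state *s = dev_id;
 *		...
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_percpu_irq(irq, my_handler, "mydev", &my_state);
 *	if (!err)
 *		enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */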
2007
2008/**
2009 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
2010 * @irq: Interrupt line that is forwarded to a VM
2011 * @which: One of IRQCHIP_STATE_* the caller wants to know about
2012 * @state: a pointer to a boolean where the state is to be stored
2013 *
2014 * This call snapshots the internal irqchip state of an
2015 * interrupt, returning into @state the bit corresponding to
2016 * state @which.
2017 *
2018 * This function should be called with preemption disabled if the
2019 * interrupt controller has per-cpu registers.
2020 */
2021int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2022 bool *state)
2023{
2024 struct irq_desc *desc;
2025 struct irq_data *data;
2026 struct irq_chip *chip;
2027 unsigned long flags;
2028 int err = -EINVAL;
2029
2030 desc = irq_get_desc_buslock(irq, &flags, 0);
2031 if (!desc)
2032 return err;
2033
2034 data = irq_desc_get_irq_data(desc);
2035
2036 do {
2037 chip = irq_data_get_irq_chip(data);
2038 if (chip->irq_get_irqchip_state)
2039 break;
2040#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2041 data = data->parent_data;
2042#else
2043 data = NULL;
2044#endif
2045 } while (data);
2046
2047 if (data)
2048 err = chip->irq_get_irqchip_state(data, which, state);
2049
2050 irq_put_desc_busunlock(desc, flags);
2051 return err;
2052}
2053EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
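
/*
 * Usage sketch (illustrative): snapshot whether the line is pending at
 * the irqchip, e.g. when saving the state of a forwarded interrupt:
 *
 *	bool pending;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 */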
2054
2055/**
2056 * irq_set_irqchip_state - set the state of a forwarded interrupt.
2057 * @irq: Interrupt line that is forwarded to a VM
2058 * @which: State to be restored (one of IRQCHIP_STATE_*)
2059 * @val: Value corresponding to @which
2060 *
2061 * This call sets the internal irqchip state of an interrupt,
2062 * depending on the value of @which.
2063 *
2064 * This function should be called with preemption disabled if the
2065 * interrupt controller has per-cpu registers.
2066 */
2067int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2068 bool val)
2069{
2070 struct irq_desc *desc;
2071 struct irq_data *data;
2072 struct irq_chip *chip;
2073 unsigned long flags;
2074 int err = -EINVAL;
2075
2076 desc = irq_get_desc_buslock(irq, &flags, 0);
2077 if (!desc)
2078 return err;
2079
2080 data = irq_desc_get_irq_data(desc);
2081
2082 do {
2083 chip = irq_data_get_irq_chip(data);
2084 if (chip->irq_set_irqchip_state)
2085 break;
2086#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2087 data = data->parent_data;
2088#else
2089 data = NULL;
2090#endif
2091 } while (data);
2092
2093 if (data)
2094 err = chip->irq_set_irqchip_state(data, which, val);
2095
2096 irq_put_desc_busunlock(desc, flags);
2097 return err;
2098}
2099EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
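
/*
 * Usage sketch (illustrative): restore a previously snapshotted pending
 * bit, e.g. when handing a forwarded interrupt back from a VM:
 *
 *	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, pending);
 */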
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4 * Copyright (C) 2005-2006 Thomas Gleixner
5 *
6 * This file contains driver APIs to the irq subsystem.
7 */
8
9#define pr_fmt(fmt) "genirq: " fmt
10
11#include <linux/irq.h>
12#include <linux/kthread.h>
13#include <linux/module.h>
14#include <linux/random.h>
15#include <linux/interrupt.h>
16#include <linux/irqdomain.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
19#include <linux/sched/rt.h>
20#include <linux/sched/task.h>
21#include <linux/sched/isolation.h>
22#include <uapi/linux/sched/types.h>
23#include <linux/task_work.h>
24
25#include "internals.h"
26
27#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
28DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);
29
30static int __init setup_forced_irqthreads(char *arg)
31{
32 static_branch_enable(&force_irqthreads_key);
33 return 0;
34}
35early_param("threadirqs", setup_forced_irqthreads);
36#endif
37
38static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
39{
40 struct irq_data *irqd = irq_desc_get_irq_data(desc);
41 bool inprogress;
42
43 do {
44 unsigned long flags;
45
46 /*
47 * Wait until we're out of the critical section. This might
48 * give the wrong answer due to the lack of memory barriers.
49 */
50 while (irqd_irq_inprogress(&desc->irq_data))
51 cpu_relax();
52
53 /* Ok, that indicated we're done: double-check carefully. */
54 raw_spin_lock_irqsave(&desc->lock, flags);
55 inprogress = irqd_irq_inprogress(&desc->irq_data);
56
57 /*
58 * If requested and supported, check at the chip whether it
59 * is in flight at the hardware level, i.e. already pending
60 * in a CPU and waiting for service and acknowledge.
61 */
62 if (!inprogress && sync_chip) {
63 /*
64 * Ignore the return code. inprogress is only updated
65 * when the chip supports it.
66 */
67 __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
68 &inprogress);
69 }
70 raw_spin_unlock_irqrestore(&desc->lock, flags);
71
72 /* Oops, that failed? */
73 } while (inprogress);
74}
75
76/**
77 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
78 * @irq: interrupt number to wait for
79 *
80 * This function waits for any pending hard IRQ handlers for this
81 * interrupt to complete before returning. If you use this
82 * function while holding a resource the IRQ handler may need you
83 * will deadlock. It does not take associated threaded handlers
84 * into account.
85 *
86 * Do not use this for shutdown scenarios where you must be sure
87 * that all parts (hardirq and threaded handler) have completed.
88 *
89 * Returns: false if a threaded handler is active.
90 *
91 * This function may be called - with care - from IRQ context.
92 *
93 * It does not check whether an interrupt is in flight at the
94 * hardware level but not yet serviced, as doing so might deadlock
95 * when called with interrupts disabled and the target CPU of the
96 * interrupt is the current CPU.
97 */
98bool synchronize_hardirq(unsigned int irq)
99{
100 struct irq_desc *desc = irq_to_desc(irq);
101
102 if (desc) {
103 __synchronize_hardirq(desc, false);
104 return !atomic_read(&desc->threads_active);
105 }
106
107 return true;
108}
109EXPORT_SYMBOL(synchronize_hardirq);
110
111static void __synchronize_irq(struct irq_desc *desc)
112{
113 __synchronize_hardirq(desc, true);
114 /*
115 * We made sure that no hardirq handler is running. Now verify that no
116 * threaded handlers are active.
117 */
118 wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
119}
120
121/**
122 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
123 * @irq: interrupt number to wait for
124 *
125 * This function waits for any pending IRQ handlers for this interrupt
126 * to complete before returning. If you use this function while
127 * holding a resource the IRQ handler may need, you will deadlock.
128 *
129 * Can only be called from preemptible code as it might sleep when
130 * an interrupt thread is associated with @irq.
131 *
132 * It optionally makes sure (when the irq chip supports that method)
133 * that the interrupt is not pending in any CPU and waiting for
134 * service.
135 */
136void synchronize_irq(unsigned int irq)
137{
138 struct irq_desc *desc = irq_to_desc(irq);
139
140 if (desc)
141 __synchronize_irq(desc);
142}
143EXPORT_SYMBOL(synchronize_irq);
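
/*
 * Usage sketch (illustrative teardown ordering, hypothetical names): stop
 * the interrupt source, wait out both the hardirq and any threaded
 * handler, and only then free data the handlers dereference:
 *
 *	disable_irq_nosync(my->irq);
 *	my_hw_quiesce(my);
 *	synchronize_irq(my->irq);
 *	kfree(my->ctx);
 */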
144
145#ifdef CONFIG_SMP
146cpumask_var_t irq_default_affinity;
147
148static bool __irq_can_set_affinity(struct irq_desc *desc)
149{
150 if (!desc || !irqd_can_balance(&desc->irq_data) ||
151 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
152 return false;
153 return true;
154}
155
156/**
157 * irq_can_set_affinity - Check if the affinity of a given irq can be set
158 * @irq: Interrupt to check
159 *
160 */
161int irq_can_set_affinity(unsigned int irq)
162{
163 return __irq_can_set_affinity(irq_to_desc(irq));
164}
165
166/**
167 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
168 * @irq: Interrupt to check
169 *
170 * Like irq_can_set_affinity() above, but additionally checks for the
171 * AFFINITY_MANAGED flag.
172 */
173bool irq_can_set_affinity_usr(unsigned int irq)
174{
175 struct irq_desc *desc = irq_to_desc(irq);
176
177 return __irq_can_set_affinity(desc) &&
178 !irqd_affinity_is_managed(&desc->irq_data);
179}
180
181/**
182 * irq_set_thread_affinity - Notify irq threads to adjust affinity
183 * @desc: irq descriptor which has affinity changed
184 *
185 * We just set IRQTF_AFFINITY and delegate the affinity setting
186 * to the interrupt thread itself. We cannot call
187 * set_cpus_allowed_ptr() here as we hold desc->lock and this
188 * code can be called from hard interrupt context.
189 */
190void irq_set_thread_affinity(struct irq_desc *desc)
191{
192 struct irqaction *action;
193
194 for_each_action_of_desc(desc, action) {
195 if (action->thread)
196 set_bit(IRQTF_AFFINITY, &action->thread_flags);
197 if (action->secondary && action->secondary->thread)
198 set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags);
199 }
200}
201
202#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
203static void irq_validate_effective_affinity(struct irq_data *data)
204{
205 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
206 struct irq_chip *chip = irq_data_get_irq_chip(data);
207
208 if (!cpumask_empty(m))
209 return;
210 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
211 chip->name, data->irq);
212}
213#else
214static inline void irq_validate_effective_affinity(struct irq_data *data) { }
215#endif
216
217int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
218 bool force)
219{
220 struct irq_desc *desc = irq_data_to_desc(data);
221 struct irq_chip *chip = irq_data_get_irq_chip(data);
222 const struct cpumask *prog_mask;
223 int ret;
224
225 static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
226 static struct cpumask tmp_mask;
227
228 if (!chip || !chip->irq_set_affinity)
229 return -EINVAL;
230
231 raw_spin_lock(&tmp_mask_lock);
232 /*
233 * If this is a managed interrupt and housekeeping is enabled on
234 * it check whether the requested affinity mask intersects with
235 * a housekeeping CPU. If so, then remove the isolated CPUs from
236 * the mask and just keep the housekeeping CPU(s). This prevents
237 * the affinity setter from routing the interrupt to an isolated
238 * CPU, so that I/O submitted from a housekeeping CPU does not cause
239 * interrupts on an isolated one.
240 *
241 * If the masks do not intersect or include online CPU(s) then
242 * keep the requested mask. The isolated target CPUs are only
243 * receiving interrupts when the I/O operation was submitted
244 * directly from them.
245 *
246 * If all housekeeping CPUs in the affinity mask are offline, the
247 * interrupt will be migrated by the CPU hotplug code once a
248 * housekeeping CPU which belongs to the affinity mask comes
249 * online.
250 */
251 if (irqd_affinity_is_managed(data) &&
252 housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
253 const struct cpumask *hk_mask;
254
255 hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
256
257 cpumask_and(&tmp_mask, mask, hk_mask);
258 if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
259 prog_mask = mask;
260 else
261 prog_mask = &tmp_mask;
262 } else {
263 prog_mask = mask;
264 }
265
266 /*
267 * Make sure we only provide online CPUs to the irqchip,
268 * unless we are being asked to force the affinity (in which
269 * case we do as we are told).
270 */
271 cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
272 if (!force && !cpumask_empty(&tmp_mask))
273 ret = chip->irq_set_affinity(data, &tmp_mask, force);
274 else if (force)
275 ret = chip->irq_set_affinity(data, mask, force);
276 else
277 ret = -EINVAL;
278
279 raw_spin_unlock(&tmp_mask_lock);
280
281 switch (ret) {
282 case IRQ_SET_MASK_OK:
283 case IRQ_SET_MASK_OK_DONE:
284 cpumask_copy(desc->irq_common_data.affinity, mask);
285 fallthrough;
286 case IRQ_SET_MASK_OK_NOCOPY:
287 irq_validate_effective_affinity(data);
288 irq_set_thread_affinity(desc);
289 ret = 0;
290 }
291
292 return ret;
293}
294
295#ifdef CONFIG_GENERIC_PENDING_IRQ
296static inline int irq_set_affinity_pending(struct irq_data *data,
297 const struct cpumask *dest)
298{
299 struct irq_desc *desc = irq_data_to_desc(data);
300
301 irqd_set_move_pending(data);
302 irq_copy_pending(desc, dest);
303 return 0;
304}
305#else
306static inline int irq_set_affinity_pending(struct irq_data *data,
307 const struct cpumask *dest)
308{
309 return -EBUSY;
310}
311#endif
312
313static int irq_try_set_affinity(struct irq_data *data,
314 const struct cpumask *dest, bool force)
315{
316 int ret = irq_do_set_affinity(data, dest, force);
317
318 /*
319 * In case that the underlying vector management is busy and the
320 * architecture supports the generic pending mechanism then utilize
321 * this to avoid returning an error to user space.
322 */
323 if (ret == -EBUSY && !force)
324 ret = irq_set_affinity_pending(data, dest);
325 return ret;
326}
327
328static bool irq_set_affinity_deactivated(struct irq_data *data,
329 const struct cpumask *mask)
330{
331 struct irq_desc *desc = irq_data_to_desc(data);
332
333 /*
334 * Correctly handle irq chips which can manage affinity only in
335 * the activated state.
336 *
337 * If the interrupt is not yet activated, just store the affinity
338 * mask and do not call the chip driver at all. On activation the
339 * driver has to make sure anyway that the interrupt is in a
340 * usable state so startup works.
341 */
342 if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
343 irqd_is_activated(data) || !irqd_affinity_on_activate(data))
344 return false;
345
346 cpumask_copy(desc->irq_common_data.affinity, mask);
347 irq_data_update_effective_affinity(data, mask);
348 irqd_set(data, IRQD_AFFINITY_SET);
349 return true;
350}
351
352int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
353 bool force)
354{
355 struct irq_chip *chip = irq_data_get_irq_chip(data);
356 struct irq_desc *desc = irq_data_to_desc(data);
357 int ret = 0;
358
359 if (!chip || !chip->irq_set_affinity)
360 return -EINVAL;
361
362 if (irq_set_affinity_deactivated(data, mask))
363 return 0;
364
365 if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
366 ret = irq_try_set_affinity(data, mask, force);
367 } else {
368 irqd_set_move_pending(data);
369 irq_copy_pending(desc, mask);
370 }
371
372 if (desc->affinity_notify) {
373 kref_get(&desc->affinity_notify->kref);
374 if (!schedule_work(&desc->affinity_notify->work)) {
375 /* Work was already scheduled, drop our extra ref */
376 kref_put(&desc->affinity_notify->kref,
377 desc->affinity_notify->release);
378 }
379 }
380 irqd_set(data, IRQD_AFFINITY_SET);
381
382 return ret;
383}
384
385/**
386 * irq_update_affinity_desc - Update affinity management for an interrupt
387 * @irq: The interrupt number to update
388 * @affinity: Pointer to the affinity descriptor
389 *
390 * This interface can be used to configure the affinity management of
391 * interrupts which have been allocated already.
392 *
393 * There are certain limitations on when it may be used - attempts to use it
394 * when the kernel is configured for generic IRQ reservation mode (in
395 * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
396 * managed/non-managed interrupt accounting. In addition, attempts to use it on
397 * an interrupt which is already started or which has already been configured
398 * as managed will also fail, as these mean invalid init state or double init.
399 */
400int irq_update_affinity_desc(unsigned int irq,
401 struct irq_affinity_desc *affinity)
402{
403 struct irq_desc *desc;
404 unsigned long flags;
405 bool activated;
406 int ret = 0;
407
408 /*
409 * Supporting this with the reservation scheme used by x86 needs
410 * some more thought. Fail it for now.
411 */
412 if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
413 return -EOPNOTSUPP;
414
415 desc = irq_get_desc_buslock(irq, &flags, 0);
416 if (!desc)
417 return -EINVAL;
418
419 /* Requires the interrupt to be shut down */
420 if (irqd_is_started(&desc->irq_data)) {
421 ret = -EBUSY;
422 goto out_unlock;
423 }
424
425 /* Interrupts which are already managed cannot be modified */
426 if (irqd_affinity_is_managed(&desc->irq_data)) {
427 ret = -EBUSY;
428 goto out_unlock;
429 }
430
431 /*
432 * Deactivate the interrupt. That's required to undo
433 * anything an earlier activation has established.
434 */
435 activated = irqd_is_activated(&desc->irq_data);
436 if (activated)
437 irq_domain_deactivate_irq(&desc->irq_data);
438
439 if (affinity->is_managed) {
440 irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
441 irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
442 }
443
444 cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
445
446 /* Restore the activation state */
447 if (activated)
448 irq_domain_activate_irq(&desc->irq_data, false);
449
450out_unlock:
451 irq_put_desc_busunlock(desc, flags);
452 return ret;
453}
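
/*
 * Usage sketch (illustrative): convert an already allocated but not yet
 * started interrupt to managed affinity. "target_cpu" is hypothetical.
 *
 *	struct irq_affinity_desc affd = { .is_managed = 1 };
 *
 *	cpumask_copy(&affd.mask, cpumask_of(target_cpu));
 *	err = irq_update_affinity_desc(irq, &affd);
 */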
454
455static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
456 bool force)
457{
458 struct irq_desc *desc = irq_to_desc(irq);
459 unsigned long flags;
460 int ret;
461
462 if (!desc)
463 return -EINVAL;
464
465 raw_spin_lock_irqsave(&desc->lock, flags);
466 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
467 raw_spin_unlock_irqrestore(&desc->lock, flags);
468 return ret;
469}
470
471/**
472 * irq_set_affinity - Set the irq affinity of a given irq
473 * @irq: Interrupt to set affinity
474 * @cpumask: cpumask
475 *
476 * Fails if cpumask does not contain an online CPU
477 */
478int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
479{
480 return __irq_set_affinity(irq, cpumask, false);
481}
482EXPORT_SYMBOL_GPL(irq_set_affinity);
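
/*
 * Usage sketch (illustrative): steer an interrupt to a single online
 * CPU; the call fails if the mask contains no online CPU:
 *
 *	err = irq_set_affinity(irq, cpumask_of(target_cpu));
 */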
483
484/**
485 * irq_force_affinity - Force the irq affinity of a given irq
486 * @irq: Interrupt to set affinity
487 * @cpumask: cpumask
488 *
489 * Same as irq_set_affinity, but without checking the mask against
490 * online cpus.
491 *
492 * Solely for low-level cpu hotplug code, where we need to make
493 * per-cpu interrupts affine before the cpu becomes online.
494 */
495int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
496{
497 return __irq_set_affinity(irq, cpumask, true);
498}
499EXPORT_SYMBOL_GPL(irq_force_affinity);
500
501int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
502 bool setaffinity)
503{
504 unsigned long flags;
505 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
506
507 if (!desc)
508 return -EINVAL;
509 desc->affinity_hint = m;
510 irq_put_desc_unlock(desc, flags);
511 if (m && setaffinity)
512 __irq_set_affinity(irq, m, false);
513 return 0;
514}
515EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
516
517static void irq_affinity_notify(struct work_struct *work)
518{
519 struct irq_affinity_notify *notify =
520 container_of(work, struct irq_affinity_notify, work);
521 struct irq_desc *desc = irq_to_desc(notify->irq);
522 cpumask_var_t cpumask;
523 unsigned long flags;
524
525 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
526 goto out;
527
528 raw_spin_lock_irqsave(&desc->lock, flags);
529 if (irq_move_pending(&desc->irq_data))
530 irq_get_pending(cpumask, desc);
531 else
532 cpumask_copy(cpumask, desc->irq_common_data.affinity);
533 raw_spin_unlock_irqrestore(&desc->lock, flags);
534
535 notify->notify(notify, cpumask);
536
537 free_cpumask_var(cpumask);
538out:
539 kref_put(¬ify->kref, notify->release);
540}
541
542/**
543 * irq_set_affinity_notifier - control notification of IRQ affinity changes
544 * @irq: Interrupt for which to enable/disable notification
545 * @notify: Context for notification, or %NULL to disable
546 * notification. Function pointers must be initialised;
547 * the other fields will be initialised by this function.
548 *
549 * Must be called in process context. Notification may only be enabled
550 * after the IRQ is allocated and must be disabled before the IRQ is
551 * freed using free_irq().
552 */
553int
554irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
555{
556 struct irq_desc *desc = irq_to_desc(irq);
557 struct irq_affinity_notify *old_notify;
558 unsigned long flags;
559
560 /* The release function is promised process context */
561 might_sleep();
562
563 if (!desc || desc->istate & IRQS_NMI)
564 return -EINVAL;
565
566 /* Complete initialisation of *notify */
567 if (notify) {
568 notify->irq = irq;
569 kref_init(¬ify->kref);
570 INIT_WORK(¬ify->work, irq_affinity_notify);
571 }
572
573 raw_spin_lock_irqsave(&desc->lock, flags);
574 old_notify = desc->affinity_notify;
575 desc->affinity_notify = notify;
576 raw_spin_unlock_irqrestore(&desc->lock, flags);
577
578 if (old_notify) {
579 if (cancel_work_sync(&old_notify->work)) {
580 /* Pending work had a ref, put that one too */
581 kref_put(&old_notify->kref, old_notify->release);
582 }
583 kref_put(&old_notify->kref, old_notify->release);
584 }
585
586 return 0;
587}
588EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
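
/*
 * Usage sketch (hypothetical names): the driver supplies only the two
 * callbacks; irq, kref and work are initialised here. The release
 * callback typically drops a reference on the enclosing object.
 *
 *	static void my_notify(struct irq_affinity_notify *notify,
 *			      const cpumask_t *mask)
 *	{
 *		...remap queues to the new CPU set...
 *	}
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		struct irq_affinity_notify *n =
 *			container_of(ref, struct irq_affinity_notify, kref);
 *		...free the enclosing object...
 *	}
 *
 *	my->notify.notify = my_notify;
 *	my->notify.release = my_release;
 *	err = irq_set_affinity_notifier(irq, &my->notify);
 */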
589
590#ifndef CONFIG_AUTO_IRQ_AFFINITY
591/*
592 * Generic version of the affinity autoselector.
593 */
594int irq_setup_affinity(struct irq_desc *desc)
595{
596 struct cpumask *set = irq_default_affinity;
597 int ret, node = irq_desc_get_node(desc);
598 static DEFINE_RAW_SPINLOCK(mask_lock);
599 static struct cpumask mask;
600
601 /* Excludes PER_CPU and NO_BALANCE interrupts */
602 if (!__irq_can_set_affinity(desc))
603 return 0;
604
605 raw_spin_lock(&mask_lock);
606 /*
607 * Preserve the managed affinity setting and a userspace affinity
608 * setup, but make sure that one of the targets is online.
609 */
610 if (irqd_affinity_is_managed(&desc->irq_data) ||
611 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
612 if (cpumask_intersects(desc->irq_common_data.affinity,
613 cpu_online_mask))
614 set = desc->irq_common_data.affinity;
615 else
616 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
617 }
618
619 cpumask_and(&mask, cpu_online_mask, set);
620 if (cpumask_empty(&mask))
621 cpumask_copy(&mask, cpu_online_mask);
622
623 if (node != NUMA_NO_NODE) {
624 const struct cpumask *nodemask = cpumask_of_node(node);
625
626 /* make sure at least one of the cpus in nodemask is online */
627 if (cpumask_intersects(&mask, nodemask))
628 cpumask_and(&mask, &mask, nodemask);
629 }
630 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
631 raw_spin_unlock(&mask_lock);
632 return ret;
633}
634#else
635/* Wrapper for ALPHA specific affinity selector magic */
636int irq_setup_affinity(struct irq_desc *desc)
637{
638 return irq_select_affinity(irq_desc_get_irq(desc));
639}
640#endif /* CONFIG_AUTO_IRQ_AFFINITY */
641#endif /* CONFIG_SMP */
642
643
644/**
645 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
646 * @irq: interrupt number to set affinity
647 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
648 * specific data for percpu_devid interrupts
649 *
650 * This function uses the vCPU specific data to set the vCPU
651 * affinity for an irq. The vCPU specific data is passed from
652 * outside, such as KVM. One example code path is as below:
653 * KVM -> IOMMU -> irq_set_vcpu_affinity().
654 */
655int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
656{
657 unsigned long flags;
658 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
659 struct irq_data *data;
660 struct irq_chip *chip;
661 int ret = -ENOSYS;
662
663 if (!desc)
664 return -EINVAL;
665
666 data = irq_desc_get_irq_data(desc);
667 do {
668 chip = irq_data_get_irq_chip(data);
669 if (chip && chip->irq_set_vcpu_affinity)
670 break;
671#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
672 data = data->parent_data;
673#else
674 data = NULL;
675#endif
676 } while (data);
677
678 if (data)
679 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
680 irq_put_desc_unlock(desc, flags);
681
682 return ret;
683}
684EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
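
/*
 * Usage sketch (illustrative): the layout of @vcpu_info is defined by
 * the underlying irqchip (for example an IOMMU driver on the posted
 * interrupt path); callers merely forward the opaque cookie:
 *
 *	err = irq_set_vcpu_affinity(irq, vcpu_info);
 */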
685
686void __disable_irq(struct irq_desc *desc)
687{
688 if (!desc->depth++)
689 irq_disable(desc);
690}
691
692static int __disable_irq_nosync(unsigned int irq)
693{
694 unsigned long flags;
695 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
696
697 if (!desc)
698 return -EINVAL;
699 __disable_irq(desc);
700 irq_put_desc_busunlock(desc, flags);
701 return 0;
702}
703
704/**
705 * disable_irq_nosync - disable an irq without waiting
706 * @irq: Interrupt to disable
707 *
708 * Disable the selected interrupt line. Disables and enables are
709 * nested.
710 * Unlike disable_irq(), this function does not ensure existing
711 * instances of the IRQ handler have completed before returning.
712 *
713 * This function may be called from IRQ context.
714 */
715void disable_irq_nosync(unsigned int irq)
716{
717 __disable_irq_nosync(irq);
718}
719EXPORT_SYMBOL(disable_irq_nosync);
720
721/**
722 * disable_irq - disable an irq and wait for completion
723 * @irq: Interrupt to disable
724 *
725 * Disable the selected interrupt line. Enables and disables are
726 * nested.
727 * This function waits for any pending IRQ handlers for this interrupt
728 * to complete before returning. If you use this function while
729 * holding a resource the IRQ handler may need, you will deadlock.
730 *
731 * Can only be called from preemptible code as it might sleep when
732 * an interrupt thread is associated with @irq.
733 *
734 */
735void disable_irq(unsigned int irq)
736{
737 might_sleep();
738 if (!__disable_irq_nosync(irq))
739 synchronize_irq(irq);
740}
741EXPORT_SYMBOL(disable_irq);
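
/*
 * Usage sketch (illustrative): disables nest, so each disable_irq()
 * needs a matching enable_irq() before the line can fire again:
 *
 *	disable_irq(irq);
 *	...access state shared with the handler...
 *	enable_irq(irq);
 */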
742
743/**
744 * disable_hardirq - disables an irq and waits for hardirq completion
745 * @irq: Interrupt to disable
746 *
747 * Disable the selected interrupt line. Enables and disables are
748 * nested.
749 * This function waits for any pending hard IRQ handlers for this
750 * interrupt to complete before returning. If you use this function while
751 * holding a resource the hard IRQ handler may need, you will deadlock.
752 *
753 * When used to optimistically disable an interrupt from atomic context
754 * the return value must be checked.
755 *
756 * Returns: false if a threaded handler is active.
757 *
758 * This function may be called - with care - from IRQ context.
759 */
760bool disable_hardirq(unsigned int irq)
761{
762 if (!__disable_irq_nosync(irq))
763 return synchronize_hardirq(irq);
764
765 return false;
766}
767EXPORT_SYMBOL_GPL(disable_hardirq);
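
/*
 * Usage sketch (illustrative): optimistically disable from atomic
 * context; the return value tells whether the threaded handler has
 * quiesced as well. The line is disabled either way, so the enable
 * must follow unconditionally:
 *
 *	if (disable_hardirq(irq))
 *		...no handler is running, poll the device directly...
 *	enable_irq(irq);
 */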
768
769/**
770 * disable_nmi_nosync - disable an nmi without waiting
771 * @irq: Interrupt to disable
772 *
773 * Disable the selected interrupt line. Disables and enables are
774 * nested.
775 * The interrupt to disable must have been requested through request_nmi.
776 * Unlike disable_nmi(), this function does not ensure existing
777 * instances of the IRQ handler have completed before returning.
778 */
779void disable_nmi_nosync(unsigned int irq)
780{
781 disable_irq_nosync(irq);
782}
783
784void __enable_irq(struct irq_desc *desc)
785{
786 switch (desc->depth) {
787 case 0:
788 err_out:
789 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
790 irq_desc_get_irq(desc));
791 break;
792 case 1: {
793 if (desc->istate & IRQS_SUSPENDED)
794 goto err_out;
795 /* Prevent probing on this irq: */
796 irq_settings_set_noprobe(desc);
797 /*
798 * Call irq_startup() not irq_enable() here because the
799 * interrupt might be marked NOAUTOEN. So irq_startup()
800 * needs to be invoked when it gets enabled the first
801 * time. If it was already started up, then irq_startup()
802 * will invoke irq_enable() under the hood.
803 */
804 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
805 break;
806 }
807 default:
808 desc->depth--;
809 }
810}
811
812/**
813 * enable_irq - enable handling of an irq
814 * @irq: Interrupt to enable
815 *
816 * Undoes the effect of one call to disable_irq(). If this
817 * matches the last disable, processing of interrupts on this
818 * IRQ line is re-enabled.
819 *
820 * This function may be called from IRQ context only when
821 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
822 */
823void enable_irq(unsigned int irq)
824{
825 unsigned long flags;
826 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
827
828 if (!desc)
829 return;
830 if (WARN(!desc->irq_data.chip,
831 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
832 goto out;
833
834 __enable_irq(desc);
835out:
836 irq_put_desc_busunlock(desc, flags);
837}
838EXPORT_SYMBOL(enable_irq);
839
840/**
841 * enable_nmi - enable handling of an nmi
842 * @irq: Interrupt to enable
843 *
844 * The interrupt to enable must have been requested through request_nmi.
845 * Undoes the effect of one call to disable_nmi(). If this
846 * matches the last disable, processing of interrupts on this
847 * IRQ line is re-enabled.
848 */
849void enable_nmi(unsigned int irq)
850{
851 enable_irq(irq);
852}
853
854static int set_irq_wake_real(unsigned int irq, unsigned int on)
855{
856 struct irq_desc *desc = irq_to_desc(irq);
857 int ret = -ENXIO;
858
859 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
860 return 0;
861
862 if (desc->irq_data.chip->irq_set_wake)
863 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
864
865 return ret;
866}
867
868/**
869 * irq_set_irq_wake - control irq power management wakeup
870 * @irq: interrupt to control
871 * @on: enable/disable power management wakeup
872 *
873 * Enable/disable power management wakeup mode, which is
874 * disabled by default. Enables and disables must match,
875 * just as they match for non-wakeup mode support.
876 *
877 * Wakeup mode lets this IRQ wake the system from sleep
878 * states like "suspend to RAM".
879 *
880 * Note: irq enable/disable state is completely orthogonal
881 * to the enable/disable state of irq wake. An irq can be
882 * disabled with disable_irq() and still wake the system as
883 * long as the irq has wake enabled. If this does not hold,
884 * then the underlying irq chip and the related driver need
885 * to be investigated.
886 */
887int irq_set_irq_wake(unsigned int irq, unsigned int on)
888{
889 unsigned long flags;
890 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
891 int ret = 0;
892
893 if (!desc)
894 return -EINVAL;
895
896 /* Don't use NMIs as wake up interrupts please */
897 if (desc->istate & IRQS_NMI) {
898 ret = -EINVAL;
899 goto out_unlock;
900 }
901
902 /* wakeup-capable irqs can be shared between drivers that
903 * don't need to have the same sleep mode behaviors.
904 */
905 if (on) {
906 if (desc->wake_depth++ == 0) {
907 ret = set_irq_wake_real(irq, on);
908 if (ret)
909 desc->wake_depth = 0;
910 else
911 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
912 }
913 } else {
914 if (desc->wake_depth == 0) {
915 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
916 } else if (--desc->wake_depth == 0) {
917 ret = set_irq_wake_real(irq, on);
918 if (ret)
919 desc->wake_depth = 1;
920 else
921 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
922 }
923 }
924
925out_unlock:
926 irq_put_desc_busunlock(desc, flags);
927 return ret;
928}
929EXPORT_SYMBOL(irq_set_irq_wake);
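
/*
 * Usage sketch (hypothetical driver): arm the wake source on suspend
 * and disarm it on resume; enables and disables must balance:
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct my_chip *my = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(my->irq, 1);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct my_chip *my = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(my->irq, 0);
 *		return 0;
 *	}
 */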
930
931/*
932 * Internal function that tells the architecture code whether a
933 * particular irq has been exclusively allocated or is available
934 * for driver use.
935 */
936int can_request_irq(unsigned int irq, unsigned long irqflags)
937{
938 unsigned long flags;
939 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
940 int canrequest = 0;
941
942 if (!desc)
943 return 0;
944
945 if (irq_settings_can_request(desc)) {
946 if (!desc->action ||
947 irqflags & desc->action->flags & IRQF_SHARED)
948 canrequest = 1;
949 }
950 irq_put_desc_unlock(desc, flags);
951 return canrequest;
952}
953
954int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
955{
956 struct irq_chip *chip = desc->irq_data.chip;
957 int ret, unmask = 0;
958
959 if (!chip || !chip->irq_set_type) {
960 /*
961 * IRQF_TRIGGER_* but the PIC does not support multiple
962 * flow-types?
963 */
964 pr_debug("No set_type function for IRQ %d (%s)\n",
965 irq_desc_get_irq(desc),
966 chip ? (chip->name ? : "unknown") : "unknown");
967 return 0;
968 }
969
970 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
971 if (!irqd_irq_masked(&desc->irq_data))
972 mask_irq(desc);
973 if (!irqd_irq_disabled(&desc->irq_data))
974 unmask = 1;
975 }
976
977 /* Mask all flags except trigger mode */
978 flags &= IRQ_TYPE_SENSE_MASK;
979 ret = chip->irq_set_type(&desc->irq_data, flags);
980
981 switch (ret) {
982 case IRQ_SET_MASK_OK:
983 case IRQ_SET_MASK_OK_DONE:
984 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
985 irqd_set(&desc->irq_data, flags);
986 fallthrough;
987
988 case IRQ_SET_MASK_OK_NOCOPY:
989 flags = irqd_get_trigger_type(&desc->irq_data);
990 irq_settings_set_trigger_mask(desc, flags);
991 irqd_clear(&desc->irq_data, IRQD_LEVEL);
992 irq_settings_clr_level(desc);
993 if (flags & IRQ_TYPE_LEVEL_MASK) {
994 irq_settings_set_level(desc);
995 irqd_set(&desc->irq_data, IRQD_LEVEL);
996 }
997
998 ret = 0;
999 break;
1000 default:
1001 pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
1002 flags, irq_desc_get_irq(desc), chip->irq_set_type);
1003 }
1004 if (unmask)
1005 unmask_irq(desc);
1006 return ret;
1007}
1008
1009#ifdef CONFIG_HARDIRQS_SW_RESEND
1010int irq_set_parent(int irq, int parent_irq)
1011{
1012 unsigned long flags;
1013 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1014
1015 if (!desc)
1016 return -EINVAL;
1017
1018 desc->parent_irq = parent_irq;
1019
1020 irq_put_desc_unlock(desc, flags);
1021 return 0;
1022}
1023EXPORT_SYMBOL_GPL(irq_set_parent);
1024#endif
1025
1026/*
1027 * Default primary interrupt handler for threaded interrupts. It is
1028 * assigned as the primary handler when request_threaded_irq is called
1029 * with handler == NULL. Useful for oneshot interrupts.
1030 */
1031static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
1032{
1033 return IRQ_WAKE_THREAD;
1034}
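
/*
 * Usage sketch (illustrative): a purely threaded request gets this
 * default primary handler; IRQF_ONESHOT keeps the line masked until
 * the thread has run, which matters for level triggered interrupts:
 *
 *	err = request_threaded_irq(irq, NULL, my_thread_fn,
 *				   IRQF_ONESHOT, "mydev", my);
 */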
1035
1036/*
1037 * Primary handler for nested threaded interrupts. Should never be
1038 * called.
1039 */
1040static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
1041{
1042 WARN(1, "Primary handler called for nested irq %d\n", irq);
1043 return IRQ_NONE;
1044}
1045
1046static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
1047{
1048 WARN(1, "Secondary action handler called for irq %d\n", irq);
1049 return IRQ_NONE;
1050}
1051
1052static int irq_wait_for_interrupt(struct irqaction *action)
1053{
1054 for (;;) {
1055 set_current_state(TASK_INTERRUPTIBLE);
1056
1057 if (kthread_should_stop()) {
1058 /* may need to run one last time */
1059 if (test_and_clear_bit(IRQTF_RUNTHREAD,
1060 &action->thread_flags)) {
1061 __set_current_state(TASK_RUNNING);
1062 return 0;
1063 }
1064 __set_current_state(TASK_RUNNING);
1065 return -1;
1066 }
1067
1068 if (test_and_clear_bit(IRQTF_RUNTHREAD,
1069 &action->thread_flags)) {
1070 __set_current_state(TASK_RUNNING);
1071 return 0;
1072 }
1073 schedule();
1074 }
1075}
1076
1077/*
1078 * Oneshot interrupts keep the irq line masked until the threaded
1079 * handler finished. unmask if the interrupt has not been disabled and
1080 * is marked MASKED.
1081 */
1082static void irq_finalize_oneshot(struct irq_desc *desc,
1083 struct irqaction *action)
1084{
1085 if (!(desc->istate & IRQS_ONESHOT) ||
1086 action->handler == irq_forced_secondary_handler)
1087 return;
1088again:
1089 chip_bus_lock(desc);
1090 raw_spin_lock_irq(&desc->lock);
1091
1092 /*
1093 * Implausible though it may be, we need to protect ourselves
1094 * against the following scenario:
1095 *
1096 * The thread finishes faster than the hard interrupt handler
1097 * on the other CPU. If we unmask the irq line then the
1098 * interrupt can come in again, mask the line, and bail out due
1099 * to IRQS_INPROGRESS, leaving the irq line masked forever.
1100 *
1101 * This also serializes the state of shared oneshot handlers
1102 * versus "desc->threads_oneshot |= action->thread_mask;" in
1103 * irq_wake_thread(). See the comment there which explains the
1104 * serialization.
1105 */
1106 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
1107 raw_spin_unlock_irq(&desc->lock);
1108 chip_bus_sync_unlock(desc);
1109 cpu_relax();
1110 goto again;
1111 }
1112
1113 /*
1114 * Now check again whether the thread should run. Otherwise
1115 * we would clear the threads_oneshot bit of this thread which
1116 * was just set.
1117 */
1118 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1119 goto out_unlock;
1120
1121 desc->threads_oneshot &= ~action->thread_mask;
1122
1123 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
1124 irqd_irq_masked(&desc->irq_data))
1125 unmask_threaded_irq(desc);
1126
1127out_unlock:
1128 raw_spin_unlock_irq(&desc->lock);
1129 chip_bus_sync_unlock(desc);
1130}
1131
1132#ifdef CONFIG_SMP
1133/*
1134 * Check whether we need to change the affinity of the interrupt thread.
1135 */
1136static void
1137irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1138{
1139 cpumask_var_t mask;
1140 bool valid = true;
1141
1142 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1143 return;
1144
1145 /*
1146 * In case we are out of memory we set IRQTF_AFFINITY again and
1147 * try again next time
1148 */
1149 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1150 set_bit(IRQTF_AFFINITY, &action->thread_flags);
1151 return;
1152 }
1153
1154 raw_spin_lock_irq(&desc->lock);
1155 /*
1156 * This code is triggered unconditionally. Check the affinity
1157 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
1158 */
1159 if (cpumask_available(desc->irq_common_data.affinity)) {
1160 const struct cpumask *m;
1161
1162 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1163 cpumask_copy(mask, m);
1164 } else {
1165 valid = false;
1166 }
1167 raw_spin_unlock_irq(&desc->lock);
1168
1169 if (valid)
1170 set_cpus_allowed_ptr(current, mask);
1171 free_cpumask_var(mask);
1172}
1173#else
1174static inline void
1175irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1176#endif
1177
1178/*
1179 * Interrupts which are not explicitly requested as threaded
1180 * interrupts rely on the implicit bh/preempt disable of the hard irq
1181 * context. So we need to disable bh here to avoid deadlocks and other
1182 * side effects.
1183 */
1184static irqreturn_t
1185irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1186{
1187 irqreturn_t ret;
1188
1189 local_bh_disable();
1190 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1191 local_irq_disable();
1192 ret = action->thread_fn(action->irq, action->dev_id);
1193 if (ret == IRQ_HANDLED)
1194 atomic_inc(&desc->threads_handled);
1195
1196 irq_finalize_oneshot(desc, action);
1197 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1198 local_irq_enable();
1199 local_bh_enable();
1200 return ret;
1201}
1202
1203/*
1204 * Interrupts explicitly requested as threaded interrupts want to be
1205 * preemptible - many of them need to sleep and wait for slow buses to
1206 * complete.
1207 */
1208static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1209 struct irqaction *action)
1210{
1211 irqreturn_t ret;
1212
1213 ret = action->thread_fn(action->irq, action->dev_id);
1214 if (ret == IRQ_HANDLED)
1215 atomic_inc(&desc->threads_handled);
1216
1217 irq_finalize_oneshot(desc, action);
1218 return ret;
1219}
1220
1221void wake_threads_waitq(struct irq_desc *desc)
1222{
1223 if (atomic_dec_and_test(&desc->threads_active))
1224 wake_up(&desc->wait_for_threads);
1225}
1226
1227static void irq_thread_dtor(struct callback_head *unused)
1228{
1229 struct task_struct *tsk = current;
1230 struct irq_desc *desc;
1231 struct irqaction *action;
1232
1233 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1234 return;
1235
1236 action = kthread_data(tsk);
1237
1238 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1239 tsk->comm, tsk->pid, action->irq);
1240
1241
1242 desc = irq_to_desc(action->irq);
1243 /*
1244 * If IRQTF_RUNTHREAD is set, we need to decrement
1245 * desc->threads_active and wake possible waiters.
1246 */
1247 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1248 wake_threads_waitq(desc);
1249
1250 /* Prevent a stale desc->threads_oneshot */
1251 irq_finalize_oneshot(desc, action);
1252}
1253
1254static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1255{
1256 struct irqaction *secondary = action->secondary;
1257
1258 if (WARN_ON_ONCE(!secondary))
1259 return;
1260
1261 raw_spin_lock_irq(&desc->lock);
1262 __irq_wake_thread(desc, secondary);
1263 raw_spin_unlock_irq(&desc->lock);
1264}
1265
1266/*
1267 * Internal function to notify that an interrupt thread is ready.
1268 */
1269static void irq_thread_set_ready(struct irq_desc *desc,
1270 struct irqaction *action)
1271{
1272 set_bit(IRQTF_READY, &action->thread_flags);
1273 wake_up(&desc->wait_for_threads);
1274}
1275
1276/*
1277 * Internal function to wake up an interrupt thread and wait until it is
1278 * ready.
1279 */
1280static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
1281 struct irqaction *action)
1282{
1283 if (!action || !action->thread)
1284 return;
1285
1286 wake_up_process(action->thread);
1287 wait_event(desc->wait_for_threads,
1288 test_bit(IRQTF_READY, &action->thread_flags));
1289}
1290
1291/*
1292 * Interrupt handler thread
1293 */
1294static int irq_thread(void *data)
1295{
1296 struct callback_head on_exit_work;
1297 struct irqaction *action = data;
1298 struct irq_desc *desc = irq_to_desc(action->irq);
1299 irqreturn_t (*handler_fn)(struct irq_desc *desc,
1300 struct irqaction *action);
1301
1302 irq_thread_set_ready(desc, action);
1303
1304 sched_set_fifo(current);
1305
1306 if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
1307 &action->thread_flags))
1308 handler_fn = irq_forced_thread_fn;
1309 else
1310 handler_fn = irq_thread_fn;
1311
1312 init_task_work(&on_exit_work, irq_thread_dtor);
1313 task_work_add(current, &on_exit_work, TWA_NONE);
1314
1315 irq_thread_check_affinity(desc, action);
1316
1317 while (!irq_wait_for_interrupt(action)) {
1318 irqreturn_t action_ret;
1319
1320 irq_thread_check_affinity(desc, action);
1321
1322 action_ret = handler_fn(desc, action);
1323 if (action_ret == IRQ_WAKE_THREAD)
1324 irq_wake_secondary(desc, action);
1325
1326 wake_threads_waitq(desc);
1327 }
1328
1329 /*
1330 * This is the regular exit path. __free_irq() is stopping the
1331 * thread via kthread_stop() after calling
1332 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1333 * oneshot mask bit can be set.
1334 */
1335 task_work_cancel(current, irq_thread_dtor);
1336 return 0;
1337}
1338
1339/**
1340 * irq_wake_thread - wake the irq thread for the action identified by dev_id
1341 * @irq: Interrupt line
1342 * @dev_id: Device identity for which the thread should be woken
1343 *
1344 */
1345void irq_wake_thread(unsigned int irq, void *dev_id)
1346{
1347 struct irq_desc *desc = irq_to_desc(irq);
1348 struct irqaction *action;
1349 unsigned long flags;
1350
1351 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1352 return;
1353
1354 raw_spin_lock_irqsave(&desc->lock, flags);
1355 for_each_action_of_desc(desc, action) {
1356 if (action->dev_id == dev_id) {
1357 if (action->thread)
1358 __irq_wake_thread(desc, action);
1359 break;
1360 }
1361 }
1362 raw_spin_unlock_irqrestore(&desc->lock, flags);
1363}
1364EXPORT_SYMBOL_GPL(irq_wake_thread);
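
/*
 * Usage sketch (hypothetical): kick the existing irq thread for
 * deferred work discovered outside the primary handler; @dev_id must
 * match the cookie passed at request time:
 *
 *	irq_wake_thread(my->irq, my);
 */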
1365
1366static int irq_setup_forced_threading(struct irqaction *new)
1367{
1368 if (!force_irqthreads())
1369 return 0;
1370 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1371 return 0;
1372
1373 /*
1374 * No further action required for interrupts which are requested as
1375 * threaded interrupts already
1376 */
1377 if (new->handler == irq_default_primary_handler)
1378 return 0;
1379
1380 new->flags |= IRQF_ONESHOT;
1381
1382 /*
1383 * Handle the case where we have a real primary handler and a
1384 * thread handler. We force-thread both by creating a
1385 * secondary action.
1386 */
1387 if (new->handler && new->thread_fn) {
1388 /* Allocate the secondary action */
1389 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1390 if (!new->secondary)
1391 return -ENOMEM;
1392 new->secondary->handler = irq_forced_secondary_handler;
1393 new->secondary->thread_fn = new->thread_fn;
1394 new->secondary->dev_id = new->dev_id;
1395 new->secondary->irq = new->irq;
1396 new->secondary->name = new->name;
1397 }
1398 /* Deal with the primary handler */
1399 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1400 new->thread_fn = new->handler;
1401 new->handler = irq_default_primary_handler;
1402 return 0;
1403}
1404
1405static int irq_request_resources(struct irq_desc *desc)
1406{
1407 struct irq_data *d = &desc->irq_data;
1408 struct irq_chip *c = d->chip;
1409
1410 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1411}
1412
1413static void irq_release_resources(struct irq_desc *desc)
1414{
1415 struct irq_data *d = &desc->irq_data;
1416 struct irq_chip *c = d->chip;
1417
1418 if (c->irq_release_resources)
1419 c->irq_release_resources(d);
1420}
1421
1422static bool irq_supports_nmi(struct irq_desc *desc)
1423{
1424 struct irq_data *d = irq_desc_get_irq_data(desc);
1425
1426#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1427 /* Only IRQs directly managed by the root irqchip can be set as NMI */
1428 if (d->parent_data)
1429 return false;
1430#endif
1431 /* Don't support NMIs for chips behind a slow bus */
1432 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1433 return false;
1434
1435 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1436}
1437
1438static int irq_nmi_setup(struct irq_desc *desc)
1439{
1440 struct irq_data *d = irq_desc_get_irq_data(desc);
1441 struct irq_chip *c = d->chip;
1442
1443 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1444}
1445
1446static void irq_nmi_teardown(struct irq_desc *desc)
1447{
1448 struct irq_data *d = irq_desc_get_irq_data(desc);
1449 struct irq_chip *c = d->chip;
1450
1451 if (c->irq_nmi_teardown)
1452 c->irq_nmi_teardown(d);
1453}
1454
1455static int
1456setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1457{
1458 struct task_struct *t;
1459
1460 if (!secondary) {
1461 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1462 new->name);
1463 } else {
1464 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1465 new->name);
1466 }
1467
1468 if (IS_ERR(t))
1469 return PTR_ERR(t);
1470
1471 /*
1472 * We keep the reference to the task struct even if
1473 * the thread dies, so that the interrupt code does not
1474 * reference an already freed task_struct.
1475 */
1476 new->thread = get_task_struct(t);
1477 /*
1478 * Tell the thread to set its affinity. This is
1479 * important for shared interrupt handlers as we do
1480 * not invoke setup_affinity() for the secondary
1481 * handlers as everything is already set up. Even for
1482 * interrupts marked with IRQF_NO_BALANCE this is
1483 * correct as we want the thread to move to the cpu(s)
1484 * on which the requesting code placed the interrupt.
1485 */
1486 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1487 return 0;
1488}
1489
1490/*
1491 * Internal function to register an irqaction - typically used to
1492 * allocate special interrupts that are part of the architecture.
1493 *
1494 * Locking rules:
1495 *
1496 * desc->request_mutex Provides serialization against a concurrent free_irq()
1497 * chip_bus_lock Provides serialization for slow bus operations
1498 * desc->lock Provides serialization against hard interrupts
1499 *
1500 * chip_bus_lock and desc->lock are sufficient for all other management and
1501 * interrupt related functions. desc->request_mutex solely serializes
1502 * request/free_irq().
1503 */
1504static int
1505__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1506{
1507 struct irqaction *old, **old_ptr;
1508 unsigned long flags, thread_mask = 0;
1509 int ret, nested, shared = 0;
1510
1511 if (!desc)
1512 return -EINVAL;
1513
1514 if (desc->irq_data.chip == &no_irq_chip)
1515 return -ENOSYS;
1516 if (!try_module_get(desc->owner))
1517 return -ENODEV;
1518
1519 new->irq = irq;
1520
1521 /*
1522 * If the trigger type is not specified by the caller,
1523 * then use the default for this interrupt.
1524 */
1525 if (!(new->flags & IRQF_TRIGGER_MASK))
1526 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1527
1528 /*
1529 * Check whether the interrupt nests into another interrupt
1530 * thread.
1531 */
1532 nested = irq_settings_is_nested_thread(desc);
1533 if (nested) {
1534 if (!new->thread_fn) {
1535 ret = -EINVAL;
1536 goto out_mput;
1537 }
1538 /*
1539 * Replace the primary handler which was provided from
1540 * the driver for non nested interrupt handling by the
1541 * dummy function which warns when called.
1542 */
1543 new->handler = irq_nested_primary_handler;
1544 } else {
1545 if (irq_settings_can_thread(desc)) {
1546 ret = irq_setup_forced_threading(new);
1547 if (ret)
1548 goto out_mput;
1549 }
1550 }
1551
1552 /*
1553 * Create a handler thread when a thread function is supplied
1554 * and the interrupt does not nest into another interrupt
1555 * thread.
1556 */
1557 if (new->thread_fn && !nested) {
1558 ret = setup_irq_thread(new, irq, false);
1559 if (ret)
1560 goto out_mput;
1561 if (new->secondary) {
1562 ret = setup_irq_thread(new->secondary, irq, true);
1563 if (ret)
1564 goto out_thread;
1565 }
1566 }
1567
1568 /*
1569 * Drivers are often written to work without knowledge about the
1570 * underlying irq chip implementation, so a request for a
1571 * threaded irq without a primary hard irq context handler
1572 * requires the ONESHOT flag to be set. Some irq chips like
1573 * MSI based interrupts are per se one shot safe. Check the
1574 * chip flags, so we can avoid the unmask dance at the end of
1575 * the threaded handler for those.
1576 */
1577 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1578 new->flags &= ~IRQF_ONESHOT;
1579
1580 /*
1581 * Protects against a concurrent __free_irq() call which might wait
1582 * for synchronize_hardirq() to complete without holding the optional
1583 * chip bus lock and desc->lock. Also protects against handing out
1584 * a recycled oneshot thread_mask bit while it's still in use by
1585 * its previous owner.
1586 */
1587 mutex_lock(&desc->request_mutex);
1588
1589 /*
1590 * Acquire bus lock as the irq_request_resources() callback below
1591 * might rely on the serialization or the magic power management
1592 * functions which are abusing the irq_bus_lock() callback,
1593 */
1594 chip_bus_lock(desc);
1595
1596 /* First installed action requests resources. */
1597 if (!desc->action) {
1598 ret = irq_request_resources(desc);
1599 if (ret) {
1600 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1601 new->name, irq, desc->irq_data.chip->name);
1602 goto out_bus_unlock;
1603 }
1604 }
1605
1606 /*
1607 * The following block of code has to be executed atomically
1608 * protected against a concurrent interrupt and any of the other
1609 * management calls which are not serialized via
1610 * desc->request_mutex or the optional bus lock.
1611 */
1612 raw_spin_lock_irqsave(&desc->lock, flags);
1613 old_ptr = &desc->action;
1614 old = *old_ptr;
1615 if (old) {
1616 /*
1617 * Can't share interrupts unless both agree to and are
1618 * the same type (level, edge, polarity). So both flag
1619 * fields must have IRQF_SHARED set and the bits which
1620 * set the trigger type must match. Also all must
1621 * agree on ONESHOT.
1622 * Interrupt lines used for NMIs cannot be shared.
1623 */
1624 unsigned int oldtype;
1625
1626 if (desc->istate & IRQS_NMI) {
1627 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1628 new->name, irq, desc->irq_data.chip->name);
1629 ret = -EINVAL;
1630 goto out_unlock;
1631 }
1632
1633 /*
1634 * If nobody did set the configuration before, inherit
1635 * the one provided by the requester.
1636 */
1637 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1638 oldtype = irqd_get_trigger_type(&desc->irq_data);
1639 } else {
1640 oldtype = new->flags & IRQF_TRIGGER_MASK;
1641 irqd_set_trigger_type(&desc->irq_data, oldtype);
1642 }
1643
1644 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1645 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1646 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1647 goto mismatch;
1648
1649 /* All handlers must agree on per-cpuness */
1650 if ((old->flags & IRQF_PERCPU) !=
1651 (new->flags & IRQF_PERCPU))
1652 goto mismatch;
1653
1654 /* add new interrupt at end of irq queue */
1655 do {
1656 /*
1657 * Or all existing action->thread_mask bits,
1658 * so we can find the next zero bit for this
1659 * new action.
1660 */
1661 thread_mask |= old->thread_mask;
1662 old_ptr = &old->next;
1663 old = *old_ptr;
1664 } while (old);
1665 shared = 1;
1666 }
1667
1668 /*
1669 * Setup the thread mask for this irqaction for ONESHOT. For
1670 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1671 * conditional in irq_wake_thread().
1672 */
1673 if (new->flags & IRQF_ONESHOT) {
1674 /*
1675 * Unlikely to have 32 (or 64) irqs sharing one line,
1676 * but who knows.
1677 */
1678 if (thread_mask == ~0UL) {
1679 ret = -EBUSY;
1680 goto out_unlock;
1681 }
1682 /*
1683 * The thread_mask for the action is or'ed to
1684 * desc->threads_active to indicate that the
1685 * IRQF_ONESHOT thread handler has been woken, but not
1686 * yet finished. The bit is cleared when a thread
1687 * completes. When all threads of a shared interrupt
1688 * line have completed desc->threads_active becomes
1689 * zero and the interrupt line is unmasked. See
1690 * handle.c:irq_wake_thread() for further information.
1691 *
1692 * If no thread is woken by primary (hard irq context)
1693 * interrupt handlers, then desc->threads_active is
1694 * also checked for zero to unmask the irq line in the
1695 * affected hard irq flow handlers
1696 * (handle_[fasteoi|level]_irq).
1697 *
1698 * The new action gets the first zero bit of
1699 * thread_mask assigned. See the loop above which or's
1700 * all existing action->thread_mask bits.
1701 */
1702 new->thread_mask = 1UL << ffz(thread_mask);
1703
1704 } else if (new->handler == irq_default_primary_handler &&
1705 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1706 /*
1707 * The interrupt was requested with handler = NULL, so
1708 * we use the default primary handler for it. But it
1709 * does not have the oneshot flag set. In combination
1710 * with level interrupts this is deadly, because the
1711 * default primary handler just wakes the thread, then
1712 * the irq line is re-enabled, but the device still
1713 * has the level irq asserted. Rinse and repeat....
1714 *
1715 * While this works for edge type interrupts, we play
1716 * it safe and reject unconditionally because we can't
1717 * say for sure which type this interrupt really
1718 * has. The type flags are unreliable as the
1719 * underlying chip implementation can override them.
1720 */
1721 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1722 new->name, irq);
1723 ret = -EINVAL;
1724 goto out_unlock;
1725 }

	if (!shared) {
		/* Setup the type (level, edge, polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode, it merely associates
		 * resources if necessary and if that's not possible it
		 * fails. Interrupts which are in managed shutdown mode
		 * will simply ignore that activation request.
		 */
		ret = irq_activate(desc);
		if (ret)
			goto out_unlock;

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
			if (new->flags & IRQF_NO_DEBUG)
				irq_settings_set_no_debug(desc);
		}

		if (noirqdebug)
			irq_settings_set_no_debug(desc);

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (!(new->flags & IRQF_NO_AUTOEN) &&
		    irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Re-enable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	wake_up_and_wait_for_irq_thread_ready(desc, new);
	wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop_put(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop_put(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned int irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		/* Only shutdown. Deactivate after synchronize_hardirq() */
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
	 *
	 * Apart from that, the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/*
	 * Make sure it's not being used on another CPU and if the chip
	 * supports it also make sure that there is no (not yet serviced)
	 * interrupt in flight at the hardware level.
	 */
	__synchronize_irq(desc);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now that it's being freed, so let's make
	 * sure that is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that
	 *   a 'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	/*
	 * The action has already been removed above, but the thread writes
	 * its oneshot mask bit when it completes. However, request_mutex
	 * is held across this, which prevents __setup_irq() from handing
	 * out the same bit to a newly requested action.
	 */
	if (action->thread) {
		kthread_stop_put(action->thread);
		if (action->secondary && action->secondary->thread)
			kthread_stop_put(action->secondary->thread);
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		/*
		 * There is no interrupt on the fly anymore. Deactivate it
		 * completely.
		 */
		raw_spin_lock_irqsave(&desc->lock, flags);
		irq_domain_deactivate_irq(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 *
 * Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(desc, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);
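
/*
 * Usage sketch (hypothetical "my_dev" driver, not part of this file).
 * On a shared line the device has to be quiesced first, and the dev_id
 * cookie must be the one that was passed to request_irq():
 *
 *	my_dev_mask_interrupts(my_dev);
 *	free_irq(my_dev->irq, my_dev);
 */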

/* This function must be called with desc->lock held */
static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
{
	const char *devname = NULL;

	desc->istate &= ~IRQS_NMI;

	if (!WARN_ON(desc->action == NULL)) {
		irq_pm_remove_action(desc, desc->action);
		devname = desc->action->name;
		unregister_handler_proc(irq, desc->action);

		kfree(desc->action);
		desc->action = NULL;
	}

	irq_settings_clr_disable_unlazy(desc);
	irq_shutdown_and_deactivate(desc);

	irq_release_resources(desc);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);

	return devname;
}

const void *free_nmi(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	const void *devname;

	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
		return NULL;

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

	/* NMI still enabled */
	if (WARN_ON(desc->depth == 0))
		disable_nmi_nosync(irq);

	raw_spin_lock_irqsave(&desc->lock, flags);

	irq_nmi_teardown(desc);
	devname = __cleanup_nmi(irq, desc);

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return devname;
}

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If handler is NULL and thread_fn != NULL
 *	     the default primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	       If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non-NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *	IRQF_ONESHOT		Run thread_fn with interrupt line masked
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also shared interrupts do not go well with disabling auto enable.
	 * The sharing interrupt might request it while it's still disabled
	 * and then wait for interrupts forever.
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
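
/*
 * Usage sketch for the split handler design (hypothetical "my_dev"
 * driver, not part of this file). The primary handler runs in hard
 * interrupt context, checks whether its device raised the interrupt,
 * quiesces it and defers the heavy lifting to the thread:
 *
 *	static irqreturn_t my_primary(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!my_dev_irq_pending(dev))
 *			return IRQ_NONE;
 *		my_dev_mask_interrupts(dev);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		my_dev_process_events(dev);
 *		my_dev_unmask_interrupts(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(dev->irq, my_primary, my_thread_fn,
 *				   IRQF_SHARED, "my_dev", dev);
 */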

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
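
/*
 * Usage sketch (hypothetical names): the positive return value tells
 * the caller which variant was installed, so a driver that may sit
 * behind a nested irqchip can adapt:
 *
 *	ret = request_any_context_irq(irq, my_handler, 0, "my_dev", dev);
 *	if (ret < 0)
 *		return ret;
 *	dev->nested = (ret == IRQC_IS_NESTED);
 */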

/**
 * request_nmi - allocate an interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @irqflags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It sets up the IRQ line
 * to be handled as an NMI.
 *
 * An interrupt line delivering NMIs cannot be shared and IRQ handling
 * cannot be threaded.
 *
 * Interrupt lines requested for NMI delivery must produce per-CPU
 * interrupts and have auto-enabling disabled.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail and return a negative value.
 */
int request_nmi(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/* NMI cannot be shared or used for polling */
	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
		return -EINVAL;

	if (!(irqflags & IRQF_PERCPU))
		return -EINVAL;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || (irq_settings_can_autoenable(desc) &&
	    !(irqflags & IRQF_NO_AUTOEN)) ||
	    !irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
	action->name = name;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Setup NMI state */
	desc->istate |= IRQS_NMI;
	retval = irq_nmi_setup(desc);
	if (retval) {
		__cleanup_nmi(irq, desc);
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		return -EINVAL;
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}
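
/*
 * Usage sketch (hypothetical names): the flags have to include
 * IRQF_PERCPU, the line must not be shared, and - unless the irq was
 * set up with auto-enable already disabled - IRQF_NO_AUTOEN keeps the
 * line off until it is explicitly enabled:
 *
 *	ret = request_nmi(irq, my_nmi_handler, IRQF_PERCPU | IRQF_NO_AUTOEN,
 *			  "my_nmi", dev);
 *	if (!ret)
 *		enable_nmi(irq);
 */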

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void enable_percpu_nmi(unsigned int irq, unsigned int type)
{
	enable_percpu_irq(irq, type);
}

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq: Linux irq number to check for
 *
 * Must be called from a non-migratable context. Returns the enable
 * state of a per cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

void disable_percpu_nmi(unsigned int irq)
{
	disable_percpu_irq(irq);
}

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	desc->istate &= ~IRQS_NMI;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically set up by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but
 * the interrupt line is not disabled. This must be done on each
 * CPU before calling this function. The function does not return
 * until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);

void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		return;

	kfree(__free_percpu_irq(irq, dev_id));
}

/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically set up per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}

/**
 * __request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @flags: Interrupt type flags (IRQF_TIMER only)
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt on the local CPU. If the interrupt is supposed to be
 * enabled on other CPUs, it has to be done on each CPU using
 * enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
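
/*
 * Usage sketch (hypothetical names; request_percpu_irq() is the
 * flags == 0 wrapper around __request_percpu_irq()). The cookie is a
 * percpu allocation and the line is enabled on each CPU individually:
 *
 *	static void my_enable(void *unused)
 *	{
 *		enable_percpu_irq(my_irq, IRQ_TYPE_NONE);
 *	}
 *
 *	my_state = alloc_percpu(struct my_state);
 *	ret = request_percpu_irq(my_irq, my_handler, "my_dev", my_state);
 *	if (!ret)
 *		on_each_cpu(my_enable, NULL, 1);
 */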

/**
 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @name: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
 * have to be set up on each CPU by calling prepare_percpu_nmi() before
 * being enabled on the same CPU by using enable_percpu_nmi().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 *
 * Interrupt lines requested for NMI delivery must have auto-enabling
 * disabled.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail and return a negative value.
 */
int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		       const char *name, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc) ||
	    irq_settings_can_autoenable(desc) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	/* The line cannot already be NMI */
	if (desc->istate & IRQS_NMI)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
		| IRQF_NOBALANCING;
	action->name = name;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->istate |= IRQS_NMI;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}
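
/*
 * Usage sketch (hypothetical names) of the per-CPU NMI lifecycle: the
 * line is requested once, then every CPU that should receive the NMI
 * prepares and enables its own delivery from non-preemptible context:
 *
 *	ret = request_percpu_nmi(irq, my_nmi_handler, "my_nmi", my_state);
 *
 * and then, on each CPU:
 *
 *	ret = prepare_percpu_nmi(irq);
 *	if (!ret)
 *		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 */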

/**
 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
 * @irq: Interrupt line to prepare for NMI delivery
 *
 * This call prepares an interrupt line to deliver NMI on the current CPU,
 * before that interrupt line gets enabled with enable_percpu_nmi().
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail and return a negative value.
 */
int prepare_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;
	int ret = 0;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return -EINVAL;

	if (WARN(!(desc->istate & IRQS_NMI),
		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
		 irq)) {
		ret = -EINVAL;
		goto out;
	}

	ret = irq_nmi_setup(desc);
	if (ret) {
		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
		goto out;
	}

out:
	irq_put_desc_unlock(desc, flags);
	return ret;
}

/**
 * teardown_percpu_nmi - undoes NMI setup of IRQ line
 * @irq: Interrupt line from which CPU local NMI configuration should be
 *	 removed
 *
 * This call undoes the setup done by prepare_percpu_nmi().
 *
 * IRQ line should not be enabled for the current CPU.
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 */
void teardown_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		goto out;

	irq_nmi_teardown(desc);
out:
	irq_put_desc_unlock(desc, flags);
}

int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
			    bool *state)
{
	struct irq_chip *chip;
	int err = -EINVAL;

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip))
			return -ENODEV;
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);
	return err;
}

/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an
 * interrupt, returning into @state the bit corresponding to
 * state @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	err = __irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
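
/*
 * Usage sketch: snapshot whether a line handed to a guest is still
 * pending at the irqchip level (the surrounding code is hypothetical):
 *
 *	bool pending;
 *
 *	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
 *		pr_debug("irq %u pending: %d\n", irq, pending);
 */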

/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt,
 * depending on the value of @which.
 *
 * This function should be called with migration disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip)) {
			err = -ENODEV;
			goto out_unlock;
		}
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
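
/*
 * Usage sketch: clear a stale pending bit before handing a forwarded
 * line back to the host (hypothetical caller):
 *
 *	irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
 */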

/**
 * irq_has_action - Check whether an interrupt is requested
 * @irq: The linux irq number
 *
 * Returns: A snapshot of the current state
 */
bool irq_has_action(unsigned int irq)
{
	bool res;

	rcu_read_lock();
	res = irq_desc_has_action(irq_to_desc(irq));
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(irq_has_action);

/**
 * irq_check_status_bit - Check whether bits in the irq descriptor status are set
 * @irq: The linux irq number
 * @bitmask: The bitmask to evaluate
 *
 * Returns: True if one of the bits in @bitmask is set
 */
bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
{
	struct irq_desc *desc;
	bool res = false;

	rcu_read_lock();
	desc = irq_to_desc(irq);
	if (desc)
		res = !!(desc->status_use_accessors & bitmask);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(irq_check_status_bit);