// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);

static int __init setup_forced_irqthreads(char *arg)
{
	static_branch_enable(&force_irqthreads_key);
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
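
/*
 * Example: forced irq threading is selected at boot via the kernel
 * command line:
 *
 *	linux ... threadirqs
 *
 * With the parameter present the static key above is enabled early,
 * so force_irqthreads() evaluates to true for the rest of this file.
 */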

static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 *
 * It does not check whether there is an interrupt in flight at the
 * hardware level, but not serviced yet, as this might deadlock when
 * called with interrupts disabled and the target CPU of the interrupt
 * is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);
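
/*
 * Usage sketch (illustrative, not part of the kernel sources): the
 * return value matters when quiescing only the hard irq part from a
 * context that cannot wait for the threaded handler. The helper
 * defer_to_thread_context() below is hypothetical.
 *
 *	if (!synchronize_hardirq(irq)) {
 *		// A threaded handler is still running; this context
 *		// must not assume the interrupt is fully quiesced.
 *		defer_to_thread_context();
 *	}
 */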

static void __synchronize_irq(struct irq_desc *desc)
{
	__synchronize_hardirq(desc, true);
	/*
	 * We made sure that no hardirq handler is running. Now verify that no
	 * threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated to @irq.
 *
 * It optionally makes sure (when the irq chip supports that method)
 * that the interrupt is not pending in any CPU and waiting for
 * service.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc)
		__synchronize_irq(desc);
}
EXPORT_SYMBOL(synchronize_irq);
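
/*
 * Usage sketch (illustrative, driver side): a teardown path stops new
 * invocations first and then waits for all handlers, hard and threaded,
 * before touching state the handlers use. dev->irq is hypothetical.
 *
 *	disable_irq_nosync(dev->irq);	// stop new invocations
 *	synchronize_irq(dev->irq);	// wait for running ones
 *	// now it is safe to tear down state the handlers use
 */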

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq: Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action) {
		if (action->thread) {
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
			wake_up_process(action->thread);
		}
		if (action->secondary && action->secondary->thread) {
			set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags);
			wake_up_process(action->secondary->thread);
		}
	}
}

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static void irq_validate_effective_affinity(struct irq_data *data)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
}
#else
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
#endif

static DEFINE_PER_CPU(struct cpumask, __tmp_mask);

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct cpumask *tmp_mask = this_cpu_ptr(&__tmp_mask);
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	const struct cpumask *prog_mask;
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	/*
	 * If this is a managed interrupt and housekeeping is enabled on
	 * it, check whether the requested affinity mask intersects with
	 * a housekeeping CPU. If so, then remove the isolated CPUs from
	 * the mask and just keep the housekeeping CPU(s). This prevents
	 * the affinity setter from routing the interrupt to an isolated
	 * CPU, so that I/O submitted from a housekeeping CPU does not
	 * cause interrupts on an isolated one.
	 *
	 * If the masks do not intersect or include online CPU(s) then
	 * keep the requested mask. The isolated target CPUs are only
	 * receiving interrupts when the I/O operation was submitted
	 * directly from them.
	 *
	 * If all housekeeping CPUs in the affinity mask are offline, the
	 * interrupt will be migrated by the CPU hotplug code once a
	 * housekeeping CPU which belongs to the affinity mask comes
	 * online.
	 */
	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
		const struct cpumask *hk_mask;

		hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);

		cpumask_and(tmp_mask, mask, hk_mask);
		if (!cpumask_intersects(tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = tmp_mask;
	} else {
		prog_mask = mask;
	}

	/*
	 * Make sure we only provide online CPUs to the irqchip,
	 * unless we are being asked to force the affinity (in which
	 * case we do as we are told).
	 */
	cpumask_and(tmp_mask, prog_mask, cpu_online_mask);
	if (!force && !cpumask_empty(tmp_mask))
		ret = chip->irq_set_affinity(data, tmp_mask, force);
	else if (force)
		ret = chip->irq_set_affinity(data, mask, force);
	else
		ret = -EINVAL;

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		fallthrough;
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

static bool irq_set_affinity_deactivated(struct irq_data *data,
					 const struct cpumask *mask)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	/*
	 * Handle irq chips which can handle affinity only in activated
	 * state correctly
	 *
	 * If the interrupt is not yet activated, just store the affinity
	 * mask and do not call the chip driver at all. On activation the
	 * driver has to make sure anyway that the interrupt is in a
	 * usable state so startup works.
	 */
	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
		return false;

	cpumask_copy(desc->irq_common_data.affinity, mask);
	irq_data_update_effective_affinity(data, mask);
	irqd_set(data, IRQD_AFFINITY_SET);
	return true;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_set_affinity_deactivated(data, mask))
		return 0;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		if (!schedule_work(&desc->affinity_notify->work)) {
			/* Work was already scheduled, drop our extra ref */
			kref_put(&desc->affinity_notify->kref,
				 desc->affinity_notify->release);
		}
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

/**
 * irq_update_affinity_desc - Update affinity management for an interrupt
 * @irq: The interrupt number to update
 * @affinity: Pointer to the affinity descriptor
 *
 * This interface can be used to configure the affinity management of
 * interrupts which have been allocated already.
 *
 * There are certain limitations on when it may be used - attempts to use it
 * for when the kernel is configured for generic IRQ reservation mode (in
 * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
 * managed/non-managed interrupt accounting. In addition, attempts to use it on
 * an interrupt which is already started or which has already been configured
 * as managed will also fail, as these mean invalid init state or double init.
 */
int irq_update_affinity_desc(unsigned int irq,
			     struct irq_affinity_desc *affinity)
{
	struct irq_desc *desc;
	unsigned long flags;
	bool activated;
	int ret = 0;

	/*
	 * Supporting this with the reservation scheme used by x86 needs
	 * some more thought. Fail it for now.
	 */
	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
		return -EOPNOTSUPP;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return -EINVAL;

	/* Requires the interrupt to be shut down */
	if (irqd_is_started(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/* Interrupts which are already managed cannot be modified */
	if (irqd_affinity_is_managed(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Deactivate the interrupt. That's required to undo
	 * anything an earlier activation has established.
	 */
	activated = irqd_is_activated(&desc->irq_data);
	if (activated)
		irq_domain_deactivate_irq(&desc->irq_data);

	if (affinity->is_managed) {
		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
		irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
	}

	cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);

	/* Restore the activation state */
	if (activated)
		irq_domain_activate_irq(&desc->irq_data, false);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}

static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
			      bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @cpumask: cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}
EXPORT_SYMBOL_GPL(irq_set_affinity);
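
/*
 * Usage sketch (illustrative): steer an interrupt to a single CPU, for
 * instance to keep a queue's irq local to the core processing that
 * queue. The error handling shown is a sketch, not a template.
 *
 *	int err = irq_set_affinity(irq, cpumask_of(2));
 *	if (err)
 *		pr_warn("could not move irq %u: %d\n", irq, err);
 */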

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @cpumask: cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}
EXPORT_SYMBOL_GPL(irq_force_affinity);

int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
			      bool setaffinity)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	if (m && setaffinity)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
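
/*
 * Usage sketch (illustrative): drivers normally reach this through the
 * wrappers in <linux/interrupt.h>, e.g. irq_update_affinity_hint() to
 * publish a hint (visible in /proc/irq/<n>/affinity_hint) or
 * irq_set_affinity_and_hint() to also apply it. The per-queue loop and
 * its variables below are hypothetical.
 *
 *	for (i = 0; i < nr_queues; i++)
 *		irq_set_affinity_and_hint(queue_irq[i], cpumask_of(i));
 */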

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify:	Context for notification, or %NULL to disable
 *		notification. Function pointers must be initialised;
 *		the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || irq_is_nmi(desc))
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify) {
		if (cancel_work_sync(&old_notify->work)) {
			/* Pending work had a ref, put that one too */
			kref_put(&old_notify->kref, old_notify->release);
		}
		kref_put(&old_notify->kref, old_notify->release);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
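
/*
 * Usage sketch (illustrative): a consumer fills in the notify and
 * release callbacks, typically embedding the struct in its own object.
 * Everything except the irq_affinity_notify API is hypothetical.
 *
 *	static void my_notify(struct irq_affinity_notify *n,
 *			      const cpumask_t *mask)
 *	{
 *		// rebalance per-CPU resources for the new mask
 *	}
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		struct irq_affinity_notify *n =
 *			container_of(ref, struct irq_affinity_notify, kref);
 *		// drop the reference held on the embedding object
 *	}
 *
 *	my->notify.notify = my_notify;
 *	my->notify.release = my_release;
 *	irq_set_affinity_notifier(irq, &my->notify);
 *	...
 *	irq_set_affinity_notifier(irq, NULL);	// before free_irq()
 */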

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif /* CONFIG_AUTO_IRQ_AFFINITY */
#endif /* CONFIG_SMP */


/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info:	vCPU specific data or pointer to a percpu array of vCPU
 *		specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated to @irq.
 *
 */
void disable_irq(unsigned int irq)
{
	might_sleep();
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
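
/*
 * Usage sketch (illustrative): optimistic disable from atomic context.
 * If a threaded handler is still running the caller cannot treat the
 * line as quiesced and has to fall back, e.g. to a process context
 * path. All names besides disable_hardirq()/enable_irq() are made up.
 *
 *	if (disable_hardirq(irq)) {
 *		poke_device_registers();
 *		enable_irq(irq);
 *	} else {
 *		enable_irq(irq);	// undo the disable
 *		schedule_slow_path();
 *	}
 */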

/**
 * disable_nmi_nosync - disable an nmi without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * The interrupt to disable must have been requested through request_nmi.
 * Unlike disable_nmi(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN so irq_startup()
		 * needs to be invoked when it gets enabled the first time.
		 * This is also required when __enable_irq() is invoked for
		 * a managed and shutdown interrupt from the S3 resume
		 * path.
		 *
		 * If it was already started up, then irq_startup() will
		 * invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
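
/*
 * Example (illustrative): disables nest, so every disable_irq() needs
 * exactly one matching enable_irq() before the line fires again.
 *
 *	disable_irq(irq);	// depth 0 -> 1, line masked
 *	disable_irq(irq);	// depth 1 -> 2
 *	enable_irq(irq);	// depth 2 -> 1, still masked
 *	enable_irq(irq);	// depth 1 -> 0, line enabled again
 */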

/**
 * enable_nmi - enable handling of an nmi
 * @irq: Interrupt to enable
 *
 * The interrupt to enable must have been requested through request_nmi.
 * Undoes the effect of one call to disable_nmi(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 *
 * Note: irq enable/disable state is completely orthogonal
 * to the enable/disable state of irq wake. An irq can be
 * disabled with disable_irq() and still wake the system as
 * long as the irq has wake enabled. If this does not hold,
 * then the underlying irq chip and the related driver need
 * to be investigated.
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (irq_is_nmi(desc)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
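
/*
 * Usage sketch (illustrative): a driver with a wakeup-capable line
 * typically arms it in its suspend callback and disarms it on resume.
 * The enable_irq_wake()/disable_irq_wake() wrappers from
 * <linux/interrupt.h> map onto irq_set_irq_wake(); my_suspend() and
 * struct my_dev are hypothetical.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(md->irq);
 *		return 0;
 *	}
 */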

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		fallthrough;

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = false;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	__set_current_state(TASK_RUNNING);

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
		valid = true;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

static int irq_wait_for_interrupt(struct irq_desc *desc,
				  struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		irq_thread_check_affinity(desc, action);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect against
	 * the following scenario:
	 *
	 * The thread finishes before the hard interrupt handler on
	 * the other CPU. If we unmask the irq line then the interrupt
	 * can come in again, mask the line and leave due to
	 * IRQS_INPROGRESS, so the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Internal function to notify that an interrupt thread is ready.
 */
static void irq_thread_set_ready(struct irq_desc *desc,
				 struct irqaction *action)
{
	set_bit(IRQTF_READY, &action->thread_flags);
	wake_up(&desc->wait_for_threads);
}

/*
 * Internal function to wake up an interrupt thread and wait until it is
 * ready.
 */
static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
						  struct irqaction *action)
{
	if (!action || !action->thread)
		return;

	wake_up_process(action->thread);
	wait_event(desc->wait_for_threads,
		   test_bit(IRQTF_READY, &action->thread_flags));
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	irq_thread_set_ready(desc, action);

	sched_set_fifo(current);

	if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
					   &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, TWA_NONE);

	while (!irq_wait_for_interrupt(desc, action)) {
		irqreturn_t action_ret;

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel_func(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq: Interrupt line
 * @dev_id: Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
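
/*
 * Usage sketch (illustrative): a driver can kick its own threaded
 * handler from a path other than the hard irq, e.g. a timeout noticing
 * completed work that never raised an interrupt. struct my_dev,
 * my_timeout() and work_pending_in_hardware() are hypothetical.
 *
 *	static void my_timeout(struct timer_list *t)
 *	{
 *		struct my_dev *dev = from_timer(dev, t, timer);
 *
 *		if (work_pending_in_hardware(dev))
 *			irq_wake_thread(dev->irq, dev);
 *	}
 */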

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads())
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static bool irq_supports_nmi(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	/* Only IRQs directly managed by the root irqchip can be set as NMI */
	if (d->parent_data)
		return false;
#endif
	/* Don't support NMIs for chips behind a slow bus */
	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
		return false;

	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
}

static int irq_nmi_setup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
}

static void irq_nmi_teardown(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	if (c->irq_nmi_teardown)
		c->irq_nmi_teardown(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	new->thread = get_task_struct(t);
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 * chip_bus_lock	Provides serialization for slow bus operations
 * desc->lock		Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler, which was provided by
		 * the driver for non-nested interrupt handling, with
		 * the dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_hardirq() to complete without holding the optional
	 * chip bus lock and desc->lock. Also protects against handing out
	 * a recycled oneshot thread_mask bit while it's still in use by
	 * its previous owner.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback.
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 * Interrupt lines used for NMIs cannot be shared.
		 */
		unsigned int oldtype;

		if (irq_is_nmi(desc)) {
			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
			       new->name, irq, desc->irq_data.chip->name);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * If nobody did set the configuration before, inherit
		 * the one provided by the requester.
		 */
		if (irqd_trigger_type_was_set(&desc->irq_data)) {
			oldtype = irqd_get_trigger_type(&desc->irq_data);
		} else {
			oldtype = new->flags & IRQF_TRIGGER_MASK;
			irqd_set_trigger_type(&desc->irq_data, oldtype);
		}

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)))
			goto mismatch;

		if ((old->flags & IRQF_ONESHOT) &&
		    (new->flags & IRQF_COND_ONESHOT))
			new->flags |= IRQF_ONESHOT;
		else if ((old->flags ^ new->flags) & IRQF_ONESHOT)
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 or 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1UL << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
		       new->name, irq);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!shared) {
		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode; it merely associates
		 * resources if necessary and if that's not possible it
		 * fails. Interrupts which are in managed shutdown mode
		 * will simply ignore that activation request.
		 */
		ret = irq_activate(desc);
		if (ret)
			goto out_unlock;

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
			if (new->flags & IRQF_NO_DEBUG)
				irq_settings_set_no_debug(desc);
		}

		if (noirqdebug)
			irq_settings_set_no_debug(desc);

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (!(new->flags & IRQF_NO_AUTOEN) &&
		    irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	wake_up_and_wait_for_irq_thread_ready(desc, new);
	wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop_put(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop_put(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		/* Only shutdown. Deactivate after synchronize_hardirq() */
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
	 *
	 * Aside of that the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
1941 * The still held desc->request_mutex() protects against a
1942 * concurrent request_irq() of this irq so the release of resources
1943 * and timing data is properly serialized.
1944 */
1945 chip_bus_sync_unlock(desc);
1946
1947 unregister_handler_proc(irq, action);
1948
1949 /*
1950 * Make sure it's not being used on another CPU and if the chip
1951 * supports it also make sure that there is no (not yet serviced)
1952 * interrupt in flight at the hardware level.
1953 */
1954 __synchronize_irq(desc);
1955
1956#ifdef CONFIG_DEBUG_SHIRQ
1957 /*
1958 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1959 * event to happen even now it's being freed, so let's make sure that
1960 * is so by doing an extra call to the handler ....
1961 *
1962 * ( We do this after actually deregistering it, to make sure that a
1963 * 'real' IRQ doesn't run in parallel with our fake. )
1964 */
1965 if (action->flags & IRQF_SHARED) {
1966 local_irq_save(flags);
1967 action->handler(irq, dev_id);
1968 local_irq_restore(flags);
1969 }
1970#endif
1971
1972 /*
1973 * The action has already been removed above, but the thread writes
1974 * its oneshot mask bit when it completes. Though request_mutex is
1975 * held across this which prevents __setup_irq() from handing out
1976 * the same bit to a newly requested action.
1977 */
1978 if (action->thread) {
1979 kthread_stop_put(action->thread);
1980 if (action->secondary && action->secondary->thread)
1981 kthread_stop_put(action->secondary->thread);
1982 }
1983
1984 /* Last action releases resources */
1985 if (!desc->action) {
1986 /*
1987 * Reacquire bus lock as irq_release_resources() might
1988 * require it to deallocate resources over the slow bus.
1989 */
1990 chip_bus_lock(desc);
1991 /*
1992 * There is no interrupt on the fly anymore. Deactivate it
1993 * completely.
1994 */
1995 raw_spin_lock_irqsave(&desc->lock, flags);
1996 irq_domain_deactivate_irq(&desc->irq_data);
1997 raw_spin_unlock_irqrestore(&desc->lock, flags);
1998
1999 irq_release_resources(desc);
2000 chip_bus_sync_unlock(desc);
2001 irq_remove_timings(desc);
2002 }
2003
2004 mutex_unlock(&desc->request_mutex);
2005
2006 irq_chip_pm_put(&desc->irq_data);
2007 module_put(desc->owner);
2008 kfree(action->secondary);
2009 return action;
2010}
2011
2012/**
2013 * free_irq - free an interrupt allocated with request_irq
2014 * @irq: Interrupt line to free
2015 * @dev_id: Device identity to free
2016 *
2017 * Remove an interrupt handler. The handler is removed and if the
2018 * interrupt line is no longer in use by any driver it is disabled.
2019 * On a shared IRQ the caller must ensure the interrupt is disabled
2020 * on the card it drives before calling this function. The function
2021 * does not return until any executing interrupts for this IRQ
2022 * have completed.
2023 *
2024 * This function must not be called from interrupt context.
2025 *
2026 * Returns the devname argument passed to request_irq.
2027 */
2028const void *free_irq(unsigned int irq, void *dev_id)
2029{
2030 struct irq_desc *desc = irq_to_desc(irq);
2031 struct irqaction *action;
2032 const char *devname;
2033
2034 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2035 return NULL;
2036
2037#ifdef CONFIG_SMP
2038 if (WARN_ON(desc->affinity_notify))
2039 desc->affinity_notify = NULL;
2040#endif
2041
2042 action = __free_irq(desc, dev_id);
2043
2044 if (!action)
2045 return NULL;
2046
2047 devname = action->name;
2048 kfree(action);
2049 return devname;
2050}
2051EXPORT_SYMBOL(free_irq);
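
/*
 * Illustrative usage sketch, not part of this file; the "foo" names are
 * hypothetical. request_irq() and free_irq() must be paired with the
 * same dev_id cookie, which on a shared line selects the action to
 * remove:
 *
 *	ret = request_irq(foo->irq, foo_handler, IRQF_SHARED, "foo", foo);
 *	if (ret)
 *		return ret;
 *	...
 *	free_irq(foo->irq, foo);
 */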

/* This function must be called with desc->lock held */
static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
{
	const char *devname = NULL;

	desc->istate &= ~IRQS_NMI;

	if (!WARN_ON(desc->action == NULL)) {
		irq_pm_remove_action(desc, desc->action);
		devname = desc->action->name;
		unregister_handler_proc(irq, desc->action);

		kfree(desc->action);
		desc->action = NULL;
	}

	irq_settings_clr_disable_unlazy(desc);
	irq_shutdown_and_deactivate(desc);

	irq_release_resources(desc);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);

	return devname;
}

const void *free_nmi(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	const void *devname;

	if (!desc || WARN_ON(!irq_is_nmi(desc)))
		return NULL;

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

	/* NMI still enabled */
	if (WARN_ON(desc->depth == 0))
		disable_nmi_nosync(irq);

	raw_spin_lock_irqsave(&desc->lock, flags);

	irq_nmi_teardown(desc);
	devname = __cleanup_nmi(irq, desc);

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return devname;
}

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If handler is NULL and thread_fn != NULL
 *	     the default primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	       If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * @dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non-NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *	IRQF_ONESHOT		Run thread_fn with interrupt line masked
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also shared interrupts do not go well with disabling auto enable.
	 * A handler sharing the line might request it while the line is
	 * still disabled and then wait for interrupts forever.
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure...
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
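
/*
 * Illustrative sketch of the split handler design described above, not
 * part of this file; all "foo" names are hypothetical. The primary
 * handler checks and quiesces the device from hard interrupt context,
 * the threaded handler does the sleeping work with the line masked by
 * IRQF_ONESHOT:
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_raised(foo))
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_process_events(foo);
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread,
 *				   IRQF_ONESHOT, "foo", foo);
 */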

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
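
/*
 * Illustrative sketch, not part of this file ("foo" names are
 * hypothetical): a positive return value is success here, so callers
 * should record which context was selected instead of treating any
 * non-zero value as an error:
 *
 *	ret = request_any_context_irq(foo->irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	foo->nested = (ret == IRQC_IS_NESTED);
 */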

/**
 * request_nmi - allocate an interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @irqflags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It sets up the IRQ line
 * to be handled as an NMI.
 *
 * An interrupt line delivering NMIs cannot be shared and IRQ handling
 * cannot be threaded.
 *
 * Interrupt lines requested for NMI delivery must produce per-CPU
 * interrupts and must have auto-enabling disabled.
 *
 * @dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail and return a negative value.
 */
int request_nmi(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/* NMIs cannot be shared, nor used for polling */
	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
		return -EINVAL;

	if (!(irqflags & IRQF_PERCPU))
		return -EINVAL;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || (irq_settings_can_autoenable(desc) &&
	    !(irqflags & IRQF_NO_AUTOEN)) ||
	    !irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
	action->name = name;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Setup NMI state */
	desc->istate |= IRQS_NMI;
	retval = irq_nmi_setup(desc);
	if (retval) {
		__cleanup_nmi(irq, desc);
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		return -EINVAL;
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}
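
/*
 * Illustrative sketch, not part of this file ("foo" names are
 * hypothetical): a minimal NMI request. IRQF_PERCPU is mandatory, the
 * line must not be auto-enabled, and it is switched on explicitly
 * afterwards with enable_nmi():
 *
 *	ret = request_nmi(irq, foo_nmi_handler,
 *			  IRQF_PERCPU | IRQF_NO_AUTOEN, "foo_nmi", foo);
 *	if (!ret)
 *		enable_nmi(irq);
 */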

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void enable_percpu_nmi(unsigned int irq, unsigned int type)
{
	enable_percpu_irq(irq, type);
}

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq: Linux irq number to check for
 *
 * Must be called from a non-migratable context. Returns the enable
 * state of a per-CPU interrupt on the current CPU.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

void disable_percpu_nmi(unsigned int irq)
{
	disable_percpu_irq(irq);
}

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	desc->istate &= ~IRQS_NMI;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but the
 * interrupt line is not disabled. Disabling must be done on each
 * CPU before calling this function. The function does not return
 * until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);

void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	if (WARN_ON(!irq_is_nmi(desc)))
		return;

	kfree(__free_percpu_irq(irq, dev_id));
}

/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}

/**
 * __request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @flags: Interrupt type flags (IRQF_TIMER only)
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt on the local CPU. If the interrupt is supposed to be
 * enabled on other CPUs, it has to be done on each CPU using
 * enable_percpu_irq().
 *
 * @dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
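
/*
 * Illustrative sketch, not part of this file ("foo" names are
 * hypothetical): the cookie is a per-cpu variable, requested once via
 * the request_percpu_irq() wrapper and then enabled by each CPU for
 * itself, e.g. from a CPU hotplug callback:
 *
 *	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu);
 *
 *	ret = request_percpu_irq(irq, foo_percpu_handler, "foo", &foo_pcpu);
 *
 *	Then, on each CPU:
 *
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */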

/**
 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @name: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
 * have to be set up on each CPU by calling prepare_percpu_nmi() before
 * being enabled on the same CPU by using enable_percpu_nmi().
 *
 * @dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 *
 * Interrupt lines requested for NMI delivery must have auto-enabling
 * disabled.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail, returning a negative value.
 */
int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		       const char *name, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc) ||
	    irq_settings_can_autoenable(desc) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	/* The line cannot already be NMI */
	if (irq_is_nmi(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
		| IRQF_NOBALANCING;
	action->name = name;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->istate |= IRQS_NMI;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}
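
/*
 * Illustrative sketch of the per-CPU NMI life cycle, not part of this
 * file ("foo" names are hypothetical): one global request, then a
 * CPU-local prepare and enable on every CPU, torn down in reverse
 * order:
 *
 *	ret = request_percpu_nmi(irq, foo_nmi_handler, "foo_nmi", &foo_pcpu);
 *
 *	On each CPU, from non-preemptible context:
 *
 *	if (!prepare_percpu_nmi(irq))
 *		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 *	...
 *	disable_percpu_nmi(irq);
 *	teardown_percpu_nmi(irq);
 */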

/**
 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
 * @irq: Interrupt line to prepare for NMI delivery
 *
 * This call prepares an interrupt line to deliver NMI on the current CPU,
 * before that interrupt line gets enabled with enable_percpu_nmi().
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail, returning a negative value.
 */
int prepare_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;
	int ret = 0;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return -EINVAL;

	if (WARN(!irq_is_nmi(desc),
		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
		 irq)) {
		ret = -EINVAL;
		goto out;
	}

	ret = irq_nmi_setup(desc);
	if (ret) {
		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
		goto out;
	}

out:
	irq_put_desc_unlock(desc, flags);
	return ret;
}

/**
 * teardown_percpu_nmi - undoes NMI setup of IRQ line
 * @irq: Interrupt line from which CPU local NMI configuration should be
 *	 removed
 *
 * This call undoes the setup done by prepare_percpu_nmi().
 *
 * IRQ line should not be enabled for the current CPU.
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 */
void teardown_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return;

	if (WARN_ON(!irq_is_nmi(desc)))
		goto out;

	irq_nmi_teardown(desc);
out:
	irq_put_desc_unlock(desc, flags);
}

int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
			    bool *state)
{
	struct irq_chip *chip;
	int err = -EINVAL;

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip))
			return -ENODEV;
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);
	return err;
}

/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an
 * interrupt, returning into @state the bit corresponding to
 * state @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	err = __irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);

/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt,
 * depending on the value of @which.
 *
 * This function should be called with migration disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip)) {
			err = -ENODEV;
			goto out_unlock;
		}
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
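
/*
 * Illustrative sketch, not part of this file: a VFIO/KVM style user
 * which forwards a line to a guest can transfer the pending state with
 * the two calls above; both walk the domain hierarchy until a chip
 * implements the callback:
 *
 *	bool pending;
 *
 *	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending) &&
 *	    pending)
 *		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
 */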

/**
 * irq_has_action - Check whether an interrupt is requested
 * @irq: The linux irq number
 *
 * Returns: A snapshot of the current state
 */
bool irq_has_action(unsigned int irq)
{
	bool res;

	rcu_read_lock();
	res = irq_desc_has_action(irq_to_desc(irq));
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(irq_has_action);

/**
 * irq_check_status_bit - Check whether bits in the irq descriptor status are set
 * @irq: The linux irq number
 * @bitmask: The bitmask to evaluate
 *
 * Returns: True if one of the bits in @bitmask is set
 */
bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
{
	struct irq_desc *desc;
	bool res = false;

	rcu_read_lock();
	desc = irq_to_desc(irq);
	if (desc)
		res = !!(desc->status_use_accessors & bitmask);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(irq_check_status_bit);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4 * Copyright (C) 2005-2006 Thomas Gleixner
5 *
6 * This file contains driver APIs to the irq subsystem.
7 */
8
9#define pr_fmt(fmt) "genirq: " fmt
10
11#include <linux/irq.h>
12#include <linux/kthread.h>
13#include <linux/module.h>
14#include <linux/random.h>
15#include <linux/interrupt.h>
16#include <linux/irqdomain.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
19#include <linux/sched/rt.h>
20#include <linux/sched/task.h>
21#include <linux/sched/isolation.h>
22#include <uapi/linux/sched/types.h>
23#include <linux/task_work.h>
24
25#include "internals.h"
26
27#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
28__read_mostly bool force_irqthreads;
29EXPORT_SYMBOL_GPL(force_irqthreads);
30
31static int __init setup_forced_irqthreads(char *arg)
32{
33 force_irqthreads = true;
34 return 0;
35}
36early_param("threadirqs", setup_forced_irqthreads);
37#endif
38
39static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
40{
41 struct irq_data *irqd = irq_desc_get_irq_data(desc);
42 bool inprogress;
43
44 do {
45 unsigned long flags;
46
47 /*
48 * Wait until we're out of the critical section. This might
49 * give the wrong answer due to the lack of memory barriers.
50 */
51 while (irqd_irq_inprogress(&desc->irq_data))
52 cpu_relax();
53
54 /* Ok, that indicated we're done: double-check carefully. */
55 raw_spin_lock_irqsave(&desc->lock, flags);
56 inprogress = irqd_irq_inprogress(&desc->irq_data);
57
58 /*
59 * If requested and supported, check at the chip whether it
60 * is in flight at the hardware level, i.e. already pending
61 * in a CPU and waiting for service and acknowledge.
62 */
63 if (!inprogress && sync_chip) {
64 /*
65 * Ignore the return code. inprogress is only updated
66 * when the chip supports it.
67 */
68 __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
69 &inprogress);
70 }
71 raw_spin_unlock_irqrestore(&desc->lock, flags);
72
73 /* Oops, that failed? */
74 } while (inprogress);
75}
76
77/**
78 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
79 * @irq: interrupt number to wait for
80 *
81 * This function waits for any pending hard IRQ handlers for this
82 * interrupt to complete before returning. If you use this
83 * function while holding a resource the IRQ handler may need you
84 * will deadlock. It does not take associated threaded handlers
85 * into account.
86 *
87 * Do not use this for shutdown scenarios where you must be sure
88 * that all parts (hardirq and threaded handler) have completed.
89 *
90 * Returns: false if a threaded handler is active.
91 *
92 * This function may be called - with care - from IRQ context.
93 *
94 * It does not check whether there is an interrupt in flight at the
95 * hardware level, but not serviced yet, as this might deadlock when
96 * called with interrupts disabled and the target CPU of the interrupt
97 * is the current CPU.
98 */
99bool synchronize_hardirq(unsigned int irq)
100{
101 struct irq_desc *desc = irq_to_desc(irq);
102
103 if (desc) {
104 __synchronize_hardirq(desc, false);
105 return !atomic_read(&desc->threads_active);
106 }
107
108 return true;
109}
110EXPORT_SYMBOL(synchronize_hardirq);
111
112/**
113 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
114 * @irq: interrupt number to wait for
115 *
116 * This function waits for any pending IRQ handlers for this interrupt
117 * to complete before returning. If you use this function while
118 * holding a resource the IRQ handler may need you will deadlock.
119 *
120 * Can only be called from preemptible code as it might sleep when
121 * an interrupt thread is associated to @irq.
122 *
123 * It optionally makes sure (when the irq chip supports that method)
124 * that the interrupt is not pending in any CPU and waiting for
125 * service.
126 */
127void synchronize_irq(unsigned int irq)
128{
129 struct irq_desc *desc = irq_to_desc(irq);
130
131 if (desc) {
132 __synchronize_hardirq(desc, true);
133 /*
134 * We made sure that no hardirq handler is
135 * running. Now verify that no threaded handlers are
136 * active.
137 */
138 wait_event(desc->wait_for_threads,
139 !atomic_read(&desc->threads_active));
140 }
141}
142EXPORT_SYMBOL(synchronize_irq);
143
144#ifdef CONFIG_SMP
145cpumask_var_t irq_default_affinity;
146
147static bool __irq_can_set_affinity(struct irq_desc *desc)
148{
149 if (!desc || !irqd_can_balance(&desc->irq_data) ||
150 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
151 return false;
152 return true;
153}
154
155/**
156 * irq_can_set_affinity - Check if the affinity of a given irq can be set
157 * @irq: Interrupt to check
158 *
159 */
160int irq_can_set_affinity(unsigned int irq)
161{
162 return __irq_can_set_affinity(irq_to_desc(irq));
163}
164
165/**
166 * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
167 * @irq: Interrupt to check
168 *
169 * Like irq_can_set_affinity() above, but additionally checks for the
170 * AFFINITY_MANAGED flag.
171 */
172bool irq_can_set_affinity_usr(unsigned int irq)
173{
174 struct irq_desc *desc = irq_to_desc(irq);
175
176 return __irq_can_set_affinity(desc) &&
177 !irqd_affinity_is_managed(&desc->irq_data);
178}
179
180/**
181 * irq_set_thread_affinity - Notify irq threads to adjust affinity
182 * @desc: irq descriptor which has affinity changed
183 *
184 * We just set IRQTF_AFFINITY and delegate the affinity setting
185 * to the interrupt thread itself. We can not call
186 * set_cpus_allowed_ptr() here as we hold desc->lock and this
187 * code can be called from hard interrupt context.
188 */
189void irq_set_thread_affinity(struct irq_desc *desc)
190{
191 struct irqaction *action;
192
193 for_each_action_of_desc(desc, action)
194 if (action->thread)
195 set_bit(IRQTF_AFFINITY, &action->thread_flags);
196}
197
198#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
199static void irq_validate_effective_affinity(struct irq_data *data)
200{
201 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
202 struct irq_chip *chip = irq_data_get_irq_chip(data);
203
204 if (!cpumask_empty(m))
205 return;
206 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
207 chip->name, data->irq);
208}
209
210static inline void irq_init_effective_affinity(struct irq_data *data,
211 const struct cpumask *mask)
212{
213 cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
214}
215#else
216static inline void irq_validate_effective_affinity(struct irq_data *data) { }
217static inline void irq_init_effective_affinity(struct irq_data *data,
218 const struct cpumask *mask) { }
219#endif
220
221int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
222 bool force)
223{
224 struct irq_desc *desc = irq_data_to_desc(data);
225 struct irq_chip *chip = irq_data_get_irq_chip(data);
226 int ret;
227
228 if (!chip || !chip->irq_set_affinity)
229 return -EINVAL;
230
231 /*
232 * If this is a managed interrupt and housekeeping is enabled on
233 * it check whether the requested affinity mask intersects with
234 * a housekeeping CPU. If so, then remove the isolated CPUs from
235 * the mask and just keep the housekeeping CPU(s). This prevents
236 * the affinity setter from routing the interrupt to an isolated
237 * CPU to avoid that I/O submitted from a housekeeping CPU causes
238 * interrupts on an isolated one.
239 *
240 * If the masks do not intersect or include online CPU(s) then
241 * keep the requested mask. The isolated target CPUs are only
242 * receiving interrupts when the I/O operation was submitted
243 * directly from them.
244 *
245 * If all housekeeping CPUs in the affinity mask are offline, the
246 * interrupt will be migrated by the CPU hotplug code once a
247 * housekeeping CPU which belongs to the affinity mask comes
248 * online.
249 */
250 if (irqd_affinity_is_managed(data) &&
251 housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
252 const struct cpumask *hk_mask, *prog_mask;
253
254 static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
255 static struct cpumask tmp_mask;
256
257 hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
258
259 raw_spin_lock(&tmp_mask_lock);
260 cpumask_and(&tmp_mask, mask, hk_mask);
261 if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
262 prog_mask = mask;
263 else
264 prog_mask = &tmp_mask;
265 ret = chip->irq_set_affinity(data, prog_mask, force);
266 raw_spin_unlock(&tmp_mask_lock);
267 } else {
268 ret = chip->irq_set_affinity(data, mask, force);
269 }
270 switch (ret) {
271 case IRQ_SET_MASK_OK:
272 case IRQ_SET_MASK_OK_DONE:
273 cpumask_copy(desc->irq_common_data.affinity, mask);
274 fallthrough;
275 case IRQ_SET_MASK_OK_NOCOPY:
276 irq_validate_effective_affinity(data);
277 irq_set_thread_affinity(desc);
278 ret = 0;
279 }
280
281 return ret;
282}
283
284#ifdef CONFIG_GENERIC_PENDING_IRQ
285static inline int irq_set_affinity_pending(struct irq_data *data,
286 const struct cpumask *dest)
287{
288 struct irq_desc *desc = irq_data_to_desc(data);
289
290 irqd_set_move_pending(data);
291 irq_copy_pending(desc, dest);
292 return 0;
293}
294#else
295static inline int irq_set_affinity_pending(struct irq_data *data,
296 const struct cpumask *dest)
297{
298 return -EBUSY;
299}
300#endif
301
302static int irq_try_set_affinity(struct irq_data *data,
303 const struct cpumask *dest, bool force)
304{
305 int ret = irq_do_set_affinity(data, dest, force);
306
307 /*
308 * In case that the underlying vector management is busy and the
309 * architecture supports the generic pending mechanism then utilize
310 * this to avoid returning an error to user space.
311 */
312 if (ret == -EBUSY && !force)
313 ret = irq_set_affinity_pending(data, dest);
314 return ret;
315}
316
317static bool irq_set_affinity_deactivated(struct irq_data *data,
318 const struct cpumask *mask, bool force)
319{
320 struct irq_desc *desc = irq_data_to_desc(data);
321
322 /*
323 * Handle irq chips which can handle affinity only in activated
324 * state correctly
325 *
326 * If the interrupt is not yet activated, just store the affinity
327 * mask and do not call the chip driver at all. On activation the
328 * driver has to make sure anyway that the interrupt is in a
329 * usable state so startup works.
330 */
331 if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
332 irqd_is_activated(data) || !irqd_affinity_on_activate(data))
333 return false;
334
335 cpumask_copy(desc->irq_common_data.affinity, mask);
336 irq_init_effective_affinity(data, mask);
337 irqd_set(data, IRQD_AFFINITY_SET);
338 return true;
339}
340
341int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
342 bool force)
343{
344 struct irq_chip *chip = irq_data_get_irq_chip(data);
345 struct irq_desc *desc = irq_data_to_desc(data);
346 int ret = 0;
347
348 if (!chip || !chip->irq_set_affinity)
349 return -EINVAL;
350
351 if (irq_set_affinity_deactivated(data, mask, force))
352 return 0;
353
354 if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
355 ret = irq_try_set_affinity(data, mask, force);
356 } else {
357 irqd_set_move_pending(data);
358 irq_copy_pending(desc, mask);
359 }
360
361 if (desc->affinity_notify) {
362 kref_get(&desc->affinity_notify->kref);
363 if (!schedule_work(&desc->affinity_notify->work)) {
364 /* Work was already scheduled, drop our extra ref */
365 kref_put(&desc->affinity_notify->kref,
366 desc->affinity_notify->release);
367 }
368 }
369 irqd_set(data, IRQD_AFFINITY_SET);
370
371 return ret;
372}
373
374/**
375 * irq_update_affinity_desc - Update affinity management for an interrupt
376 * @irq: The interrupt number to update
377 * @affinity: Pointer to the affinity descriptor
378 *
379 * This interface can be used to configure the affinity management of
380 * interrupts which have been allocated already.
381 *
382 * There are certain limitations on when it may be used - attempts to use it
383 * for when the kernel is configured for generic IRQ reservation mode (in
384 * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
385 * managed/non-managed interrupt accounting. In addition, attempts to use it on
386 * an interrupt which is already started or which has already been configured
387 * as managed will also fail, as these mean invalid init state or double init.
388 */
389int irq_update_affinity_desc(unsigned int irq,
390 struct irq_affinity_desc *affinity)
391{
392 struct irq_desc *desc;
393 unsigned long flags;
394 bool activated;
395 int ret = 0;
396
397 /*
398 * Supporting this with the reservation scheme used by x86 needs
399 * some more thought. Fail it for now.
400 */
401 if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
402 return -EOPNOTSUPP;
403
404 desc = irq_get_desc_buslock(irq, &flags, 0);
405 if (!desc)
406 return -EINVAL;
407
408 /* Requires the interrupt to be shut down */
409 if (irqd_is_started(&desc->irq_data)) {
410 ret = -EBUSY;
411 goto out_unlock;
412 }
413
414 /* Interrupts which are already managed cannot be modified */
415 if (irqd_affinity_is_managed(&desc->irq_data)) {
416 ret = -EBUSY;
417 goto out_unlock;
418 }
419
420 /*
421 * Deactivate the interrupt. That's required to undo
422 * anything an earlier activation has established.
423 */
424 activated = irqd_is_activated(&desc->irq_data);
425 if (activated)
426 irq_domain_deactivate_irq(&desc->irq_data);
427
428 if (affinity->is_managed) {
429 irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
430 irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
431 }
432
433 cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
434
435 /* Restore the activation state */
436 if (activated)
437 irq_domain_activate_irq(&desc->irq_data, false);
438
439out_unlock:
440 irq_put_desc_busunlock(desc, flags);
441 return ret;
442}
443
444static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
445 bool force)
446{
447 struct irq_desc *desc = irq_to_desc(irq);
448 unsigned long flags;
449 int ret;
450
451 if (!desc)
452 return -EINVAL;
453
454 raw_spin_lock_irqsave(&desc->lock, flags);
455 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
456 raw_spin_unlock_irqrestore(&desc->lock, flags);
457 return ret;
458}
459
460/**
461 * irq_set_affinity - Set the irq affinity of a given irq
462 * @irq: Interrupt to set affinity
463 * @cpumask: cpumask
464 *
465 * Fails if cpumask does not contain an online CPU
466 */
467int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
468{
469 return __irq_set_affinity(irq, cpumask, false);
470}
471EXPORT_SYMBOL_GPL(irq_set_affinity);
472
473/**
474 * irq_force_affinity - Force the irq affinity of a given irq
475 * @irq: Interrupt to set affinity
476 * @cpumask: cpumask
477 *
478 * Same as irq_set_affinity, but without checking the mask against
479 * online cpus.
480 *
481 * Solely for low level cpu hotplug code, where we need to make per
482 * cpu interrupts affine before the cpu becomes online.
483 */
484int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
485{
486 return __irq_set_affinity(irq, cpumask, true);
487}
488EXPORT_SYMBOL_GPL(irq_force_affinity);
489
490int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
491{
492 unsigned long flags;
493 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
494
495 if (!desc)
496 return -EINVAL;
497 desc->affinity_hint = m;
498 irq_put_desc_unlock(desc, flags);
499 /* set the initial affinity to prevent every interrupt being on CPU0 */
500 if (m)
501 __irq_set_affinity(irq, m, false);
502 return 0;
503}
504EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
505
506static void irq_affinity_notify(struct work_struct *work)
507{
508 struct irq_affinity_notify *notify =
509 container_of(work, struct irq_affinity_notify, work);
510 struct irq_desc *desc = irq_to_desc(notify->irq);
511 cpumask_var_t cpumask;
512 unsigned long flags;
513
514 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
515 goto out;
516
517 raw_spin_lock_irqsave(&desc->lock, flags);
518 if (irq_move_pending(&desc->irq_data))
519 irq_get_pending(cpumask, desc);
520 else
521 cpumask_copy(cpumask, desc->irq_common_data.affinity);
522 raw_spin_unlock_irqrestore(&desc->lock, flags);
523
524 notify->notify(notify, cpumask);
525
526 free_cpumask_var(cpumask);
527out:
528 kref_put(¬ify->kref, notify->release);
529}
530
531/**
532 * irq_set_affinity_notifier - control notification of IRQ affinity changes
533 * @irq: Interrupt for which to enable/disable notification
534 * @notify: Context for notification, or %NULL to disable
535 * notification. Function pointers must be initialised;
536 * the other fields will be initialised by this function.
537 *
538 * Must be called in process context. Notification may only be enabled
539 * after the IRQ is allocated and must be disabled before the IRQ is
540 * freed using free_irq().
541 */
542int
543irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
544{
545 struct irq_desc *desc = irq_to_desc(irq);
546 struct irq_affinity_notify *old_notify;
547 unsigned long flags;
548
549 /* The release function is promised process context */
550 might_sleep();
551
552 if (!desc || desc->istate & IRQS_NMI)
553 return -EINVAL;
554
555 /* Complete initialisation of *notify */
556 if (notify) {
557 notify->irq = irq;
558 kref_init(¬ify->kref);
559 INIT_WORK(¬ify->work, irq_affinity_notify);
560 }
561
562 raw_spin_lock_irqsave(&desc->lock, flags);
563 old_notify = desc->affinity_notify;
564 desc->affinity_notify = notify;
565 raw_spin_unlock_irqrestore(&desc->lock, flags);
566
567 if (old_notify) {
568 if (cancel_work_sync(&old_notify->work)) {
569 /* Pending work had a ref, put that one too */
570 kref_put(&old_notify->kref, old_notify->release);
571 }
572 kref_put(&old_notify->kref, old_notify->release);
573 }
574
575 return 0;
576}
577EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
578
579#ifndef CONFIG_AUTO_IRQ_AFFINITY
580/*
581 * Generic version of the affinity autoselector.
582 */
583int irq_setup_affinity(struct irq_desc *desc)
584{
585 struct cpumask *set = irq_default_affinity;
586 int ret, node = irq_desc_get_node(desc);
587 static DEFINE_RAW_SPINLOCK(mask_lock);
588 static struct cpumask mask;
589
590 /* Excludes PER_CPU and NO_BALANCE interrupts */
591 if (!__irq_can_set_affinity(desc))
592 return 0;
593
594 raw_spin_lock(&mask_lock);
595 /*
596 * Preserve the managed affinity setting and a userspace affinity
597 * setup, but make sure that one of the targets is online.
598 */
599 if (irqd_affinity_is_managed(&desc->irq_data) ||
600 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
601 if (cpumask_intersects(desc->irq_common_data.affinity,
602 cpu_online_mask))
603 set = desc->irq_common_data.affinity;
604 else
605 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
606 }
607
608 cpumask_and(&mask, cpu_online_mask, set);
609 if (cpumask_empty(&mask))
610 cpumask_copy(&mask, cpu_online_mask);
611
612 if (node != NUMA_NO_NODE) {
613 const struct cpumask *nodemask = cpumask_of_node(node);
614
615 /* make sure at least one of the cpus in nodemask is online */
616 if (cpumask_intersects(&mask, nodemask))
617 cpumask_and(&mask, &mask, nodemask);
618 }
619 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
620 raw_spin_unlock(&mask_lock);
621 return ret;
622}
623#else
624/* Wrapper for ALPHA specific affinity selector magic */
625int irq_setup_affinity(struct irq_desc *desc)
626{
627 return irq_select_affinity(irq_desc_get_irq(desc));
628}
629#endif /* CONFIG_AUTO_IRQ_AFFINITY */
630#endif /* CONFIG_SMP */
631
632
633/**
634 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
635 * @irq: interrupt number to set affinity
636 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
637 * specific data for percpu_devid interrupts
638 *
639 * This function uses the vCPU specific data to set the vCPU
640 * affinity for an irq. The vCPU specific data is passed from
641 * outside, such as KVM. One example code path is as below:
642 * KVM -> IOMMU -> irq_set_vcpu_affinity().
643 */
644int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
645{
646 unsigned long flags;
647 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
648 struct irq_data *data;
649 struct irq_chip *chip;
650 int ret = -ENOSYS;
651
652 if (!desc)
653 return -EINVAL;
654
655 data = irq_desc_get_irq_data(desc);
656 do {
657 chip = irq_data_get_irq_chip(data);
658 if (chip && chip->irq_set_vcpu_affinity)
659 break;
660#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
661 data = data->parent_data;
662#else
663 data = NULL;
664#endif
665 } while (data);
666
667 if (data)
668 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
669 irq_put_desc_unlock(desc, flags);
670
671 return ret;
672}
673EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
674
675void __disable_irq(struct irq_desc *desc)
676{
677 if (!desc->depth++)
678 irq_disable(desc);
679}
680
681static int __disable_irq_nosync(unsigned int irq)
682{
683 unsigned long flags;
684 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
685
686 if (!desc)
687 return -EINVAL;
688 __disable_irq(desc);
689 irq_put_desc_busunlock(desc, flags);
690 return 0;
691}
692
693/**
694 * disable_irq_nosync - disable an irq without waiting
695 * @irq: Interrupt to disable
696 *
697 * Disable the selected interrupt line. Disables and Enables are
698 * nested.
699 * Unlike disable_irq(), this function does not ensure existing
700 * instances of the IRQ handler have completed before returning.
701 *
702 * This function may be called from IRQ context.
703 */
704void disable_irq_nosync(unsigned int irq)
705{
706 __disable_irq_nosync(irq);
707}
708EXPORT_SYMBOL(disable_irq_nosync);
709
710/**
711 * disable_irq - disable an irq and wait for completion
712 * @irq: Interrupt to disable
713 *
714 * Disable the selected interrupt line. Enables and Disables are
715 * nested.
716 * This function waits for any pending IRQ handlers for this interrupt
717 * to complete before returning. If you use this function while
718 * holding a resource the IRQ handler may need you will deadlock.
719 *
720 * This function may be called - with care - from IRQ context.
721 */
722void disable_irq(unsigned int irq)
723{
724 if (!__disable_irq_nosync(irq))
725 synchronize_irq(irq);
726}
727EXPORT_SYMBOL(disable_irq);
728
729/**
730 * disable_hardirq - disables an irq and waits for hardirq completion
731 * @irq: Interrupt to disable
732 *
733 * Disable the selected interrupt line. Enables and Disables are
734 * nested.
735 * This function waits for any pending hard IRQ handlers for this
736 * interrupt to complete before returning. If you use this function while
737 * holding a resource the hard IRQ handler may need you will deadlock.
738 *
739 * When used to optimistically disable an interrupt from atomic context
740 * the return value must be checked.
741 *
742 * Returns: false if a threaded handler is active.
743 *
744 * This function may be called - with care - from IRQ context.
745 */
746bool disable_hardirq(unsigned int irq)
747{
748 if (!__disable_irq_nosync(irq))
749 return synchronize_hardirq(irq);
750
751 return false;
752}
753EXPORT_SYMBOL_GPL(disable_hardirq);
754
755/**
756 * disable_nmi_nosync - disable an nmi without waiting
757 * @irq: Interrupt to disable
758 *
759 * Disable the selected interrupt line. Disables and enables are
760 * nested.
761 * The interrupt to disable must have been requested through request_nmi.
762 * Unlike disable_nmi(), this function does not ensure existing
763 * instances of the IRQ handler have completed before returning.
764 */
765void disable_nmi_nosync(unsigned int irq)
766{
767 disable_irq_nosync(irq);
768}
769
770void __enable_irq(struct irq_desc *desc)
771{
772 switch (desc->depth) {
773 case 0:
774 err_out:
775 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
776 irq_desc_get_irq(desc));
777 break;
778 case 1: {
779 if (desc->istate & IRQS_SUSPENDED)
780 goto err_out;
781 /* Prevent probing on this irq: */
782 irq_settings_set_noprobe(desc);
783 /*
784 * Call irq_startup() not irq_enable() here because the
785 * interrupt might be marked NOAUTOEN. So irq_startup()
786 * needs to be invoked when it gets enabled the first
787 * time. If it was already started up, then irq_startup()
788 * will invoke irq_enable() under the hood.
789 */
790 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
791 break;
792 }
793 default:
794 desc->depth--;
795 }
796}
797
798/**
799 * enable_irq - enable handling of an irq
800 * @irq: Interrupt to enable
801 *
802 * Undoes the effect of one call to disable_irq(). If this
803 * matches the last disable, processing of interrupts on this
804 * IRQ line is re-enabled.
805 *
806 * This function may be called from IRQ context only when
807 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
808 */
809void enable_irq(unsigned int irq)
810{
811 unsigned long flags;
812 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
813
814 if (!desc)
815 return;
816 if (WARN(!desc->irq_data.chip,
817 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
818 goto out;
819
820 __enable_irq(desc);
821out:
822 irq_put_desc_busunlock(desc, flags);
823}
824EXPORT_SYMBOL(enable_irq);
825
826/**
827 * enable_nmi - enable handling of an nmi
828 * @irq: Interrupt to enable
829 *
830 * The interrupt to enable must have been requested through request_nmi.
831 * Undoes the effect of one call to disable_nmi(). If this
832 * matches the last disable, processing of interrupts on this
833 * IRQ line is re-enabled.
834 */
835void enable_nmi(unsigned int irq)
836{
837 enable_irq(irq);
838}
839
840static int set_irq_wake_real(unsigned int irq, unsigned int on)
841{
842 struct irq_desc *desc = irq_to_desc(irq);
843 int ret = -ENXIO;
844
845 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
846 return 0;
847
848 if (desc->irq_data.chip->irq_set_wake)
849 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
850
851 return ret;
852}
853
854/**
855 * irq_set_irq_wake - control irq power management wakeup
856 * @irq: interrupt to control
857 * @on: enable/disable power management wakeup
858 *
859 * Enable/disable power management wakeup mode, which is
860 * disabled by default. Enables and disables must match,
861 * just as they match for non-wakeup mode support.
862 *
863 * Wakeup mode lets this IRQ wake the system from sleep
864 * states like "suspend to RAM".
865 *
866 * Note: irq enable/disable state is completely orthogonal
867 * to the enable/disable state of irq wake. An irq can be
868 * disabled with disable_irq() and still wake the system as
869 * long as the irq has wake enabled. If this does not hold,
870 * then the underlying irq chip and the related driver need
871 * to be investigated.
872 */
873int irq_set_irq_wake(unsigned int irq, unsigned int on)
874{
875 unsigned long flags;
876 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
877 int ret = 0;
878
879 if (!desc)
880 return -EINVAL;
881
882 /* Don't use NMIs as wake up interrupts please */
883 if (desc->istate & IRQS_NMI) {
884 ret = -EINVAL;
885 goto out_unlock;
886 }
887
	/*
	 * Wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
891 if (on) {
892 if (desc->wake_depth++ == 0) {
893 ret = set_irq_wake_real(irq, on);
894 if (ret)
895 desc->wake_depth = 0;
896 else
897 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
898 }
899 } else {
900 if (desc->wake_depth == 0) {
901 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
902 } else if (--desc->wake_depth == 0) {
903 ret = set_irq_wake_real(irq, on);
904 if (ret)
905 desc->wake_depth = 1;
906 else
907 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
908 }
909 }
910
911out_unlock:
912 irq_put_desc_busunlock(desc, flags);
913 return ret;
914}
915EXPORT_SYMBOL(irq_set_irq_wake);
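
/*
 * Example: a hedged sketch of typical wake usage from driver suspend
 * and resume callbacks (struct foo_dev and foo->irq are hypothetical).
 * The enable_irq_wake()/disable_irq_wake() helpers from
 * <linux/interrupt.h> wrap irq_set_irq_wake(irq, 1) and
 * irq_set_irq_wake(irq, 0) and must be balanced, as documented above:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */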
916
917/*
918 * Internal function that tells the architecture code whether a
919 * particular irq has been exclusively allocated or is available
920 * for driver use.
921 */
922int can_request_irq(unsigned int irq, unsigned long irqflags)
923{
924 unsigned long flags;
925 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
926 int canrequest = 0;
927
928 if (!desc)
929 return 0;
930
931 if (irq_settings_can_request(desc)) {
932 if (!desc->action ||
933 irqflags & desc->action->flags & IRQF_SHARED)
934 canrequest = 1;
935 }
936 irq_put_desc_unlock(desc, flags);
937 return canrequest;
938}
939
940int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
941{
942 struct irq_chip *chip = desc->irq_data.chip;
943 int ret, unmask = 0;
944
945 if (!chip || !chip->irq_set_type) {
946 /*
947 * IRQF_TRIGGER_* but the PIC does not support multiple
948 * flow-types?
949 */
950 pr_debug("No set_type function for IRQ %d (%s)\n",
951 irq_desc_get_irq(desc),
952 chip ? (chip->name ? : "unknown") : "unknown");
953 return 0;
954 }
955
956 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
957 if (!irqd_irq_masked(&desc->irq_data))
958 mask_irq(desc);
959 if (!irqd_irq_disabled(&desc->irq_data))
960 unmask = 1;
961 }
962
963 /* Mask all flags except trigger mode */
964 flags &= IRQ_TYPE_SENSE_MASK;
965 ret = chip->irq_set_type(&desc->irq_data, flags);
966
967 switch (ret) {
968 case IRQ_SET_MASK_OK:
969 case IRQ_SET_MASK_OK_DONE:
970 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
971 irqd_set(&desc->irq_data, flags);
972 fallthrough;
973
974 case IRQ_SET_MASK_OK_NOCOPY:
975 flags = irqd_get_trigger_type(&desc->irq_data);
976 irq_settings_set_trigger_mask(desc, flags);
977 irqd_clear(&desc->irq_data, IRQD_LEVEL);
978 irq_settings_clr_level(desc);
979 if (flags & IRQ_TYPE_LEVEL_MASK) {
980 irq_settings_set_level(desc);
981 irqd_set(&desc->irq_data, IRQD_LEVEL);
982 }
983
984 ret = 0;
985 break;
986 default:
987 pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
988 flags, irq_desc_get_irq(desc), chip->irq_set_type);
989 }
990 if (unmask)
991 unmask_irq(desc);
992 return ret;
993}
994
995#ifdef CONFIG_HARDIRQS_SW_RESEND
996int irq_set_parent(int irq, int parent_irq)
997{
998 unsigned long flags;
999 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1000
1001 if (!desc)
1002 return -EINVAL;
1003
1004 desc->parent_irq = parent_irq;
1005
1006 irq_put_desc_unlock(desc, flags);
1007 return 0;
1008}
1009EXPORT_SYMBOL_GPL(irq_set_parent);
1010#endif
1011
1012/*
1013 * Default primary interrupt handler for threaded interrupts. Is
1014 * assigned as primary handler when request_threaded_irq is called
1015 * with handler == NULL. Useful for oneshot interrupts.
1016 */
1017static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
1018{
1019 return IRQ_WAKE_THREAD;
1020}
1021
1022/*
1023 * Primary handler for nested threaded interrupts. Should never be
1024 * called.
1025 */
1026static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
1027{
1028 WARN(1, "Primary handler called for nested irq %d\n", irq);
1029 return IRQ_NONE;
1030}
1031
1032static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
1033{
1034 WARN(1, "Secondary action handler called for irq %d\n", irq);
1035 return IRQ_NONE;
1036}
1037
1038static int irq_wait_for_interrupt(struct irqaction *action)
1039{
1040 for (;;) {
1041 set_current_state(TASK_INTERRUPTIBLE);
1042
1043 if (kthread_should_stop()) {
1044 /* may need to run one last time */
1045 if (test_and_clear_bit(IRQTF_RUNTHREAD,
1046 &action->thread_flags)) {
1047 __set_current_state(TASK_RUNNING);
1048 return 0;
1049 }
1050 __set_current_state(TASK_RUNNING);
1051 return -1;
1052 }
1053
1054 if (test_and_clear_bit(IRQTF_RUNTHREAD,
1055 &action->thread_flags)) {
1056 __set_current_state(TASK_RUNNING);
1057 return 0;
1058 }
1059 schedule();
1060 }
1061}
1062
/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask the line if the interrupt has not been
 * disabled and is marked MASKED.
 */
1068static void irq_finalize_oneshot(struct irq_desc *desc,
1069 struct irqaction *action)
1070{
1071 if (!(desc->istate & IRQS_ONESHOT) ||
1072 action->handler == irq_forced_secondary_handler)
1073 return;
1074again:
1075 chip_bus_lock(desc);
1076 raw_spin_lock_irq(&desc->lock);
1077
	/*
	 * Implausible though it may be, we need to protect against
	 * the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again, mask the line and leave due
	 * to IRQS_INPROGRESS, so the irq line stays masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
1092 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
1093 raw_spin_unlock_irq(&desc->lock);
1094 chip_bus_sync_unlock(desc);
1095 cpu_relax();
1096 goto again;
1097 }
1098
	/*
	 * Now check again whether the thread should run. Otherwise we
	 * would clear the threads_oneshot bit of this thread which was
	 * just set.
	 */
1104 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1105 goto out_unlock;
1106
1107 desc->threads_oneshot &= ~action->thread_mask;
1108
1109 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
1110 irqd_irq_masked(&desc->irq_data))
1111 unmask_threaded_irq(desc);
1112
1113out_unlock:
1114 raw_spin_unlock_irq(&desc->lock);
1115 chip_bus_sync_unlock(desc);
1116}
1117
1118#ifdef CONFIG_SMP
1119/*
1120 * Check whether we need to change the affinity of the interrupt thread.
1121 */
1122static void
1123irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1124{
1125 cpumask_var_t mask;
1126 bool valid = true;
1127
1128 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1129 return;
1130
1131 /*
1132 * In case we are out of memory we set IRQTF_AFFINITY again and
1133 * try again next time
1134 */
1135 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1136 set_bit(IRQTF_AFFINITY, &action->thread_flags);
1137 return;
1138 }
1139
1140 raw_spin_lock_irq(&desc->lock);
1141 /*
1142 * This code is triggered unconditionally. Check the affinity
1143 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
1144 */
1145 if (cpumask_available(desc->irq_common_data.affinity)) {
1146 const struct cpumask *m;
1147
1148 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1149 cpumask_copy(mask, m);
1150 } else {
1151 valid = false;
1152 }
1153 raw_spin_unlock_irq(&desc->lock);
1154
1155 if (valid)
1156 set_cpus_allowed_ptr(current, mask);
1157 free_cpumask_var(mask);
1158}
1159#else
1160static inline void
1161irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1162#endif
1163
1164/*
1165 * Interrupts which are not explicitly requested as threaded
1166 * interrupts rely on the implicit bh/preempt disable of the hard irq
1167 * context. So we need to disable bh here to avoid deadlocks and other
1168 * side effects.
1169 */
1170static irqreturn_t
1171irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1172{
1173 irqreturn_t ret;
1174
1175 local_bh_disable();
1176 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1177 local_irq_disable();
1178 ret = action->thread_fn(action->irq, action->dev_id);
1179 if (ret == IRQ_HANDLED)
1180 atomic_inc(&desc->threads_handled);
1181
1182 irq_finalize_oneshot(desc, action);
1183 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1184 local_irq_enable();
1185 local_bh_enable();
1186 return ret;
1187}
1188
/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
1194static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1195 struct irqaction *action)
1196{
1197 irqreturn_t ret;
1198
1199 ret = action->thread_fn(action->irq, action->dev_id);
1200 if (ret == IRQ_HANDLED)
1201 atomic_inc(&desc->threads_handled);
1202
1203 irq_finalize_oneshot(desc, action);
1204 return ret;
1205}
1206
1207static void wake_threads_waitq(struct irq_desc *desc)
1208{
1209 if (atomic_dec_and_test(&desc->threads_active))
1210 wake_up(&desc->wait_for_threads);
1211}
1212
1213static void irq_thread_dtor(struct callback_head *unused)
1214{
1215 struct task_struct *tsk = current;
1216 struct irq_desc *desc;
1217 struct irqaction *action;
1218
1219 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1220 return;
1221
1222 action = kthread_data(tsk);
1223
1224 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

1228 desc = irq_to_desc(action->irq);
1229 /*
1230 * If IRQTF_RUNTHREAD is set, we need to decrement
1231 * desc->threads_active and wake possible waiters.
1232 */
1233 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1234 wake_threads_waitq(desc);
1235
1236 /* Prevent a stale desc->threads_oneshot */
1237 irq_finalize_oneshot(desc, action);
1238}
1239
1240static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1241{
1242 struct irqaction *secondary = action->secondary;
1243
1244 if (WARN_ON_ONCE(!secondary))
1245 return;
1246
1247 raw_spin_lock_irq(&desc->lock);
1248 __irq_wake_thread(desc, secondary);
1249 raw_spin_unlock_irq(&desc->lock);
1250}
1251
1252/*
1253 * Interrupt handler thread
1254 */
1255static int irq_thread(void *data)
1256{
1257 struct callback_head on_exit_work;
1258 struct irqaction *action = data;
1259 struct irq_desc *desc = irq_to_desc(action->irq);
1260 irqreturn_t (*handler_fn)(struct irq_desc *desc,
1261 struct irqaction *action);
1262
	if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
					   &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;
1268
1269 init_task_work(&on_exit_work, irq_thread_dtor);
1270 task_work_add(current, &on_exit_work, TWA_NONE);
1271
1272 irq_thread_check_affinity(desc, action);
1273
1274 while (!irq_wait_for_interrupt(action)) {
1275 irqreturn_t action_ret;
1276
1277 irq_thread_check_affinity(desc, action);
1278
1279 action_ret = handler_fn(desc, action);
1280 if (action_ret == IRQ_WAKE_THREAD)
1281 irq_wake_secondary(desc, action);
1282
1283 wake_threads_waitq(desc);
1284 }
1285
1286 /*
1287 * This is the regular exit path. __free_irq() is stopping the
1288 * thread via kthread_stop() after calling
1289 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1290 * oneshot mask bit can be set.
1291 */
1292 task_work_cancel(current, irq_thread_dtor);
1293 return 0;
1294}
1295
1296/**
1297 * irq_wake_thread - wake the irq thread for the action identified by dev_id
1298 * @irq: Interrupt line
1299 * @dev_id: Device identity for which the thread should be woken
1300 *
1301 */
1302void irq_wake_thread(unsigned int irq, void *dev_id)
1303{
1304 struct irq_desc *desc = irq_to_desc(irq);
1305 struct irqaction *action;
1306 unsigned long flags;
1307
1308 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1309 return;
1310
1311 raw_spin_lock_irqsave(&desc->lock, flags);
1312 for_each_action_of_desc(desc, action) {
1313 if (action->dev_id == dev_id) {
1314 if (action->thread)
1315 __irq_wake_thread(desc, action);
1316 break;
1317 }
1318 }
1319 raw_spin_unlock_irqrestore(&desc->lock, flags);
1320}
1321EXPORT_SYMBOL_GPL(irq_wake_thread);
1322
1323static int irq_setup_forced_threading(struct irqaction *new)
1324{
	if (!force_irqthreads())
		return 0;
1327 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1328 return 0;
1329
1330 /*
1331 * No further action required for interrupts which are requested as
1332 * threaded interrupts already
1333 */
1334 if (new->handler == irq_default_primary_handler)
1335 return 0;
1336
1337 new->flags |= IRQF_ONESHOT;
1338
	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force-thread them as well by creating a
	 * secondary action.
	 */
1344 if (new->handler && new->thread_fn) {
1345 /* Allocate the secondary action */
1346 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1347 if (!new->secondary)
1348 return -ENOMEM;
1349 new->secondary->handler = irq_forced_secondary_handler;
1350 new->secondary->thread_fn = new->thread_fn;
1351 new->secondary->dev_id = new->dev_id;
1352 new->secondary->irq = new->irq;
1353 new->secondary->name = new->name;
1354 }
1355 /* Deal with the primary handler */
1356 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1357 new->thread_fn = new->handler;
1358 new->handler = irq_default_primary_handler;
1359 return 0;
1360}
1361
1362static int irq_request_resources(struct irq_desc *desc)
1363{
1364 struct irq_data *d = &desc->irq_data;
1365 struct irq_chip *c = d->chip;
1366
1367 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1368}
1369
1370static void irq_release_resources(struct irq_desc *desc)
1371{
1372 struct irq_data *d = &desc->irq_data;
1373 struct irq_chip *c = d->chip;
1374
1375 if (c->irq_release_resources)
1376 c->irq_release_resources(d);
1377}
1378
1379static bool irq_supports_nmi(struct irq_desc *desc)
1380{
1381 struct irq_data *d = irq_desc_get_irq_data(desc);
1382
1383#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1384 /* Only IRQs directly managed by the root irqchip can be set as NMI */
1385 if (d->parent_data)
1386 return false;
1387#endif
1388 /* Don't support NMIs for chips behind a slow bus */
1389 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1390 return false;
1391
1392 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1393}
1394
1395static int irq_nmi_setup(struct irq_desc *desc)
1396{
1397 struct irq_data *d = irq_desc_get_irq_data(desc);
1398 struct irq_chip *c = d->chip;
1399
1400 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1401}
1402
1403static void irq_nmi_teardown(struct irq_desc *desc)
1404{
1405 struct irq_data *d = irq_desc_get_irq_data(desc);
1406 struct irq_chip *c = d->chip;
1407
1408 if (c->irq_nmi_teardown)
1409 c->irq_nmi_teardown(d);
1410}
1411
1412static int
1413setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1414{
1415 struct task_struct *t;
1416
1417 if (!secondary) {
1418 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1419 new->name);
1420 } else {
1421 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1422 new->name);
1423 }
1424
1425 if (IS_ERR(t))
1426 return PTR_ERR(t);
1427
1428 sched_set_fifo(t);
1429
1430 /*
1431 * We keep the reference to the task struct even if
1432 * the thread dies to avoid that the interrupt code
1433 * references an already freed task_struct.
1434 */
1435 new->thread = get_task_struct(t);
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NOBALANCING this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
1445 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1446 return 0;
1447}
1448
1449/*
1450 * Internal function to register an irqaction - typically used to
1451 * allocate special interrupts that are part of the architecture.
1452 *
1453 * Locking rules:
1454 *
1455 * desc->request_mutex Provides serialization against a concurrent free_irq()
1456 * chip_bus_lock Provides serialization for slow bus operations
1457 * desc->lock Provides serialization against hard interrupts
1458 *
1459 * chip_bus_lock and desc->lock are sufficient for all other management and
1460 * interrupt related functions. desc->request_mutex solely serializes
1461 * request/free_irq().
1462 */
1463static int
1464__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1465{
1466 struct irqaction *old, **old_ptr;
1467 unsigned long flags, thread_mask = 0;
1468 int ret, nested, shared = 0;
1469
1470 if (!desc)
1471 return -EINVAL;
1472
1473 if (desc->irq_data.chip == &no_irq_chip)
1474 return -ENOSYS;
1475 if (!try_module_get(desc->owner))
1476 return -ENODEV;
1477
1478 new->irq = irq;
1479
1480 /*
1481 * If the trigger type is not specified by the caller,
1482 * then use the default for this interrupt.
1483 */
1484 if (!(new->flags & IRQF_TRIGGER_MASK))
1485 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1486
1487 /*
1488 * Check whether the interrupt nests into another interrupt
1489 * thread.
1490 */
1491 nested = irq_settings_is_nested_thread(desc);
1492 if (nested) {
1493 if (!new->thread_fn) {
1494 ret = -EINVAL;
1495 goto out_mput;
1496 }
		/*
		 * Replace the primary handler, which was provided by the
		 * driver for non-nested interrupt handling, with the
		 * dummy function which warns when called.
		 */
1502 new->handler = irq_nested_primary_handler;
1503 } else {
1504 if (irq_settings_can_thread(desc)) {
1505 ret = irq_setup_forced_threading(new);
1506 if (ret)
1507 goto out_mput;
1508 }
1509 }
1510
1511 /*
1512 * Create a handler thread when a thread function is supplied
1513 * and the interrupt does not nest into another interrupt
1514 * thread.
1515 */
1516 if (new->thread_fn && !nested) {
1517 ret = setup_irq_thread(new, irq, false);
1518 if (ret)
1519 goto out_mput;
1520 if (new->secondary) {
1521 ret = setup_irq_thread(new->secondary, irq, true);
1522 if (ret)
1523 goto out_thread;
1524 }
1525 }
1526
1527 /*
1528 * Drivers are often written to work w/o knowledge about the
1529 * underlying irq chip implementation, so a request for a
1530 * threaded irq without a primary hard irq context handler
1531 * requires the ONESHOT flag to be set. Some irq chips like
1532 * MSI based interrupts are per se one shot safe. Check the
1533 * chip flags, so we can avoid the unmask dance at the end of
1534 * the threaded handler for those.
1535 */
1536 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1537 new->flags &= ~IRQF_ONESHOT;
1538
1539 /*
1540 * Protects against a concurrent __free_irq() call which might wait
1541 * for synchronize_hardirq() to complete without holding the optional
1542 * chip bus lock and desc->lock. Also protects against handing out
1543 * a recycled oneshot thread_mask bit while it's still in use by
1544 * its previous owner.
1545 */
1546 mutex_lock(&desc->request_mutex);
1547
	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback.
	 */
1553 chip_bus_lock(desc);
1554
1555 /* First installed action requests resources. */
1556 if (!desc->action) {
1557 ret = irq_request_resources(desc);
1558 if (ret) {
1559 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1560 new->name, irq, desc->irq_data.chip->name);
1561 goto out_bus_unlock;
1562 }
1563 }
1564
1565 /*
1566 * The following block of code has to be executed atomically
1567 * protected against a concurrent interrupt and any of the other
1568 * management calls which are not serialized via
1569 * desc->request_mutex or the optional bus lock.
1570 */
1571 raw_spin_lock_irqsave(&desc->lock, flags);
1572 old_ptr = &desc->action;
1573 old = *old_ptr;
1574 if (old) {
1575 /*
1576 * Can't share interrupts unless both agree to and are
1577 * the same type (level, edge, polarity). So both flag
1578 * fields must have IRQF_SHARED set and the bits which
1579 * set the trigger type must match. Also all must
1580 * agree on ONESHOT.
1581 * Interrupt lines used for NMIs cannot be shared.
1582 */
1583 unsigned int oldtype;
1584
1585 if (desc->istate & IRQS_NMI) {
1586 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1587 new->name, irq, desc->irq_data.chip->name);
1588 ret = -EINVAL;
1589 goto out_unlock;
1590 }
1591
		/*
		 * If nobody set the trigger configuration before, inherit
		 * the one provided by the requester.
		 */
1596 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1597 oldtype = irqd_get_trigger_type(&desc->irq_data);
1598 } else {
1599 oldtype = new->flags & IRQF_TRIGGER_MASK;
1600 irqd_set_trigger_type(&desc->irq_data, oldtype);
1601 }
1602
1603 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1604 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1605 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1606 goto mismatch;
1607
1608 /* All handlers must agree on per-cpuness */
1609 if ((old->flags & IRQF_PERCPU) !=
1610 (new->flags & IRQF_PERCPU))
1611 goto mismatch;
1612
1613 /* add new interrupt at end of irq queue */
1614 do {
1615 /*
1616 * Or all existing action->thread_mask bits,
1617 * so we can find the next zero bit for this
1618 * new action.
1619 */
1620 thread_mask |= old->thread_mask;
1621 old_ptr = &old->next;
1622 old = *old_ptr;
1623 } while (old);
1624 shared = 1;
1625 }
1626
1627 /*
1628 * Setup the thread mask for this irqaction for ONESHOT. For
1629 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1630 * conditional in irq_wake_thread().
1631 */
1632 if (new->flags & IRQF_ONESHOT) {
		/*
		 * It is unlikely to have 32 (or 64 on 64-bit) irqs
		 * sharing one line, but who knows.
		 */
1637 if (thread_mask == ~0UL) {
1638 ret = -EBUSY;
1639 goto out_unlock;
1640 }
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_oneshot to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_oneshot becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_oneshot is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
1661 new->thread_mask = 1UL << ffz(thread_mask);
1662
1663 } else if (new->handler == irq_default_primary_handler &&
1664 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is re-enabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
1680 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1681 new->name, irq);
1682 ret = -EINVAL;
1683 goto out_unlock;
1684 }
1685
1686 if (!shared) {
1687 init_waitqueue_head(&desc->wait_for_threads);
1688
1689 /* Setup the type (level, edge polarity) if configured: */
1690 if (new->flags & IRQF_TRIGGER_MASK) {
1691 ret = __irq_set_trigger(desc,
1692 new->flags & IRQF_TRIGGER_MASK);
1693
1694 if (ret)
1695 goto out_unlock;
1696 }
1697
		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode, it merely associates
		 * resources if necessary and if that's not possible it
		 * fails. Interrupts which are in managed shutdown mode
		 * will simply ignore that activation request.
		 */
1709 ret = irq_activate(desc);
1710 if (ret)
1711 goto out_unlock;
1712
1713 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1714 IRQS_ONESHOT | IRQS_WAITING);
1715 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1716
1717 if (new->flags & IRQF_PERCPU) {
1718 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1719 irq_settings_set_per_cpu(desc);
1720 if (new->flags & IRQF_NO_DEBUG)
1721 irq_settings_set_no_debug(desc);
1722 }
1723
1724 if (noirqdebug)
1725 irq_settings_set_no_debug(desc);
1726
1727 if (new->flags & IRQF_ONESHOT)
1728 desc->istate |= IRQS_ONESHOT;
1729
1730 /* Exclude IRQ from balancing if requested */
1731 if (new->flags & IRQF_NOBALANCING) {
1732 irq_settings_set_no_balancing(desc);
1733 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1734 }
1735
1736 if (!(new->flags & IRQF_NO_AUTOEN) &&
1737 irq_settings_can_autoenable(desc)) {
1738 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1739 } else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing partner might request
			 * the irq line while it's still disabled and then
			 * wait for interrupts forever.
			 */
1746 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1747 /* Undo nested disables: */
1748 desc->depth = 1;
1749 }
1750
1751 } else if (new->flags & IRQF_TRIGGER_MASK) {
1752 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1753 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1754
1755 if (nmsk != omsk)
1756 /* hope the handler works with current trigger mode */
1757 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1758 irq, omsk, nmsk);
1759 }
1760
1761 *old_ptr = new;
1762
1763 irq_pm_install_action(desc, new);
1764
1765 /* Reset broken irq detection when installing new handler */
1766 desc->irq_count = 0;
1767 desc->irqs_unhandled = 0;
1768
1769 /*
1770 * Check whether we disabled the irq via the spurious handler
1771 * before. Reenable it and give it another chance.
1772 */
1773 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1774 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1775 __enable_irq(desc);
1776 }
1777
1778 raw_spin_unlock_irqrestore(&desc->lock, flags);
1779 chip_bus_sync_unlock(desc);
1780 mutex_unlock(&desc->request_mutex);
1781
1782 irq_setup_timings(desc, new);
1783
1784 /*
1785 * Strictly no need to wake it up, but hung_task complains
1786 * when no hard interrupt wakes the thread up.
1787 */
1788 if (new->thread)
1789 wake_up_process(new->thread);
1790 if (new->secondary)
1791 wake_up_process(new->secondary->thread);
1792
1793 register_irq_proc(irq, desc);
1794 new->dir = NULL;
1795 register_handler_proc(irq, new);
1796 return 0;
1797
1798mismatch:
1799 if (!(new->flags & IRQF_PROBE_SHARED)) {
1800 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1801 irq, new->flags, new->name, old->flags, old->name);
1802#ifdef CONFIG_DEBUG_SHIRQ
1803 dump_stack();
1804#endif
1805 }
1806 ret = -EBUSY;
1807
1808out_unlock:
1809 raw_spin_unlock_irqrestore(&desc->lock, flags);
1810
1811 if (!desc->action)
1812 irq_release_resources(desc);
1813out_bus_unlock:
1814 chip_bus_sync_unlock(desc);
1815 mutex_unlock(&desc->request_mutex);
1816
1817out_thread:
1818 if (new->thread) {
1819 struct task_struct *t = new->thread;
1820
1821 new->thread = NULL;
1822 kthread_stop(t);
1823 put_task_struct(t);
1824 }
1825 if (new->secondary && new->secondary->thread) {
1826 struct task_struct *t = new->secondary->thread;
1827
1828 new->secondary->thread = NULL;
1829 kthread_stop(t);
1830 put_task_struct(t);
1831 }
1832out_mput:
1833 module_put(desc->owner);
1834 return ret;
1835}
1836
1837/*
1838 * Internal function to unregister an irqaction - used to free
1839 * regular and special interrupts that are part of the architecture.
1840 */
1841static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1842{
1843 unsigned irq = desc->irq_data.irq;
1844 struct irqaction *action, **action_ptr;
1845 unsigned long flags;
1846
1847 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1848
1849 mutex_lock(&desc->request_mutex);
1850 chip_bus_lock(desc);
1851 raw_spin_lock_irqsave(&desc->lock, flags);
1852
1853 /*
1854 * There can be multiple actions per IRQ descriptor, find the right
1855 * one based on the dev_id:
1856 */
1857 action_ptr = &desc->action;
1858 for (;;) {
1859 action = *action_ptr;
1860
1861 if (!action) {
1862 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1863 raw_spin_unlock_irqrestore(&desc->lock, flags);
1864 chip_bus_sync_unlock(desc);
1865 mutex_unlock(&desc->request_mutex);
1866 return NULL;
1867 }
1868
1869 if (action->dev_id == dev_id)
1870 break;
1871 action_ptr = &action->next;
1872 }
1873
1874 /* Found it - now remove it from the list of entries: */
1875 *action_ptr = action->next;
1876
1877 irq_pm_remove_action(desc, action);
1878
1879 /* If this was the last handler, shut down the IRQ line: */
1880 if (!desc->action) {
1881 irq_settings_clr_disable_unlazy(desc);
1882 /* Only shutdown. Deactivate after synchronize_hardirq() */
1883 irq_shutdown(desc);
1884 }
1885
1886#ifdef CONFIG_SMP
1887 /* make sure affinity_hint is cleaned up */
1888 if (WARN_ON_ONCE(desc->affinity_hint))
1889 desc->affinity_hint = NULL;
1890#endif
1891
1892 raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
	 *
	 * Aside from that the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
1907 chip_bus_sync_unlock(desc);
1908
1909 unregister_handler_proc(irq, action);
1910
1911 /*
1912 * Make sure it's not being used on another CPU and if the chip
1913 * supports it also make sure that there is no (not yet serviced)
1914 * interrupt in flight at the hardware level.
1915 */
1916 __synchronize_hardirq(desc, true);
1917
1918#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now that it's being freed, so let's make
	 * sure that is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
1927 if (action->flags & IRQF_SHARED) {
1928 local_irq_save(flags);
1929 action->handler(irq, dev_id);
1930 local_irq_restore(flags);
1931 }
1932#endif
1933
	/*
	 * The action has already been removed above, but the thread writes
	 * its oneshot mask bit when it completes. However, request_mutex is
	 * held across this, which prevents __setup_irq() from handing out
	 * the same bit to a newly requested action.
	 */
1940 if (action->thread) {
1941 kthread_stop(action->thread);
1942 put_task_struct(action->thread);
1943 if (action->secondary && action->secondary->thread) {
1944 kthread_stop(action->secondary->thread);
1945 put_task_struct(action->secondary->thread);
1946 }
1947 }
1948
1949 /* Last action releases resources */
1950 if (!desc->action) {
1951 /*
1952 * Reacquire bus lock as irq_release_resources() might
1953 * require it to deallocate resources over the slow bus.
1954 */
1955 chip_bus_lock(desc);
1956 /*
1957 * There is no interrupt on the fly anymore. Deactivate it
1958 * completely.
1959 */
1960 raw_spin_lock_irqsave(&desc->lock, flags);
1961 irq_domain_deactivate_irq(&desc->irq_data);
1962 raw_spin_unlock_irqrestore(&desc->lock, flags);
1963
1964 irq_release_resources(desc);
1965 chip_bus_sync_unlock(desc);
1966 irq_remove_timings(desc);
1967 }
1968
1969 mutex_unlock(&desc->request_mutex);
1970
1971 irq_chip_pm_put(&desc->irq_data);
1972 module_put(desc->owner);
1973 kfree(action->secondary);
1974 return action;
1975}
1976
1977/**
1978 * free_irq - free an interrupt allocated with request_irq
1979 * @irq: Interrupt line to free
1980 * @dev_id: Device identity to free
1981 *
1982 * Remove an interrupt handler. The handler is removed and if the
1983 * interrupt line is no longer in use by any driver it is disabled.
1984 * On a shared IRQ the caller must ensure the interrupt is disabled
1985 * on the card it drives before calling this function. The function
1986 * does not return until any executing interrupts for this IRQ
1987 * have completed.
1988 *
1989 * This function must not be called from interrupt context.
1990 *
1991 * Returns the devname argument passed to request_irq.
1992 */
1993const void *free_irq(unsigned int irq, void *dev_id)
1994{
1995 struct irq_desc *desc = irq_to_desc(irq);
1996 struct irqaction *action;
1997 const char *devname;
1998
1999 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2000 return NULL;
2001
2002#ifdef CONFIG_SMP
2003 if (WARN_ON(desc->affinity_notify))
2004 desc->affinity_notify = NULL;
2005#endif
2006
2007 action = __free_irq(desc, dev_id);
2008
2009 if (!action)
2010 return NULL;
2011
2012 devname = action->name;
2013 kfree(action);
2014 return devname;
2015}
2016EXPORT_SYMBOL(free_irq);
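
/*
 * Example: a hedged teardown sketch (the foo_* names are hypothetical).
 * On a shared line the device's own interrupt source is silenced before
 * free_irq(), as required above, and dev_id matches the request:
 *
 *	static void foo_remove(struct foo_dev *foo)
 *	{
 *		foo_mask_device_irqs(foo);	// quiesce the device first
 *		free_irq(foo->irq, foo);	// same dev_id as request_irq()
 *	}
 */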
2017
2018/* This function must be called with desc->lock held */
2019static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
2020{
2021 const char *devname = NULL;
2022
2023 desc->istate &= ~IRQS_NMI;
2024
2025 if (!WARN_ON(desc->action == NULL)) {
2026 irq_pm_remove_action(desc, desc->action);
2027 devname = desc->action->name;
2028 unregister_handler_proc(irq, desc->action);
2029
2030 kfree(desc->action);
2031 desc->action = NULL;
2032 }
2033
2034 irq_settings_clr_disable_unlazy(desc);
2035 irq_shutdown_and_deactivate(desc);
2036
2037 irq_release_resources(desc);
2038
2039 irq_chip_pm_put(&desc->irq_data);
2040 module_put(desc->owner);
2041
2042 return devname;
2043}
2044
2045const void *free_nmi(unsigned int irq, void *dev_id)
2046{
2047 struct irq_desc *desc = irq_to_desc(irq);
2048 unsigned long flags;
2049 const void *devname;
2050
2051 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
2052 return NULL;
2053
2054 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2055 return NULL;
2056
2057 /* NMI still enabled */
2058 if (WARN_ON(desc->depth == 0))
2059 disable_nmi_nosync(irq);
2060
2061 raw_spin_lock_irqsave(&desc->lock, flags);
2062
2063 irq_nmi_teardown(desc);
2064 devname = __cleanup_nmi(irq, desc);
2065
2066 raw_spin_unlock_irqrestore(&desc->lock, flags);
2067
2068 return devname;
2069}
2070
2071/**
2072 * request_threaded_irq - allocate an interrupt line
2073 * @irq: Interrupt line to allocate
2074 * @handler: Function to be called when the IRQ occurs.
2075 * Primary handler for threaded interrupts
2076 * If NULL and thread_fn != NULL the default
2077 * primary handler is installed
2078 * @thread_fn: Function called from the irq handler thread
2079 * If NULL, no irq thread is created
2080 * @irqflags: Interrupt type flags
2081 * @devname: An ascii name for the claiming device
2082 * @dev_id: A cookie passed back to the handler function
2083 *
2084 * This call allocates interrupt resources and enables the
2085 * interrupt line and IRQ handling. From the point this
2086 * call is made your handler function may be invoked. Since
2087 * your handler function must clear any interrupt the board
2088 * raises, you must take care both to initialise your hardware
2089 * and to set up the interrupt handler in the right order.
2090 *
2091 * If you want to set up a threaded irq handler for your device
2092 * then you need to supply @handler and @thread_fn. @handler is
2093 * still called in hard interrupt context and has to check
2094 * whether the interrupt originates from the device. If yes it
2095 * needs to disable the interrupt on the device and return
2096 * IRQ_WAKE_THREAD which will wake up the handler thread and run
2097 * @thread_fn. This split handler design is necessary to support
2098 * shared interrupts.
2099 *
2100 * Dev_id must be globally unique. Normally the address of the
2101 * device data structure is used as the cookie. Since the handler
2102 * receives this value it makes sense to use it.
2103 *
2104 * If your interrupt is shared you must pass a non NULL dev_id
2105 * as this is required when freeing the interrupt.
2106 *
2107 * Flags:
2108 *
2109 * IRQF_SHARED Interrupt is shared
2110 * IRQF_TRIGGER_* Specify active edge(s) or level
2111 *
2112 */
2113int request_threaded_irq(unsigned int irq, irq_handler_t handler,
2114 irq_handler_t thread_fn, unsigned long irqflags,
2115 const char *devname, void *dev_id)
2116{
2117 struct irqaction *action;
2118 struct irq_desc *desc;
2119 int retval;
2120
2121 if (irq == IRQ_NOTCONNECTED)
2122 return -ENOTCONN;
2123
2124 /*
2125 * Sanity-check: shared interrupts must pass in a real dev-ID,
2126 * otherwise we'll have trouble later trying to figure out
2127 * which interrupt is which (messes up the interrupt freeing
2128 * logic etc).
2129 *
	 * Also shared interrupts do not go well with disabling auto enable.
	 * The sharing partner might request the irq line while it's still
	 * disabled and then wait for interrupts forever.
2133 *
2134 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
2135 * it cannot be set along with IRQF_NO_SUSPEND.
2136 */
2137 if (((irqflags & IRQF_SHARED) && !dev_id) ||
2138 ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
2139 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
2140 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
2141 return -EINVAL;
2142
2143 desc = irq_to_desc(irq);
2144 if (!desc)
2145 return -EINVAL;
2146
2147 if (!irq_settings_can_request(desc) ||
2148 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2149 return -EINVAL;
2150
2151 if (!handler) {
2152 if (!thread_fn)
2153 return -EINVAL;
2154 handler = irq_default_primary_handler;
2155 }
2156
2157 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2158 if (!action)
2159 return -ENOMEM;
2160
2161 action->handler = handler;
2162 action->thread_fn = thread_fn;
2163 action->flags = irqflags;
2164 action->name = devname;
2165 action->dev_id = dev_id;
2166
2167 retval = irq_chip_pm_get(&desc->irq_data);
2168 if (retval < 0) {
2169 kfree(action);
2170 return retval;
2171 }
2172
2173 retval = __setup_irq(irq, desc, action);
2174
2175 if (retval) {
2176 irq_chip_pm_put(&desc->irq_data);
2177 kfree(action->secondary);
2178 kfree(action);
2179 }
2180
2181#ifdef CONFIG_DEBUG_SHIRQ_FIXME
2182 if (!retval && (irqflags & IRQF_SHARED)) {
2183 /*
2184 * It's a shared IRQ -- the driver ought to be prepared for it
2185 * to happen immediately, so let's make sure....
2186 * We disable the irq to make sure that a 'real' IRQ doesn't
2187 * run in parallel with our fake.
2188 */
2189 unsigned long flags;
2190
2191 disable_irq(irq);
2192 local_irq_save(flags);
2193
2194 handler(irq, dev_id);
2195
2196 local_irq_restore(flags);
2197 enable_irq(irq);
2198 }
2199#endif
2200 return retval;
2201}
2202EXPORT_SYMBOL(request_threaded_irq);
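
/*
 * Example: a hedged sketch of the split primary/threaded handler
 * design described above. All foo_* names are hypothetical; the
 * primary handler checks the interrupt origin (required for shared
 * lines), quiesces the device and defers the slow work to the thread:
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_is_mine(foo))
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_handle_slow_work(foo);	// may sleep, e.g. bus access
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */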
2203
2204/**
2205 * request_any_context_irq - allocate an interrupt line
2206 * @irq: Interrupt line to allocate
2207 * @handler: Function to be called when the IRQ occurs.
2208 * Threaded handler for threaded interrupts.
2209 * @flags: Interrupt type flags
2210 * @name: An ascii name for the claiming device
2211 * @dev_id: A cookie passed back to the handler function
2212 *
2213 * This call allocates interrupt resources and enables the
2214 * interrupt line and IRQ handling. It selects either a
2215 * hardirq or threaded handling method depending on the
2216 * context.
2217 *
2218 * On failure, it returns a negative value. On success,
2219 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2220 */
2221int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2222 unsigned long flags, const char *name, void *dev_id)
2223{
2224 struct irq_desc *desc;
2225 int ret;
2226
2227 if (irq == IRQ_NOTCONNECTED)
2228 return -ENOTCONN;
2229
2230 desc = irq_to_desc(irq);
2231 if (!desc)
2232 return -EINVAL;
2233
2234 if (irq_settings_is_nested_thread(desc)) {
2235 ret = request_threaded_irq(irq, NULL, handler,
2236 flags, name, dev_id);
2237 return !ret ? IRQC_IS_NESTED : ret;
2238 }
2239
2240 ret = request_irq(irq, handler, flags, name, dev_id);
2241 return !ret ? IRQC_IS_HARDIRQ : ret;
2242}
2243EXPORT_SYMBOL_GPL(request_any_context_irq);
2244
2245/**
2246 * request_nmi - allocate an interrupt line for NMI delivery
2247 * @irq: Interrupt line to allocate
2248 * @handler: Function to be called when the IRQ occurs.
2249 * Threaded handler for threaded interrupts.
2250 * @irqflags: Interrupt type flags
2251 * @name: An ascii name for the claiming device
2252 * @dev_id: A cookie passed back to the handler function
2253 *
2254 * This call allocates interrupt resources and enables the
2255 * interrupt line and IRQ handling. It sets up the IRQ line
2256 * to be handled as an NMI.
2257 *
 * An interrupt line delivering NMIs cannot be shared and IRQ handling
 * cannot be threaded.
 *
 * Interrupt lines requested for NMI delivery must produce per-CPU
 * interrupts and have auto-enabling disabled.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail and return a negative value.
 */
2271int request_nmi(unsigned int irq, irq_handler_t handler,
2272 unsigned long irqflags, const char *name, void *dev_id)
2273{
2274 struct irqaction *action;
2275 struct irq_desc *desc;
2276 unsigned long flags;
2277 int retval;
2278
2279 if (irq == IRQ_NOTCONNECTED)
2280 return -ENOTCONN;
2281
	/* NMI cannot be shared or used for polling */
	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
		return -EINVAL;
2285
2286 if (!(irqflags & IRQF_PERCPU))
2287 return -EINVAL;
2288
2289 if (!handler)
2290 return -EINVAL;
2291
2292 desc = irq_to_desc(irq);
2293
2294 if (!desc || (irq_settings_can_autoenable(desc) &&
2295 !(irqflags & IRQF_NO_AUTOEN)) ||
2296 !irq_settings_can_request(desc) ||
2297 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2298 !irq_supports_nmi(desc))
2299 return -EINVAL;
2300
2301 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2302 if (!action)
2303 return -ENOMEM;
2304
2305 action->handler = handler;
2306 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2307 action->name = name;
2308 action->dev_id = dev_id;
2309
2310 retval = irq_chip_pm_get(&desc->irq_data);
2311 if (retval < 0)
2312 goto err_out;
2313
2314 retval = __setup_irq(irq, desc, action);
2315 if (retval)
2316 goto err_irq_setup;
2317
2318 raw_spin_lock_irqsave(&desc->lock, flags);
2319
2320 /* Setup NMI state */
2321 desc->istate |= IRQS_NMI;
2322 retval = irq_nmi_setup(desc);
2323 if (retval) {
2324 __cleanup_nmi(irq, desc);
2325 raw_spin_unlock_irqrestore(&desc->lock, flags);
2326 return -EINVAL;
2327 }
2328
2329 raw_spin_unlock_irqrestore(&desc->lock, flags);
2330
2331 return 0;
2332
2333err_irq_setup:
2334 irq_chip_pm_put(&desc->irq_data);
2335err_out:
2336 kfree(action);
2337
2338 return retval;
2339}
2340
2341void enable_percpu_irq(unsigned int irq, unsigned int type)
2342{
2343 unsigned int cpu = smp_processor_id();
2344 unsigned long flags;
2345 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2346
2347 if (!desc)
2348 return;
2349
2350 /*
2351 * If the trigger type is not specified by the caller, then
2352 * use the default for this interrupt.
2353 */
2354 type &= IRQ_TYPE_SENSE_MASK;
2355 if (type == IRQ_TYPE_NONE)
2356 type = irqd_get_trigger_type(&desc->irq_data);
2357
2358 if (type != IRQ_TYPE_NONE) {
2359 int ret;
2360
2361 ret = __irq_set_trigger(desc, type);
2362
2363 if (ret) {
2364 WARN(1, "failed to set type for IRQ%d\n", irq);
2365 goto out;
2366 }
2367 }
2368
2369 irq_percpu_enable(desc, cpu);
2370out:
2371 irq_put_desc_unlock(desc, flags);
2372}
2373EXPORT_SYMBOL_GPL(enable_percpu_irq);
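
/*
 * Example: a hedged sketch (foo_irq is hypothetical). enable_percpu_irq()
 * only affects the calling CPU, so bringing a per-CPU interrupt up
 * everywhere is typically done via on_each_cpu() or a CPU hotplug
 * callback:
 *
 *	static void foo_enable_percpu(void *info)
 *	{
 *		enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
 *	}
 *
 *	on_each_cpu(foo_enable_percpu, NULL, 1);
 */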
2374
2375void enable_percpu_nmi(unsigned int irq, unsigned int type)
2376{
2377 enable_percpu_irq(irq, type);
2378}
2379
/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq: Linux irq number to check for
 *
 * Must be called from a non-migratable context. Returns the enable
 * state of a per-CPU interrupt on the current CPU.
 */
2387bool irq_percpu_is_enabled(unsigned int irq)
2388{
2389 unsigned int cpu = smp_processor_id();
2390 struct irq_desc *desc;
2391 unsigned long flags;
2392 bool is_enabled;
2393
2394 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2395 if (!desc)
2396 return false;
2397
2398 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2399 irq_put_desc_unlock(desc, flags);
2400
2401 return is_enabled;
2402}
2403EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2404
2405void disable_percpu_irq(unsigned int irq)
2406{
2407 unsigned int cpu = smp_processor_id();
2408 unsigned long flags;
2409 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2410
2411 if (!desc)
2412 return;
2413
2414 irq_percpu_disable(desc, cpu);
2415 irq_put_desc_unlock(desc, flags);
2416}
2417EXPORT_SYMBOL_GPL(disable_percpu_irq);
2418
2419void disable_percpu_nmi(unsigned int irq)
2420{
2421 disable_percpu_irq(irq);
2422}
2423
2424/*
2425 * Internal function to unregister a percpu irqaction.
2426 */
2427static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2428{
2429 struct irq_desc *desc = irq_to_desc(irq);
2430 struct irqaction *action;
2431 unsigned long flags;
2432
2433 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2434
2435 if (!desc)
2436 return NULL;
2437
2438 raw_spin_lock_irqsave(&desc->lock, flags);
2439
2440 action = desc->action;
2441 if (!action || action->percpu_dev_id != dev_id) {
2442 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2443 goto bad;
2444 }
2445
2446 if (!cpumask_empty(desc->percpu_enabled)) {
2447 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2448 irq, cpumask_first(desc->percpu_enabled));
2449 goto bad;
2450 }
2451
2452 /* Found it - now remove it from the list of entries: */
2453 desc->action = NULL;
2454
2455 desc->istate &= ~IRQS_NMI;
2456
2457 raw_spin_unlock_irqrestore(&desc->lock, flags);
2458
2459 unregister_handler_proc(irq, action);
2460
2461 irq_chip_pm_put(&desc->irq_data);
2462 module_put(desc->owner);
2463 return action;
2464
2465bad:
2466 raw_spin_unlock_irqrestore(&desc->lock, flags);
2467 return NULL;
2468}
2469
/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically set up by the early boot process.
 */
2477void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2478{
2479 struct irq_desc *desc = irq_to_desc(irq);
2480
2481 if (desc && irq_settings_is_per_cpu_devid(desc))
2482 __free_percpu_irq(irq, act->percpu_dev_id);
2483}
2484
2485/**
2486 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
2487 * @irq: Interrupt line to free
2488 * @dev_id: Device identity to free
2489 *
2490 * Remove a percpu interrupt handler. The handler is removed, but
2491 * the interrupt line is not disabled. This must be done on each
2492 * CPU before calling this function. The function does not return
2493 * until any executing interrupts for this IRQ have completed.
2494 *
2495 * This function must not be called from interrupt context.
2496 */
2497void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2498{
2499 struct irq_desc *desc = irq_to_desc(irq);
2500
2501 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2502 return;
2503
2504 chip_bus_lock(desc);
2505 kfree(__free_percpu_irq(irq, dev_id));
2506 chip_bus_sync_unlock(desc);
2507}
2508EXPORT_SYMBOL_GPL(free_percpu_irq);
2509
2510void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2511{
2512 struct irq_desc *desc = irq_to_desc(irq);
2513
2514 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2515 return;
2516
2517 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2518 return;
2519
2520 kfree(__free_percpu_irq(irq, dev_id));
2521}
2522
/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically set up per-cpu interrupts in the early boot process.
 */
2530int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2531{
2532 struct irq_desc *desc = irq_to_desc(irq);
2533 int retval;
2534
2535 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2536 return -EINVAL;
2537
2538 retval = irq_chip_pm_get(&desc->irq_data);
2539 if (retval < 0)
2540 return retval;
2541
2542 retval = __setup_irq(irq, desc, act);
2543
2544 if (retval)
2545 irq_chip_pm_put(&desc->irq_data);
2546
2547 return retval;
2548}
2549
2550/**
2551 * __request_percpu_irq - allocate a percpu interrupt line
2552 * @irq: Interrupt line to allocate
2553 * @handler: Function to be called when the IRQ occurs.
2554 * @flags: Interrupt type flags (IRQF_TIMER only)
2555 * @devname: An ascii name for the claiming device
2556 * @dev_id: A percpu cookie passed back to the handler function
2557 *
2558 * This call allocates interrupt resources and enables the
2559 * interrupt on the local CPU. If the interrupt is supposed to be
2560 * enabled on other CPUs, it has to be done on each CPU using
2561 * enable_percpu_irq().
2562 *
2563 * Dev_id must be globally unique. It is a per-cpu variable, and
2564 * the handler gets called with the interrupted CPU's instance of
2565 * that variable.
2566 */
2567int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2568 unsigned long flags, const char *devname,
2569 void __percpu *dev_id)
2570{
2571 struct irqaction *action;
2572 struct irq_desc *desc;
2573 int retval;
2574
2575 if (!dev_id)
2576 return -EINVAL;
2577
2578 desc = irq_to_desc(irq);
2579 if (!desc || !irq_settings_can_request(desc) ||
2580 !irq_settings_is_per_cpu_devid(desc))
2581 return -EINVAL;
2582
2583 if (flags && flags != IRQF_TIMER)
2584 return -EINVAL;
2585
2586 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2587 if (!action)
2588 return -ENOMEM;
2589
2590 action->handler = handler;
2591 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2592 action->name = devname;
2593 action->percpu_dev_id = dev_id;
2594
2595 retval = irq_chip_pm_get(&desc->irq_data);
2596 if (retval < 0) {
2597 kfree(action);
2598 return retval;
2599 }
2600
2601 retval = __setup_irq(irq, desc, action);
2602
2603 if (retval) {
2604 irq_chip_pm_put(&desc->irq_data);
2605 kfree(action);
2606 }
2607
2608 return retval;
2609}
2610EXPORT_SYMBOL_GPL(__request_percpu_irq);
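
/*
 * Example: a hedged sketch using the request_percpu_irq() wrapper from
 * <linux/interrupt.h>, which calls this function with flags == 0 (the
 * foo_* names are hypothetical). The handler is invoked with the
 * calling CPU's instance of the per-cpu cookie:
 *
 *	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu_state);
 *
 *	static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
 *	{
 *		struct foo_pcpu *st = dev_id;	// this CPU's instance
 *
 *		foo_handle(st);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_percpu_irq(foo_irq, foo_percpu_handler, "foo",
 *				 &foo_pcpu_state);
 */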
2611
2612/**
2613 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2614 * @irq: Interrupt line to allocate
2615 * @handler: Function to be called when the IRQ occurs.
2616 * @name: An ascii name for the claiming device
2617 * @dev_id: A percpu cookie passed back to the handler function
2618 *
 * This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
 * have to be set up on each CPU by calling prepare_percpu_nmi() before
 * being enabled on the same CPU by using enable_percpu_nmi().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 *
 * Interrupt lines requested for NMI delivery should have auto-enabling
 * disabled.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail, returning a negative value.
 */
2633int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2634 const char *name, void __percpu *dev_id)
2635{
2636 struct irqaction *action;
2637 struct irq_desc *desc;
2638 unsigned long flags;
2639 int retval;
2640
2641 if (!handler)
2642 return -EINVAL;
2643
2644 desc = irq_to_desc(irq);
2645
2646 if (!desc || !irq_settings_can_request(desc) ||
2647 !irq_settings_is_per_cpu_devid(desc) ||
2648 irq_settings_can_autoenable(desc) ||
2649 !irq_supports_nmi(desc))
2650 return -EINVAL;
2651
2652 /* The line cannot already be NMI */
2653 if (desc->istate & IRQS_NMI)
2654 return -EINVAL;
2655
2656 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2657 if (!action)
2658 return -ENOMEM;
2659
2660 action->handler = handler;
2661 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2662 | IRQF_NOBALANCING;
2663 action->name = name;
2664 action->percpu_dev_id = dev_id;
2665
2666 retval = irq_chip_pm_get(&desc->irq_data);
2667 if (retval < 0)
2668 goto err_out;
2669
2670 retval = __setup_irq(irq, desc, action);
2671 if (retval)
2672 goto err_irq_setup;
2673
2674 raw_spin_lock_irqsave(&desc->lock, flags);
2675 desc->istate |= IRQS_NMI;
2676 raw_spin_unlock_irqrestore(&desc->lock, flags);
2677
2678 return 0;
2679
2680err_irq_setup:
2681 irq_chip_pm_put(&desc->irq_data);
2682err_out:
2683 kfree(action);
2684
2685 return retval;
2686}
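
/*
 * Example: a hedged sketch of the full per-CPU NMI bring-up sequence
 * described above (the foo_* names are hypothetical). The line is
 * requested once; each CPU then prepares and enables its own copy,
 * from non-preemptible context:
 *
 *	err = request_percpu_nmi(foo_irq, foo_nmi_handler, "foo-nmi",
 *				 &foo_pcpu_state);
 *
 *	// later, on each CPU, with preemption disabled:
 *	err = prepare_percpu_nmi(foo_irq);
 *	if (!err)
 *		enable_percpu_nmi(foo_irq, IRQ_TYPE_NONE);
 */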

/**
 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
 * @irq: Interrupt line to prepare for NMI delivery
 *
 * This call prepares an interrupt line to deliver NMIs on the current CPU,
 * before that interrupt line gets enabled with enable_percpu_nmi().
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 *
 * If the interrupt line cannot be used to deliver NMIs, this function
 * fails and returns a negative error value.
 */
int prepare_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;
	int ret = 0;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return -EINVAL;

	if (WARN(!(desc->istate & IRQS_NMI),
		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
		 irq)) {
		ret = -EINVAL;
		goto out;
	}

	ret = irq_nmi_setup(desc);
	if (ret)
		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
out:
	irq_put_desc_unlock(desc, flags);
	return ret;
}
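
/*
 * Sketch (illustrative): one way to run the prepare/enable pair on every
 * online CPU is on_each_cpu(), whose callback runs with preemption
 * disabled as required above; drivers may equally use a CPU hotplug
 * "starting" callback. my_prepare_nmi() is a hypothetical helper.
 *
 *	static void my_prepare_nmi(void *info)
 *	{
 *		unsigned int irq = *(unsigned int *)info;
 *
 *		if (!prepare_percpu_nmi(irq))
 *			enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 *	}
 *
 *	on_each_cpu(my_prepare_nmi, &irq, 1);
 */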

/**
 * teardown_percpu_nmi - undoes NMI setup of IRQ line
 * @irq: Interrupt line from which CPU local NMI configuration should be
 *	 removed
 *
 * This call undoes the setup done by prepare_percpu_nmi().
 *
 * The IRQ line should not be enabled for the current CPU when this is
 * called.
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 */
void teardown_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		goto out;

	irq_nmi_teardown(desc);
out:
	irq_put_desc_unlock(desc, flags);
}

int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
			    bool *state)
{
	struct irq_chip *chip;
	int err = -EINVAL;

	/*
	 * Walk the domain hierarchy from @data towards the root until a
	 * chip which implements irq_get_irqchip_state() is found.
	 */
	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip))
			return -ENODEV;
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	/* data is NULL when no chip in the hierarchy implements the callback */
	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);
	return err;
}

/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an interrupt,
 * returning into @state the bit corresponding to state @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	err = __irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
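
/*
 * Sketch (illustrative): snapshotting the pending state of a forwarded
 * interrupt, e.g. to decide whether it must be reinjected into a guest.
 * "irq", "err" and "pending" belong to the hypothetical caller.
 *
 *	bool pending;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (!err && pending) {
 *		// line is pending at the irqchip, not yet delivered
 *	}
 */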

/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt,
 * depending on the value of @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	/*
	 * As in __irq_get_irqchip_state(), walk the hierarchy until a chip
	 * which implements irq_set_irqchip_state() is found.
	 */
	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip)) {
			err = -ENODEV;
			goto out_unlock;
		}
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
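
/*
 * Sketch (illustrative): the mirror operation, restoring a previously
 * saved pending bit when handing a forwarded interrupt back to the
 * hardware, as a hypervisor might do on VM resume. "saved_pending" is
 * hypothetical.
 *
 *	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, saved_pending);
 */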

/**
 * irq_has_action - Check whether an interrupt is requested
 * @irq: The linux irq number
 *
 * Returns: A snapshot of the current state. The result may be stale by
 * the time the caller inspects it, as no locks are held across the check.
 */
bool irq_has_action(unsigned int irq)
{
	bool res;

	rcu_read_lock();
	res = irq_desc_has_action(irq_to_desc(irq));
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(irq_has_action);
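
/*
 * Sketch (illustrative): a typical use is bailing out early when a line
 * already has a handler attached; note the result is only a snapshot.
 *
 *	if (irq_has_action(irq))
 *		return -EBUSY;
 */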

/**
 * irq_check_status_bit - Check whether bits in the irq descriptor status are set
 * @irq: The linux irq number
 * @bitmask: The bitmask to evaluate
 *
 * Returns: True if one of the bits in @bitmask is set
 */
bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
{
	struct irq_desc *desc;
	bool res = false;

	rcu_read_lock();
	desc = irq_to_desc(irq);
	if (desc)
		res = !!(desc->status_use_accessors & bitmask);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(irq_check_status_bit);
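
/*
 * Sketch (illustrative): checking a descriptor status bit, e.g. whether
 * the interrupt is marked per-CPU before attempting an operation that
 * makes no sense for per-CPU lines.
 *
 *	if (irq_check_status_bit(irq, IRQ_PER_CPU))
 *		return -EINVAL;
 */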