// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need,
 * you will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 *
 * It does not check whether there is an interrupt in flight at the
 * hardware level, but not serviced yet, as this might deadlock when
 * called with interrupts disabled and the target CPU of the interrupt
 * is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);
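
/*
 * Example (illustrative sketch, not part of this file): using the return
 * value when quiescing only the hard handler, e.g. in a hypothetical
 * driver's error path that runs where sleeping is not allowed. foo_dev,
 * foo->irq and foo->dev are assumptions for illustration.
 *
 *	static void foo_abort_transfer(struct foo_dev *foo)
 *	{
 *		// Only the hardirq part is synchronized; a threaded
 *		// handler may still be running and is reported here.
 *		if (!synchronize_hardirq(foo->irq))
 *			dev_warn(foo->dev, "threaded handler still active\n");
 *	}
 */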

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated to @irq.
 *
 * It optionally makes sure (when the irq chip supports that method)
 * that the interrupt is not pending in any CPU and waiting for
 * service.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, true);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
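
/*
 * Example (illustrative sketch, not part of this file): a device quiesce
 * path that stops the source at the device, then waits for both the hard
 * and the threaded handler before tearing down state. foo_dev and its
 * helpers are assumptions for illustration.
 *
 *	static void foo_quiesce(struct foo_dev *foo)
 *	{
 *		foo_mask_device_irq(foo);	// no new interrupts fire
 *		synchronize_irq(foo->irq);	// may sleep; process context only
 *		foo_free_rings(foo);		// handlers can no longer touch these
 *	}
 */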

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if the affinity of an irq can be set from user space
 * @irq: Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor whose affinity has changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

static void irq_validate_effective_affinity(struct irq_data *data)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
#endif
}

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
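
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * multi-queue driver spreading its per-queue vectors across CPUs and
 * publishing the placement via the affinity hint. foo_dev, foo->nvecs
 * and foo->irqs[] are assumptions for illustration, and the sketch
 * assumes CPU ids 0..N-1 are online.
 *
 *	static void foo_spread_irqs(struct foo_dev *foo)
 *	{
 *		int i;
 *
 *		for (i = 0; i < foo->nvecs; i++)
 *			irq_set_affinity_hint(foo->irqs[i],
 *					      cpumask_of(i % num_online_cpus()));
 *	}
 *
 * The hint must be reset with irq_set_affinity_hint(foo->irqs[i], NULL)
 * before the vectors are freed, otherwise __free_irq() below warns about
 * a stale affinity_hint.
 */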

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *          notification. Function pointers must be initialised;
 *          the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || desc->istate & IRQS_NMI)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify) {
		cancel_work_sync(&old_notify->work);
		kref_put(&old_notify->kref, old_notify->release);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
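
/*
 * Example (illustrative sketch, not part of this file): registering a
 * notifier so a hypothetical driver can re-steer per-CPU state when user
 * space retargets the interrupt. foo_dev and its members are assumptions
 * for illustration; struct irq_affinity_notify is embedded in the driver
 * structure so container_of() can recover it in the callbacks.
 *
 *	static void foo_affinity_changed(struct irq_affinity_notify *notify,
 *					 const cpumask_t *mask)
 *	{
 *		struct foo_dev *foo = container_of(notify, struct foo_dev,
 *						   affinity_notify);
 *
 *		foo_update_steering(foo, mask);
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		// Nothing dynamically allocated in this sketch.
 *	}
 *
 *	...
 *	foo->affinity_notify.notify = foo_affinity_changed;
 *	foo->affinity_notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
 *
 * The notifier must be torn down with irq_set_affinity_notifier(irq, NULL)
 * before free_irq(), as the comment above requires.
 */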

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif

/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_setup_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
#endif

/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *             specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
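
/*
 * Example (illustrative sketch, not part of this file): the nesting
 * contract in practice. Each disable_irq() must be balanced by one
 * enable_irq(); the line is only re-enabled when the depth returns to
 * zero. foo->irq is an assumption for illustration.
 *
 *	disable_irq(foo->irq);		// depth 0 -> 1, line masked,
 *					// waits for running handlers
 *	disable_irq(foo->irq);		// depth 1 -> 2, no hardware change
 *	enable_irq(foo->irq);		// depth 2 -> 1, still masked
 *	enable_irq(foo->irq);		// depth 1 -> 0, line unmasked
 *
 * Because disable_irq() waits for handlers, it must not be called while
 * holding a lock the handler takes; use disable_irq_nosync() there.
 */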

/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need, you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
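
/*
 * Example (illustrative sketch, not part of this file): optimistically
 * disabling an interrupt from atomic context, as the comment above
 * requires, with the return value checked. foo_dev and its helpers are
 * assumptions for illustration.
 *
 *	// Called under a spinlock, so the sleeping disable_irq() is
 *	// not allowed here.
 *	if (disable_hardirq(foo->irq)) {
 *		foo_poll_once(foo);	// hard handler is quiesced
 *	} else {
 *		// A threaded handler is still running; defer the work
 *		// instead of polling the hardware directly.
 *		schedule_work(&foo->fallback_work);
 *	}
 *	enable_irq(foo->irq);		// balance the disable in both cases
 */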

/**
 * disable_nmi_nosync - disable an nmi without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * The interrupt to disable must have been requested through request_nmi.
 * Unlike disable_nmi(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

/**
 * enable_nmi - enable handling of an nmi
 * @irq: Interrupt to enable
 *
 * The interrupt to enable must have been requested through request_nmi.
 * Undoes the effect of one call to disable_nmi(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (desc->istate & IRQS_NMI) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
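
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * driver arming its interrupt as a wakeup source across suspend. The
 * dev_pm_ops wiring and foo_dev are assumptions for illustration.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			foo->wake_armed = !irq_set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (foo->wake_armed) {
 *			irq_set_irq_wake(foo->irq, 0);	// balance the enable
 *			foo->wake_armed = false;
 *		}
 *		return 0;
 *	}
 */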

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect ourselves
	 * against the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and mask the line, then leave
	 * due to IRQS_INPROGRESS, and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}
static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq: Interrupt line
 * @dev_id: Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
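
/*
 * Example (illustrative sketch, not part of this file): kicking the
 * threaded handler from a code path other than the hard interrupt,
 * e.g. a watchdog timer that noticed the device has work pending even
 * though the line did not fire. foo_dev, foo->irq and the helpers are
 * assumptions for illustration.
 *
 *	static void foo_watchdog(struct timer_list *t)
 *	{
 *		struct foo_dev *foo = from_timer(foo, t, watchdog);
 *
 *		if (foo_work_pending(foo))
 *			irq_wake_thread(foo->irq, foo);	// same dev_id cookie
 *							// as request_threaded_irq()
 *	}
 */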

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static bool irq_supports_nmi(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	/* Only IRQs directly managed by the root irqchip can be set as NMI */
	if (d->parent_data)
		return false;
#endif
	/* Don't support NMIs for chips behind a slow bus */
	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
		return false;

	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
}

static int irq_nmi_setup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
}

static void irq_nmi_teardown(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	if (c->irq_nmi_teardown)
		c->irq_nmi_teardown(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	new->thread = get_task_struct(t);
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}
/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 * chip_bus_lock	Provides serialization for slow bus operations
 * desc->lock		Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_hardirq() to complete without holding the optional
	 * chip bus lock and desc->lock. Also protects against handing out
	 * a recycled oneshot thread_mask bit while it's still in use by
	 * its previous owner.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback.
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 * Interrupt lines used for NMIs cannot be shared.
		 */
		unsigned int oldtype;

		if (desc->istate & IRQS_NMI) {
			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
			       new->name, irq, desc->irq_data.chip->name);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * If nobody did set the configuration before, inherit
		 * the one provided by the requester.
		 */
		if (irqd_trigger_type_was_set(&desc->irq_data)) {
			oldtype = irqd_get_trigger_type(&desc->irq_data);
		} else {
			oldtype = new->flags & IRQF_TRIGGER_MASK;
			irqd_set_trigger_type(&desc->irq_data, oldtype);
		}

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp. 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->thread_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1UL << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode, it merely associates
		 * resources if necessary and if that's not possible it
		 * fails. Interrupts which are in managed shutdown mode
		 * will simply ignore that activation request.
		 */
		ret = irq_activate(desc);
		if (ret)
			goto out_unlock;

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		/* Only shutdown. Deactivate after synchronize_hardirq() */
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
	 *
	 * Aside of that the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex() protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/*
	 * Make sure it's not being used on another CPU and if the chip
	 * supports it also make sure that there is no (not yet serviced)
	 * interrupt in flight at the hardware level.
	 */
	__synchronize_hardirq(desc, true);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	/*
	 * The action has already been removed above, but the thread writes
	 * its oneshot mask bit when it completes. Though request_mutex is
	 * held across this which prevents __setup_irq() from handing out
	 * the same bit to a newly requested action.
	 */
	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		/*
		 * There is no interrupt on the fly anymore. Deactivate it
		 * completely.
		 */
		raw_spin_lock_irqsave(&desc->lock, flags);
		irq_domain_deactivate_irq(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(desc, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 *
 * Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(desc, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);
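
/*
 * Example (illustrative sketch, not part of this file): the usual
 * request/free pairing for a threaded interrupt, matching the
 * request_threaded_irq() documentation further below. foo_dev and its
 * helpers are assumptions for illustration.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_is_mine(foo))
 *			return IRQ_NONE;	// shared line, not our device
 *		foo_mask_device_irq(foo);
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn()
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep here
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	...
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 *	...
 *	free_irq(foo->irq, foo);	// same dev_id cookie as the request
 */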

/* This function must be called with desc->lock held */
static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
{
	const char *devname = NULL;

	desc->istate &= ~IRQS_NMI;

	if (!WARN_ON(desc->action == NULL)) {
		irq_pm_remove_action(desc, desc->action);
		devname = desc->action->name;
		unregister_handler_proc(irq, desc->action);

		kfree(desc->action);
		desc->action = NULL;
	}

	irq_settings_clr_disable_unlazy(desc);
	irq_shutdown_and_deactivate(desc);

	irq_release_resources(desc);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);

	return devname;
}

const void *free_nmi(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	const void *devname;

	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
		return NULL;

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

	/* NMI still enabled */
	if (WARN_ON(desc->depth == 0))
		disable_nmi_nosync(irq);

	raw_spin_lock_irqsave(&desc->lock, flags);

	irq_nmi_teardown(desc);
	devname = __cleanup_nmi(irq, desc);

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return devname;
}

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed
 * @thread_fn: Function called from the irq handler thread
 *	       If NULL, no irq thread is created
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
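
/*
 * Usage sketch (illustrative only, not part of this file): the split
 * primary/threaded handler pattern described above, for a hypothetical
 * "foo" device on a possibly shared line. foo_pending() and
 * foo_mask_irq() stand in for device-specific register accesses.
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *
 *		if (!foo_pending(foo))
 *			return IRQ_NONE;	// not ours (shared line)
 *		foo_mask_irq(foo);		// quiesce the device
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn()
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		// may sleep here, e.g. talk to the device over a slow bus
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_quick_check, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */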

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
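
/*
 * Usage sketch (illustrative only): callers of request_any_context_irq()
 * need only the sign of the return value; the IRQC_* value tells them
 * which handling method was chosen. The "foo" names are made up.
 *
 *	ret = request_any_context_irq(foo->irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	foo->irq_is_threaded = (ret == IRQC_IS_NESTED);
 */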

/**
 * request_nmi - allocate an interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @irqflags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It sets up the IRQ line
 * to be handled as an NMI.
 *
 * An interrupt line delivering NMIs cannot be shared and IRQ handling
 * cannot be threaded.
 *
 * Interrupt lines requested for NMI delivery must produce per-CPU
 * interrupts and have auto enabling disabled.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail and return a negative value.
 */
int request_nmi(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/* NMI cannot be shared, nor used for polling */
	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
		return -EINVAL;

	if (!(irqflags & IRQF_PERCPU))
		return -EINVAL;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || irq_settings_can_autoenable(desc) ||
	    !irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
	action->name = name;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Setup NMI state */
	desc->istate |= IRQS_NMI;
	retval = irq_nmi_setup(desc);
	if (retval) {
		__cleanup_nmi(irq, desc);
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		return -EINVAL;
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}
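
/*
 * Usage sketch (illustrative only): requesting and releasing an NMI line
 * for a made-up hard-lockup watchdog. As checked above, the line must be
 * non-shared, non-threadable, must not auto-enable, and IRQF_PERCPU must
 * be set; enabling/disabling of the line is assumed to happen through the
 * usual NMI enable/disable entry points.
 *
 *	ret = request_nmi(wd->irq, wd_nmi_handler, IRQF_PERCPU, "wd", wd);
 *	if (ret)
 *		return ret;
 *	...
 *	free_nmi(wd->irq, wd);
 */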

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void enable_percpu_nmi(unsigned int irq, unsigned int type)
{
	enable_percpu_irq(irq, type);
}

/**
 * irq_percpu_is_enabled - Check whether the per-CPU irq is enabled
 * @irq: Linux irq number to check for
 *
 * Must be called from a non-migratable context. Returns the enable
 * state of a per-CPU interrupt on the current CPU.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

void disable_percpu_nmi(unsigned int irq)
{
	disable_percpu_irq(irq);
}

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	desc->istate &= ~IRQS_NMI;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but
 * the interrupt line is not disabled. This must be done on each
 * CPU before calling this function. The function does not return
 * until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);

void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		return;

	kfree(__free_percpu_irq(irq, dev_id));
}

/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}

/**
 * __request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @flags: Interrupt type flags (IRQF_TIMER only)
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt on the local CPU. If the interrupt is supposed to be
 * enabled on other CPUs, it has to be done on each CPU using
 * enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
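
/*
 * Usage sketch (illustrative only): per-CPU interrupts are requested once
 * and then enabled on each CPU individually, with a percpu variable as the
 * cookie. The "foo" names are made up, and request_percpu_irq() is assumed
 * to be the usual wrapper around __request_percpu_irq() with flags == 0.
 *
 *	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu);
 *
 *	ret = request_percpu_irq(irq, foo_pcpu_handler, "foo", &foo_pcpu);
 *	if (ret)
 *		return ret;
 *	// later, on each CPU (e.g. from a CPU hotplug callback):
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */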

/**
 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @name: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
 * have to be setup on each CPU by calling prepare_percpu_nmi() before
 * being enabled on the same CPU by using enable_percpu_nmi().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 *
 * Interrupt lines requested for NMI delivery should have auto enabling
 * disabled.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail and return a negative value.
 */
int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		       const char *name, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc) ||
	    irq_settings_can_autoenable(desc) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	/* The line cannot already be NMI */
	if (desc->istate & IRQS_NMI)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
		| IRQF_NOBALANCING;
	action->name = name;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->istate |= IRQS_NMI;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}

/**
 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
 * @irq: Interrupt line to prepare for NMI delivery
 *
 * This call prepares an interrupt line to deliver NMI on the current CPU,
 * before that interrupt line gets enabled with enable_percpu_nmi().
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail and return a negative value.
 */
int prepare_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;
	int ret = 0;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return -EINVAL;

	if (WARN(!(desc->istate & IRQS_NMI),
		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
		 irq)) {
		ret = -EINVAL;
		goto out;
	}

	ret = irq_nmi_setup(desc);
	if (ret) {
		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
		goto out;
	}

out:
	irq_put_desc_unlock(desc, flags);
	return ret;
}

/**
 * teardown_percpu_nmi - undoes NMI setup of IRQ line
 * @irq: Interrupt line from which CPU local NMI configuration should be
 *	 removed
 *
 * This call undoes the setup done by prepare_percpu_nmi().
 *
 * IRQ line should not be enabled for the current CPU.
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 */
void teardown_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		goto out;

	irq_nmi_teardown(desc);
out:
	irq_put_desc_unlock(desc, flags);
}
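
/*
 * Usage sketch (illustrative only) of the per-CPU NMI lifecycle built from
 * the functions above. The "foo" names are made up; prepare/teardown and
 * the enable/disable calls must all run on the CPU concerned, from
 * non-preemptible context.
 *
 *	ret = request_percpu_nmi(irq, foo_nmi_handler, "foo-nmi", &foo_pcpu);
 *	// then, on each CPU:
 *	ret = prepare_percpu_nmi(irq);
 *	if (!ret)
 *		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 *	...
 *	disable_percpu_nmi(irq);
 *	teardown_percpu_nmi(irq);
 */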

int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
			    bool *state)
{
	struct irq_chip *chip;
	int err = -EINVAL;

	do {
		chip = irq_data_get_irq_chip(data);
		/* A hierarchy level without a chip is a bug, don't dereference it */
		if (WARN_ON_ONCE(!chip))
			return -ENODEV;
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);
	return err;
}

/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an
 * interrupt, returning into @state the bit corresponding to
 * state @which
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	err = __irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);

/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt,
 * depending on the value of @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		/* A hierarchy level without a chip is a bug, don't dereference it */
		if (WARN_ON_ONCE(!chip)) {
			err = -ENODEV;
			goto out_unlock;
		}
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
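
/*
 * Usage sketch (illustrative only): snapshotting and restoring the pending
 * state of a line forwarded to a guest, as a VFIO-style consumer might do.
 *
 *	bool pending;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (!err && pending)
 *		err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
 */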
1/*
2 * linux/kernel/irq/manage.c
3 *
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006 Thomas Gleixner
6 *
7 * This file contains driver APIs to the irq subsystem.
8 */
9
10#define pr_fmt(fmt) "genirq: " fmt
11
12#include <linux/irq.h>
13#include <linux/kthread.h>
14#include <linux/module.h>
15#include <linux/random.h>
16#include <linux/interrupt.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
19#include <linux/sched/rt.h>
20#include <linux/task_work.h>
21
22#include "internals.h"
23
24#ifdef CONFIG_IRQ_FORCED_THREADING
25__read_mostly bool force_irqthreads;
26
27static int __init setup_forced_irqthreads(char *arg)
28{
29 force_irqthreads = true;
30 return 0;
31}
32early_param("threadirqs", setup_forced_irqthreads);
33#endif
34
35static void __synchronize_hardirq(struct irq_desc *desc)
36{
37 bool inprogress;
38
39 do {
40 unsigned long flags;
41
42 /*
43 * Wait until we're out of the critical section. This might
44 * give the wrong answer due to the lack of memory barriers.
45 */
46 while (irqd_irq_inprogress(&desc->irq_data))
47 cpu_relax();
48
49 /* Ok, that indicated we're done: double-check carefully. */
50 raw_spin_lock_irqsave(&desc->lock, flags);
51 inprogress = irqd_irq_inprogress(&desc->irq_data);
52 raw_spin_unlock_irqrestore(&desc->lock, flags);
53
54 /* Oops, that failed? */
55 } while (inprogress);
56}
57
58/**
59 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
60 * @irq: interrupt number to wait for
61 *
62 * This function waits for any pending hard IRQ handlers for this
63 * interrupt to complete before returning. If you use this
64 * function while holding a resource the IRQ handler may need you
65 * will deadlock. It does not take associated threaded handlers
66 * into account.
67 *
68 * Do not use this for shutdown scenarios where you must be sure
69 * that all parts (hardirq and threaded handler) have completed.
70 *
71 * This function may be called - with care - from IRQ context.
72 */
73void synchronize_hardirq(unsigned int irq)
74{
75 struct irq_desc *desc = irq_to_desc(irq);
76
77 if (desc)
78 __synchronize_hardirq(desc);
79}
80EXPORT_SYMBOL(synchronize_hardirq);
81
82/**
83 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
84 * @irq: interrupt number to wait for
85 *
86 * This function waits for any pending IRQ handlers for this interrupt
87 * to complete before returning. If you use this function while
88 * holding a resource the IRQ handler may need you will deadlock.
89 *
90 * This function may be called - with care - from IRQ context.
91 */
92void synchronize_irq(unsigned int irq)
93{
94 struct irq_desc *desc = irq_to_desc(irq);
95
96 if (desc) {
97 __synchronize_hardirq(desc);
98 /*
99 * We made sure that no hardirq handler is
100 * running. Now verify that no threaded handlers are
101 * active.
102 */
103 wait_event(desc->wait_for_threads,
104 !atomic_read(&desc->threads_active));
105 }
106}
107EXPORT_SYMBOL(synchronize_irq);
108
109#ifdef CONFIG_SMP
110cpumask_var_t irq_default_affinity;
111
112/**
113 * irq_can_set_affinity - Check if the affinity of a given irq can be set
114 * @irq: Interrupt to check
115 *
116 */
117int irq_can_set_affinity(unsigned int irq)
118{
119 struct irq_desc *desc = irq_to_desc(irq);
120
121 if (!desc || !irqd_can_balance(&desc->irq_data) ||
122 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
123 return 0;
124
125 return 1;
126}
127
128/**
129 * irq_set_thread_affinity - Notify irq threads to adjust affinity
130 * @desc: irq descriptor which has affitnity changed
131 *
132 * We just set IRQTF_AFFINITY and delegate the affinity setting
133 * to the interrupt thread itself. We can not call
134 * set_cpus_allowed_ptr() here as we hold desc->lock and this
135 * code can be called from hard interrupt context.
136 */
137void irq_set_thread_affinity(struct irq_desc *desc)
138{
139 struct irqaction *action = desc->action;
140
141 while (action) {
142 if (action->thread)
143 set_bit(IRQTF_AFFINITY, &action->thread_flags);
144 action = action->next;
145 }
146}
147
148#ifdef CONFIG_GENERIC_PENDING_IRQ
149static inline bool irq_can_move_pcntxt(struct irq_data *data)
150{
151 return irqd_can_move_in_process_context(data);
152}
153static inline bool irq_move_pending(struct irq_data *data)
154{
155 return irqd_is_setaffinity_pending(data);
156}
157static inline void
158irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
159{
160 cpumask_copy(desc->pending_mask, mask);
161}
162static inline void
163irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
164{
165 cpumask_copy(mask, desc->pending_mask);
166}
167#else
168static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
169static inline bool irq_move_pending(struct irq_data *data) { return false; }
170static inline void
171irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
172static inline void
173irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
174#endif
175
176int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
177 bool force)
178{
179 struct irq_desc *desc = irq_data_to_desc(data);
180 struct irq_chip *chip = irq_data_get_irq_chip(data);
181 int ret;
182
183 ret = chip->irq_set_affinity(data, mask, force);
184 switch (ret) {
185 case IRQ_SET_MASK_OK:
186 cpumask_copy(data->affinity, mask);
187 case IRQ_SET_MASK_OK_NOCOPY:
188 irq_set_thread_affinity(desc);
189 ret = 0;
190 }
191
192 return ret;
193}
194
195int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
196 bool force)
197{
198 struct irq_chip *chip = irq_data_get_irq_chip(data);
199 struct irq_desc *desc = irq_data_to_desc(data);
200 int ret = 0;
201
202 if (!chip || !chip->irq_set_affinity)
203 return -EINVAL;
204
205 if (irq_can_move_pcntxt(data)) {
206 ret = irq_do_set_affinity(data, mask, force);
207 } else {
208 irqd_set_move_pending(data);
209 irq_copy_pending(desc, mask);
210 }
211
212 if (desc->affinity_notify) {
213 kref_get(&desc->affinity_notify->kref);
214 schedule_work(&desc->affinity_notify->work);
215 }
216 irqd_set(data, IRQD_AFFINITY_SET);
217
218 return ret;
219}
220
221int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
222{
223 struct irq_desc *desc = irq_to_desc(irq);
224 unsigned long flags;
225 int ret;
226
227 if (!desc)
228 return -EINVAL;
229
230 raw_spin_lock_irqsave(&desc->lock, flags);
231 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
232 raw_spin_unlock_irqrestore(&desc->lock, flags);
233 return ret;
234}
235
236int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
237{
238 unsigned long flags;
239 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
240
241 if (!desc)
242 return -EINVAL;
243 desc->affinity_hint = m;
244 irq_put_desc_unlock(desc, flags);
245 return 0;
246}
247EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
248
249static void irq_affinity_notify(struct work_struct *work)
250{
251 struct irq_affinity_notify *notify =
252 container_of(work, struct irq_affinity_notify, work);
253 struct irq_desc *desc = irq_to_desc(notify->irq);
254 cpumask_var_t cpumask;
255 unsigned long flags;
256
257 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
258 goto out;
259
260 raw_spin_lock_irqsave(&desc->lock, flags);
261 if (irq_move_pending(&desc->irq_data))
262 irq_get_pending(cpumask, desc);
263 else
264 cpumask_copy(cpumask, desc->irq_data.affinity);
265 raw_spin_unlock_irqrestore(&desc->lock, flags);
266
267 notify->notify(notify, cpumask);
268
269 free_cpumask_var(cpumask);
270out:
271 kref_put(¬ify->kref, notify->release);
272}
273
274/**
275 * irq_set_affinity_notifier - control notification of IRQ affinity changes
276 * @irq: Interrupt for which to enable/disable notification
277 * @notify: Context for notification, or %NULL to disable
278 * notification. Function pointers must be initialised;
279 * the other fields will be initialised by this function.
280 *
281 * Must be called in process context. Notification may only be enabled
282 * after the IRQ is allocated and must be disabled before the IRQ is
283 * freed using free_irq().
284 */
285int
286irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
287{
288 struct irq_desc *desc = irq_to_desc(irq);
289 struct irq_affinity_notify *old_notify;
290 unsigned long flags;
291
292 /* The release function is promised process context */
293 might_sleep();
294
295 if (!desc)
296 return -EINVAL;
297
298 /* Complete initialisation of *notify */
299 if (notify) {
300 notify->irq = irq;
301 kref_init(¬ify->kref);
302 INIT_WORK(¬ify->work, irq_affinity_notify);
303 }
304
305 raw_spin_lock_irqsave(&desc->lock, flags);
306 old_notify = desc->affinity_notify;
307 desc->affinity_notify = notify;
308 raw_spin_unlock_irqrestore(&desc->lock, flags);
309
310 if (old_notify)
311 kref_put(&old_notify->kref, old_notify->release);
312
313 return 0;
314}
315EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
316
317#ifndef CONFIG_AUTO_IRQ_AFFINITY
318/*
319 * Generic version of the affinity autoselector.
320 */
321static int
322setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
323{
324 struct cpumask *set = irq_default_affinity;
325 int node = desc->irq_data.node;
326
327 /* Excludes PER_CPU and NO_BALANCE interrupts */
328 if (!irq_can_set_affinity(irq))
329 return 0;
330
331 /*
332 * Preserve an userspace affinity setup, but make sure that
333 * one of the targets is online.
334 */
335 if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
336 if (cpumask_intersects(desc->irq_data.affinity,
337 cpu_online_mask))
338 set = desc->irq_data.affinity;
339 else
340 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
341 }
342
343 cpumask_and(mask, cpu_online_mask, set);
344 if (node != NUMA_NO_NODE) {
345 const struct cpumask *nodemask = cpumask_of_node(node);
346
347 /* make sure at least one of the cpus in nodemask is online */
348 if (cpumask_intersects(mask, nodemask))
349 cpumask_and(mask, mask, nodemask);
350 }
351 irq_do_set_affinity(&desc->irq_data, mask, false);
352 return 0;
353}
354#else
355static inline int
356setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
357{
358 return irq_select_affinity(irq);
359}
360#endif
361
362/*
363 * Called when affinity is set via /proc/irq
364 */
365int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
366{
367 struct irq_desc *desc = irq_to_desc(irq);
368 unsigned long flags;
369 int ret;
370
371 raw_spin_lock_irqsave(&desc->lock, flags);
372 ret = setup_affinity(irq, desc, mask);
373 raw_spin_unlock_irqrestore(&desc->lock, flags);
374 return ret;
375}
376
377#else
378static inline int
379setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
380{
381 return 0;
382}
383#endif
384
385void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
386{
387 if (suspend) {
388 if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
389 return;
390 desc->istate |= IRQS_SUSPENDED;
391 }
392
393 if (!desc->depth++)
394 irq_disable(desc);
395}
396
397static int __disable_irq_nosync(unsigned int irq)
398{
399 unsigned long flags;
400 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
401
402 if (!desc)
403 return -EINVAL;
404 __disable_irq(desc, irq, false);
405 irq_put_desc_busunlock(desc, flags);
406 return 0;
407}
408
409/**
410 * disable_irq_nosync - disable an irq without waiting
411 * @irq: Interrupt to disable
412 *
413 * Disable the selected interrupt line. Disables and Enables are
414 * nested.
415 * Unlike disable_irq(), this function does not ensure existing
416 * instances of the IRQ handler have completed before returning.
417 *
418 * This function may be called from IRQ context.
419 */
420void disable_irq_nosync(unsigned int irq)
421{
422 __disable_irq_nosync(irq);
423}
424EXPORT_SYMBOL(disable_irq_nosync);
425
426/**
427 * disable_irq - disable an irq and wait for completion
428 * @irq: Interrupt to disable
429 *
430 * Disable the selected interrupt line. Enables and Disables are
431 * nested.
432 * This function waits for any pending IRQ handlers for this interrupt
433 * to complete before returning. If you use this function while
434 * holding a resource the IRQ handler may need you will deadlock.
435 *
436 * This function may be called - with care - from IRQ context.
437 */
438void disable_irq(unsigned int irq)
439{
440 if (!__disable_irq_nosync(irq))
441 synchronize_irq(irq);
442}
443EXPORT_SYMBOL(disable_irq);
444
445void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
446{
447 if (resume) {
448 if (!(desc->istate & IRQS_SUSPENDED)) {
449 if (!desc->action)
450 return;
451 if (!(desc->action->flags & IRQF_FORCE_RESUME))
452 return;
453 /* Pretend that it got disabled ! */
454 desc->depth++;
455 }
456 desc->istate &= ~IRQS_SUSPENDED;
457 }
458
459 switch (desc->depth) {
460 case 0:
461 err_out:
462 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
463 break;
464 case 1: {
465 if (desc->istate & IRQS_SUSPENDED)
466 goto err_out;
467 /* Prevent probing on this irq: */
468 irq_settings_set_noprobe(desc);
469 irq_enable(desc);
470 check_irq_resend(desc, irq);
471 /* fall-through */
472 }
473 default:
474 desc->depth--;
475 }
476}
477
478/**
479 * enable_irq - enable handling of an irq
480 * @irq: Interrupt to enable
481 *
482 * Undoes the effect of one call to disable_irq(). If this
483 * matches the last disable, processing of interrupts on this
484 * IRQ line is re-enabled.
485 *
486 * This function may be called from IRQ context only when
487 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
488 */
489void enable_irq(unsigned int irq)
490{
491 unsigned long flags;
492 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
493
494 if (!desc)
495 return;
496 if (WARN(!desc->irq_data.chip,
497 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
498 goto out;
499
500 __enable_irq(desc, irq, false);
501out:
502 irq_put_desc_busunlock(desc, flags);
503}
504EXPORT_SYMBOL(enable_irq);
505
506static int set_irq_wake_real(unsigned int irq, unsigned int on)
507{
508 struct irq_desc *desc = irq_to_desc(irq);
509 int ret = -ENXIO;
510
511 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
512 return 0;
513
514 if (desc->irq_data.chip->irq_set_wake)
515 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
516
517 return ret;
518}
519
520/**
521 * irq_set_irq_wake - control irq power management wakeup
522 * @irq: interrupt to control
523 * @on: enable/disable power management wakeup
524 *
525 * Enable/disable power management wakeup mode, which is
526 * disabled by default. Enables and disables must match,
527 * just as they match for non-wakeup mode support.
528 *
529 * Wakeup mode lets this IRQ wake the system from sleep
530 * states like "suspend to RAM".
531 */
532int irq_set_irq_wake(unsigned int irq, unsigned int on)
533{
534 unsigned long flags;
535 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
536 int ret = 0;
537
538 if (!desc)
539 return -EINVAL;
540
541 /* wakeup-capable irqs can be shared between drivers that
542 * don't need to have the same sleep mode behaviors.
543 */
544 if (on) {
545 if (desc->wake_depth++ == 0) {
546 ret = set_irq_wake_real(irq, on);
547 if (ret)
548 desc->wake_depth = 0;
549 else
550 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
551 }
552 } else {
553 if (desc->wake_depth == 0) {
554 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
555 } else if (--desc->wake_depth == 0) {
556 ret = set_irq_wake_real(irq, on);
557 if (ret)
558 desc->wake_depth = 1;
559 else
560 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
561 }
562 }
563 irq_put_desc_busunlock(desc, flags);
564 return ret;
565}
566EXPORT_SYMBOL(irq_set_irq_wake);
567
568/*
569 * Internal function that tells the architecture code whether a
570 * particular irq has been exclusively allocated or is available
571 * for driver use.
572 */
573int can_request_irq(unsigned int irq, unsigned long irqflags)
574{
575 unsigned long flags;
576 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
577 int canrequest = 0;
578
579 if (!desc)
580 return 0;
581
582 if (irq_settings_can_request(desc)) {
583 if (!desc->action ||
584 irqflags & desc->action->flags & IRQF_SHARED)
585 canrequest = 1;
586 }
587 irq_put_desc_unlock(desc, flags);
588 return canrequest;
589}
590
591int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
592 unsigned long flags)
593{
594 struct irq_chip *chip = desc->irq_data.chip;
595 int ret, unmask = 0;
596
597 if (!chip || !chip->irq_set_type) {
598 /*
599 * IRQF_TRIGGER_* but the PIC does not support multiple
600 * flow-types?
601 */
602 pr_debug("No set_type function for IRQ %d (%s)\n", irq,
603 chip ? (chip->name ? : "unknown") : "unknown");
604 return 0;
605 }
606
607 flags &= IRQ_TYPE_SENSE_MASK;
608
609 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
610 if (!irqd_irq_masked(&desc->irq_data))
611 mask_irq(desc);
612 if (!irqd_irq_disabled(&desc->irq_data))
613 unmask = 1;
614 }
615
616 /* caller masked out all except trigger mode flags */
617 ret = chip->irq_set_type(&desc->irq_data, flags);
618
619 switch (ret) {
620 case IRQ_SET_MASK_OK:
621 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
622 irqd_set(&desc->irq_data, flags);
623
624 case IRQ_SET_MASK_OK_NOCOPY:
625 flags = irqd_get_trigger_type(&desc->irq_data);
626 irq_settings_set_trigger_mask(desc, flags);
627 irqd_clear(&desc->irq_data, IRQD_LEVEL);
628 irq_settings_clr_level(desc);
629 if (flags & IRQ_TYPE_LEVEL_MASK) {
630 irq_settings_set_level(desc);
631 irqd_set(&desc->irq_data, IRQD_LEVEL);
632 }
633
634 ret = 0;
635 break;
636 default:
637 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
638 flags, irq, chip->irq_set_type);
639 }
640 if (unmask)
641 unmask_irq(desc);
642 return ret;
643}
644
645#ifdef CONFIG_HARDIRQS_SW_RESEND
646int irq_set_parent(int irq, int parent_irq)
647{
648 unsigned long flags;
649 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
650
651 if (!desc)
652 return -EINVAL;
653
654 desc->parent_irq = parent_irq;
655
656 irq_put_desc_unlock(desc, flags);
657 return 0;
658}
659#endif
660
661/*
662 * Default primary interrupt handler for threaded interrupts. Is
663 * assigned as primary handler when request_threaded_irq is called
664 * with handler == NULL. Useful for oneshot interrupts.
665 */
666static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
667{
668 return IRQ_WAKE_THREAD;
669}
670
671/*
672 * Primary handler for nested threaded interrupts. Should never be
673 * called.
674 */
675static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
676{
677 WARN(1, "Primary handler called for nested irq %d\n", irq);
678 return IRQ_NONE;
679}
680
681static int irq_wait_for_interrupt(struct irqaction *action)
682{
683 set_current_state(TASK_INTERRUPTIBLE);
684
685 while (!kthread_should_stop()) {
686
687 if (test_and_clear_bit(IRQTF_RUNTHREAD,
688 &action->thread_flags)) {
689 __set_current_state(TASK_RUNNING);
690 return 0;
691 }
692 schedule();
693 set_current_state(TASK_INTERRUPTIBLE);
694 }
695 __set_current_state(TASK_RUNNING);
696 return -1;
697}
698
699/*
700 * Oneshot interrupts keep the irq line masked until the threaded
701 * handler finished. unmask if the interrupt has not been disabled and
702 * is marked MASKED.
703 */
704static void irq_finalize_oneshot(struct irq_desc *desc,
705 struct irqaction *action)
706{
707 if (!(desc->istate & IRQS_ONESHOT))
708 return;
709again:
710 chip_bus_lock(desc);
711 raw_spin_lock_irq(&desc->lock);
712
713 /*
714 * Implausible though it may be we need to protect us against
715 * the following scenario:
716 *
717 * The thread is faster done than the hard interrupt handler
718 * on the other CPU. If we unmask the irq line then the
719 * interrupt can come in again and masks the line, leaves due
720 * to IRQS_INPROGRESS and the irq line is masked forever.
721 *
722 * This also serializes the state of shared oneshot handlers
723 * versus "desc->threads_onehsot |= action->thread_mask;" in
724 * irq_wake_thread(). See the comment there which explains the
725 * serialization.
726 */
727 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
728 raw_spin_unlock_irq(&desc->lock);
729 chip_bus_sync_unlock(desc);
730 cpu_relax();
731 goto again;
732 }
733
734 /*
735 * Now check again, whether the thread should run. Otherwise
736 * we would clear the threads_oneshot bit of this thread which
737 * was just set.
738 */
739 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
740 goto out_unlock;
741
742 desc->threads_oneshot &= ~action->thread_mask;
743
744 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
745 irqd_irq_masked(&desc->irq_data))
746 unmask_threaded_irq(desc);
747
748out_unlock:
749 raw_spin_unlock_irq(&desc->lock);
750 chip_bus_sync_unlock(desc);
751}
752
753#ifdef CONFIG_SMP
754/*
755 * Check whether we need to change the affinity of the interrupt thread.
756 */
757static void
758irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
759{
760 cpumask_var_t mask;
761 bool valid = true;
762
763 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
764 return;
765
766 /*
767 * In case we are out of memory we set IRQTF_AFFINITY again and
768 * try again next time
769 */
770 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
771 set_bit(IRQTF_AFFINITY, &action->thread_flags);
772 return;
773 }
774
775 raw_spin_lock_irq(&desc->lock);
776 /*
777 * This code is triggered unconditionally. Check the affinity
778 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
779 */
780 if (desc->irq_data.affinity)
781 cpumask_copy(mask, desc->irq_data.affinity);
782 else
783 valid = false;
784 raw_spin_unlock_irq(&desc->lock);
785
786 if (valid)
787 set_cpus_allowed_ptr(current, mask);
788 free_cpumask_var(mask);
789}
790#else
791static inline void
792irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
793#endif
794
795/*
796 * Interrupts which are not explicitely requested as threaded
797 * interrupts rely on the implicit bh/preempt disable of the hard irq
798 * context. So we need to disable bh here to avoid deadlocks and other
799 * side effects.
800 */
801static irqreturn_t
802irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
803{
804 irqreturn_t ret;
805
806 local_bh_disable();
807 ret = action->thread_fn(action->irq, action->dev_id);
808 irq_finalize_oneshot(desc, action);
809 local_bh_enable();
810 return ret;
811}
812
813/*
814 * Interrupts explicitly requested as threaded interrupts want to be
815 * preemtible - many of them need to sleep and wait for slow busses to
816 * complete.
817 */
818static irqreturn_t irq_thread_fn(struct irq_desc *desc,
819 struct irqaction *action)
820{
821 irqreturn_t ret;
822
823 ret = action->thread_fn(action->irq, action->dev_id);
824 irq_finalize_oneshot(desc, action);
825 return ret;
826}
827
828static void wake_threads_waitq(struct irq_desc *desc)
829{
830 if (atomic_dec_and_test(&desc->threads_active))
831 wake_up(&desc->wait_for_threads);
832}
833
834static void irq_thread_dtor(struct callback_head *unused)
835{
836 struct task_struct *tsk = current;
837 struct irq_desc *desc;
838 struct irqaction *action;
839
840 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
841 return;
842
843 action = kthread_data(tsk);
844
845 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
846 tsk->comm, tsk->pid, action->irq);
847
848
849 desc = irq_to_desc(action->irq);
850 /*
851 * If IRQTF_RUNTHREAD is set, we need to decrement
852 * desc->threads_active and wake possible waiters.
853 */
854 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
855 wake_threads_waitq(desc);
856
857 /* Prevent a stale desc->threads_oneshot */
858 irq_finalize_oneshot(desc, action);
859}
860
861/*
862 * Interrupt handler thread
863 */
864static int irq_thread(void *data)
865{
866 struct callback_head on_exit_work;
867 struct irqaction *action = data;
868 struct irq_desc *desc = irq_to_desc(action->irq);
869 irqreturn_t (*handler_fn)(struct irq_desc *desc,
870 struct irqaction *action);
871
872 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
873 &action->thread_flags))
874 handler_fn = irq_forced_thread_fn;
875 else
876 handler_fn = irq_thread_fn;
877
878 init_task_work(&on_exit_work, irq_thread_dtor);
879 task_work_add(current, &on_exit_work, false);
880
881 irq_thread_check_affinity(desc, action);
882
883 while (!irq_wait_for_interrupt(action)) {
884 irqreturn_t action_ret;
885
886 irq_thread_check_affinity(desc, action);
887
888 action_ret = handler_fn(desc, action);
889 if (!noirqdebug)
890 note_interrupt(action->irq, desc, action_ret);
891
892 wake_threads_waitq(desc);
893 }
894
895 /*
896 * This is the regular exit path. __free_irq() is stopping the
897 * thread via kthread_stop() after calling
898 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
899 * oneshot mask bit can be set. We cannot verify that as we
900 * cannot touch the oneshot mask at this point anymore as
901 * __setup_irq() might have given out currents thread_mask
902 * again.
903 */
904 task_work_cancel(current, irq_thread_dtor);
905 return 0;
906}
907
908/**
909 * irq_wake_thread - wake the irq thread for the action identified by dev_id
910 * @irq: Interrupt line
911 * @dev_id: Device identity for which the thread should be woken
912 *
913 */
914void irq_wake_thread(unsigned int irq, void *dev_id)
915{
916 struct irq_desc *desc = irq_to_desc(irq);
917 struct irqaction *action;
918 unsigned long flags;
919
920 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
921 return;
922
923 raw_spin_lock_irqsave(&desc->lock, flags);
924 for (action = desc->action; action; action = action->next) {
925 if (action->dev_id == dev_id) {
926 if (action->thread)
927 __irq_wake_thread(desc, action);
928 break;
929 }
930 }
931 raw_spin_unlock_irqrestore(&desc->lock, flags);
932}
933EXPORT_SYMBOL_GPL(irq_wake_thread);
934
935static void irq_setup_forced_threading(struct irqaction *new)
936{
937 if (!force_irqthreads)
938 return;
939 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
940 return;
941
942 new->flags |= IRQF_ONESHOT;
943
944 if (!new->thread_fn) {
945 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
946 new->thread_fn = new->handler;
947 new->handler = irq_default_primary_handler;
948 }
949}
950
951static int irq_request_resources(struct irq_desc *desc)
952{
953 struct irq_data *d = &desc->irq_data;
954 struct irq_chip *c = d->chip;
955
956 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
957}
958
959static void irq_release_resources(struct irq_desc *desc)
960{
961 struct irq_data *d = &desc->irq_data;
962 struct irq_chip *c = d->chip;
963
964 if (c->irq_release_resources)
965 c->irq_release_resources(d);
966}
967
968/*
969 * Internal function to register an irqaction - typically used to
970 * allocate special interrupts that are part of the architecture.
971 */
972static int
973__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
974{
975 struct irqaction *old, **old_ptr;
976 unsigned long flags, thread_mask = 0;
977 int ret, nested, shared = 0;
978 cpumask_var_t mask;
979
980 if (!desc)
981 return -EINVAL;
982
983 if (desc->irq_data.chip == &no_irq_chip)
984 return -ENOSYS;
985 if (!try_module_get(desc->owner))
986 return -ENODEV;
987
988 /*
989 * Check whether the interrupt nests into another interrupt
990 * thread.
991 */
992 nested = irq_settings_is_nested_thread(desc);
993 if (nested) {
994 if (!new->thread_fn) {
995 ret = -EINVAL;
996 goto out_mput;
997 }
998 /*
999 * Replace the primary handler which was provided from
1000 * the driver for non nested interrupt handling by the
1001 * dummy function which warns when called.
1002 */
1003 new->handler = irq_nested_primary_handler;
1004 } else {
1005 if (irq_settings_can_thread(desc))
1006 irq_setup_forced_threading(new);
1007 }
1008
1009 /*
1010 * Create a handler thread when a thread function is supplied
1011 * and the interrupt does not nest into another interrupt
1012 * thread.
1013 */
1014 if (new->thread_fn && !nested) {
1015 struct task_struct *t;
1016 static const struct sched_param param = {
1017 .sched_priority = MAX_USER_RT_PRIO/2,
1018 };
1019
1020 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1021 new->name);
1022 if (IS_ERR(t)) {
1023 ret = PTR_ERR(t);
1024 goto out_mput;
1025 }
1026
1027 sched_setscheduler_nocheck(t, SCHED_FIFO, ¶m);
1028
1029 /*
1030 * We keep the reference to the task struct even if
1031 * the thread dies to avoid that the interrupt code
1032 * references an already freed task_struct.
1033 */
1034 get_task_struct(t);
1035 new->thread = t;
1036 /*
1037 * Tell the thread to set its affinity. This is
1038 * important for shared interrupt handlers as we do
1039 * not invoke setup_affinity() for the secondary
1040 * handlers as everything is already set up. Even for
1041 * interrupts marked with IRQF_NO_BALANCE this is
1042 * correct as we want the thread to move to the cpu(s)
1043 * on which the requesting code placed the interrupt.
1044 */
1045 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1046 }
1047
1048 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1049 ret = -ENOMEM;
1050 goto out_thread;
1051 }
1052
1053 /*
1054 * Drivers are often written to work w/o knowledge about the
1055 * underlying irq chip implementation, so a request for a
1056 * threaded irq without a primary hard irq context handler
1057 * requires the ONESHOT flag to be set. Some irq chips like
1058 * MSI based interrupts are per se one shot safe. Check the
1059 * chip flags, so we can avoid the unmask dance at the end of
1060 * the threaded handler for those.
1061 */
1062 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1063 new->flags &= ~IRQF_ONESHOT;
1064
1065 /*
1066 * The following block of code has to be executed atomically
1067 */
1068 raw_spin_lock_irqsave(&desc->lock, flags);
1069 old_ptr = &desc->action;
1070 old = *old_ptr;
1071 if (old) {
1072 /*
1073 * Can't share interrupts unless both agree to and are
1074 * the same type (level, edge, polarity). So both flag
1075 * fields must have IRQF_SHARED set and the bits which
1076 * set the trigger type must match. Also all must
1077 * agree on ONESHOT.
1078 */
1079 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1080 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
1081 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1082 goto mismatch;
1083
1084 /* All handlers must agree on per-cpuness */
1085 if ((old->flags & IRQF_PERCPU) !=
1086 (new->flags & IRQF_PERCPU))
1087 goto mismatch;
1088
1089 /* add new interrupt at end of irq queue */
1090 do {
1091 /*
1092 * Or all existing action->thread_mask bits,
1093 * so we can find the next zero bit for this
1094 * new action.
1095 */
1096 thread_mask |= old->thread_mask;
1097 old_ptr = &old->next;
1098 old = *old_ptr;
1099 } while (old);
1100 shared = 1;
1101 }
1102
1103 /*
1104 * Setup the thread mask for this irqaction for ONESHOT. For
1105 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1106 * conditional in irq_wake_thread().
1107 */
1108 if (new->flags & IRQF_ONESHOT) {
1109 /*
1110 * Unlikely to have 32 resp 64 irqs sharing one line,
1111 * but who knows.
1112 */
1113 if (thread_mask == ~0UL) {
1114 ret = -EBUSY;
1115 goto out_mask;
1116 }
1117 /*
1118 * The thread_mask for the action is or'ed to
1119 * desc->thread_active to indicate that the
1120 * IRQF_ONESHOT thread handler has been woken, but not
1121 * yet finished. The bit is cleared when a thread
1122 * completes. When all threads of a shared interrupt
1123 * line have completed desc->threads_active becomes
1124 * zero and the interrupt line is unmasked. See
1125 * handle.c:irq_wake_thread() for further information.
1126 *
1127 * If no thread is woken by primary (hard irq context)
1128 * interrupt handlers, then desc->threads_active is
1129 * also checked for zero to unmask the irq line in the
1130 * affected hard irq flow handlers
1131 * (handle_[fasteoi|level]_irq).
1132 *
1133 * The new action gets the first zero bit of
1134 * thread_mask assigned. See the loop above which or's
1135 * all existing action->thread_mask bits.
1136 */
1137 new->thread_mask = 1 << ffz(thread_mask);
1138
1139 } else if (new->handler == irq_default_primary_handler &&
1140 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}
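
	/*
	 * Illustrative sketch (foo_thread_fn and foo are hypothetical
	 * driver names): the accepted way to request a thread-only
	 * handler is to pass IRQF_ONESHOT, which keeps the line masked
	 * until the threaded handler completes:
	 *
	 *	ret = request_threaded_irq(irq, NULL, foo_thread_fn,
	 *				   IRQF_ONESHOT, "foo", foo);
	 */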

	if (!shared) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_mask;
		}

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge, polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
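
/*
 * Sketch of early-boot usage, with hypothetical names throughout
 * (foo_timer_interrupt, FOO_TIMER_IRQ): the irqaction must live in
 * static storage, since setup_irq() runs before the allocators that
 * request_irq() relies on are available and does not copy the action.
 */
#if 0
static irqreturn_t foo_timer_interrupt(int irq, void *dev_id)
{
	/* ack the timer hardware, advance the clock, ... */
	return IRQ_HANDLED;
}

static struct irqaction foo_timer_irqaction = {
	.handler = foo_timer_interrupt,
	.flags	 = IRQF_TIMER,
	.name	 = "foo-timer",
};

static void __init foo_time_init(void)
{
	setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
	/* remove_irq(FOO_TIMER_IRQ, &foo_timer_irqaction) undoes this */
}
#endif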

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_shutdown(desc);
		irq_release_resources(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
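
/*
 * Sketch of the teardown ordering required above; all foo_* names are
 * hypothetical. The device is quiesced first so a shared line cannot
 * keep firing into a half-dead driver, and only after free_irq() has
 * returned (no handler can still be running) is the cookie freed.
 */
#if 0
static void foo_remove(struct foo_device *foo)
{
	foo_hw_disable_interrupts(foo);	/* device stops raising the line */
	free_irq(foo->irq, foo);	/* waits for running handlers */
	kfree(foo);			/* safe only after free_irq() */
}
#endif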

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	  Primary handler for threaded interrupts
 *	  If NULL and thread_fn != NULL the default
 *	  primary handler is installed
 * @thread_fn: Function called from the irq handler thread
 *	    If NULL, no irq thread is created
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
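
/*
 * Sketch of the split handler design described above; every foo_*
 * name is hypothetical. The primary handler runs in hard irq context,
 * so it only checks ownership and quiets the device; the sleepable
 * work is deferred to the thread via IRQ_WAKE_THREAD.
 */
#if 0
static irqreturn_t foo_quick_check(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	if (!foo_hw_irq_pending(foo))
		return IRQ_NONE;	/* shared line, not our device */

	foo_hw_mask_irq(foo);		/* silence the device ... */
	return IRQ_WAKE_THREAD;		/* ... and wake foo_thread_fn() */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	foo_process_events(foo);	/* may sleep here */
	foo_hw_unmask_irq(foo);
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_device *foo)
{
	/* foo is the dev_id cookie: unique, handed back to both handlers */
	return request_threaded_irq(foo->irq, foo_quick_check,
				    foo_thread_fn, IRQF_SHARED,
				    "foo", foo);
}
#endif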

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	  Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
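
/*
 * Sketch of the return convention; foo_* names are hypothetical. Any
 * non-negative value means success and encodes which context was
 * picked, so callers must not test for plain zero.
 */
#if 0
static int foo_request(struct foo_device *foo)
{
	int ret;

	ret = request_any_context_irq(foo->irq, foo_handler, 0,
				      "foo", foo);
	if (ret < 0)
		return ret;	/* a real error */

	/* IRQC_IS_HARDIRQ or IRQC_IS_NESTED: both mean success */
	foo->irq_is_nested = (ret == IRQC_IS_NESTED);
	return 0;
}
#endif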

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, irq, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);
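
/*
 * Sketch (hypothetical foo_* names): both functions act only on the
 * calling CPU, so they are typically invoked from CPU-local context,
 * e.g. CPU bringup/teardown callbacks, once per CPU.
 */
#if 0
static void foo_cpu_starting(unsigned int cpu)
{
	enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);	/* this CPU only */
}

static void foo_cpu_dying(unsigned int cpu)
{
	disable_percpu_irq(foo_irq);			/* this CPU only */
}
#endif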

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but
 * the interrupt line is not disabled. This must be done on each
 * CPU before calling this function. The function does not return
 * until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}

/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}

/**
 * request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources, but doesn't
 * automatically enable the interrupt. It has to be done on each
 * CPU using enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}
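
/*
 * Sketch of the whole per-cpu flow, with hypothetical foo_* names:
 * the cookie is a percpu allocation, each handler invocation receives
 * the interrupted CPU's instance, and the line must still be enabled
 * per CPU afterwards via enable_percpu_irq() above.
 */
#if 0
struct foo_pcpu {
	unsigned long events;
	/* ... per-cpu state ... */
};

static struct foo_pcpu __percpu *foo_pcpu;

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
	struct foo_pcpu *p = dev_id;	/* this CPU's instance */

	p->events++;
	return IRQ_HANDLED;
}

static int __init foo_init(void)
{
	int ret;

	foo_pcpu = alloc_percpu(struct foo_pcpu);
	if (!foo_pcpu)
		return -ENOMEM;

	ret = request_percpu_irq(FOO_IRQ, foo_percpu_handler,
				 "foo-percpu", foo_pcpu);
	if (ret) {
		free_percpu(foo_pcpu);
		return ret;
	}

	/* each CPU must still call enable_percpu_irq(FOO_IRQ, ...) on itself */
	return 0;
}
#endif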