/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
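
/*
 * Example (illustrative sketch, not part of the API above): a typical
 * driver teardown path built on these helpers. Stopping new handler
 * invocations and then waiting for in-flight ones is exactly the pair
 * of steps disable_irq() performs internally; only afterwards is it
 * safe to free data the handler dereferences. The helper name and the
 * handler_data cookie are hypothetical.
 */
static void __maybe_unused example_irq_teardown(unsigned int irq,
						void *handler_data)
{
	disable_irq_nosync(irq);	/* no new handler invocations */
	synchronize_irq(irq);		/* wait for hardirq and thread */
	kfree(handler_data);		/* now safe to free handler data */
}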

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static int __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;
	return 1;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
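
/*
 * Example (illustrative sketch, hypothetical driver code): a multi-queue
 * device publishing one affinity hint per queue vector. Userspace
 * irqbalance reads the hint from /proc/irq/<n>/affinity_hint, and the
 * call above also applies it once as the initial affinity.
 */
static void __maybe_unused example_spread_queue_irqs(const unsigned int *irqs,
						     unsigned int nr_queues)
{
	unsigned int i;

	for (i = 0; i < nr_queues; i++)
		irq_set_affinity_hint(irqs[i],
				      cpumask_of(i % num_online_cpus()));
}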

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 * notification. Function pointers must be initialised;
 * the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
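
/*
 * Example (illustrative sketch, hypothetical callbacks): a driver that
 * wants to follow affinity changes fills in notify() and release() and
 * registers the notifier after requesting the interrupt. release() must
 * free the object containing the notifier; it runs in process context
 * once the last reference is dropped.
 */
static void __maybe_unused example_affinity_changed(struct irq_affinity_notify *notify,
						    const cpumask_t *mask)
{
	pr_info("irq %u now targets CPUs %*pbl\n",
		notify->irq, cpumask_pr_args(mask));
}

static int __maybe_unused example_register_notify(unsigned int irq,
						  struct irq_affinity_notify *notify,
						  void (*release)(struct kref *kref))
{
	notify->notify = example_affinity_changed;
	notify->release = release;
	return irq_set_affinity_notifier(irq, notify);
}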

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = irq_desc_get_node(desc);

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq_desc_get_irq(d));
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	chip = irq_data_get_irq_chip(data);
	if (chip && chip->irq_set_vcpu_affinity)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
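
/*
 * Example (illustrative sketch): because disables nest, a section that
 * must run with the line quiet can be bracketed unconditionally, even if
 * the caller already disabled the irq. Must run in process context as
 * disable_irq() can sleep. The helper and callback are hypothetical.
 */
static void __maybe_unused example_with_irq_quiet(unsigned int irq,
						  void (*fn)(void *data),
						  void *data)
{
	disable_irq(irq);	/* depth n -> n + 1, waits for handlers */
	fn(data);		/* handler cannot run here */
	enable_irq(irq);	/* depth back to n; unmasked only at 0 */
}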

/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
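
/*
 * Example (illustrative sketch): optimistic disable from atomic context
 * as described above. If a threaded handler is still active the caller
 * cannot quiesce the line here and must retry from a sleepable path.
 * The helper is hypothetical.
 */
static bool __maybe_unused example_try_quiesce(unsigned int irq)
{
	if (disable_hardirq(irq))
		return true;	/* hardirqs done, no thread active */

	enable_irq(irq);	/* balance the disable and back off */
	return false;
}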

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
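
/*
 * Example (illustrative sketch, hypothetical suspend/resume pair): wake
 * enables and disables must balance, like irq disables. Most drivers use
 * the enable_irq_wake()/disable_irq_wake() wrappers from
 * <linux/interrupt.h>, which resolve to this function.
 */
static int __maybe_unused example_suspend(unsigned int wakeup_irq)
{
	return irq_set_irq_wake(wakeup_irq, 1);	/* arm as wakeup source */
}

static int __maybe_unused example_resume(unsigned int wakeup_irq)
{
	return irq_set_irq_wake(wakeup_irq, 0);	/* matching disable */
}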

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread may finish before the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and mask the line, then leave due
	 * to IRQS_INPROGRESS, and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (desc->irq_common_data.affinity)
		cpumask_copy(mask, desc->irq_common_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
852/*
853 * Interrupts which are not explicitely requested as threaded
854 * interrupts rely on the implicit bh/preempt disable of the hard irq
855 * context. So we need to disable bh here to avoid deadlocks and other
856 * side effects.
857 */
858static irqreturn_t
859irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
860{
861 irqreturn_t ret;
862
863 local_bh_disable();
864 ret = action->thread_fn(action->irq, action->dev_id);
865 irq_finalize_oneshot(desc, action);
866 local_bh_enable();
867 return ret;
868}
869
870/*
871 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);


	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq: Interrupt line
 * @dev_id: Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler != irq_default_primary_handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);
	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}

	if (!shared) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_mask;
		}

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, nmsk, omsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		irq_shutdown(desc);
		irq_release_resources(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	kfree(__free_irq(irq, dev_id));
}
EXPORT_SYMBOL(free_irq);

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * Primary handler for threaded interrupts
 * If NULL and thread_fn != NULL the default
 * primary handler is installed
 * @thread_fn: Function called from the irq handler thread
 * If NULL, no irq thread is created
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 * IRQF_SHARED Interrupt is shared
 * IRQF_TRIGGER_* Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval) {
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
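
/*
 * Example (illustrative sketch of the split handler design documented
 * above; the device accessors are stub assumptions): the primary handler
 * runs in hard interrupt context, checks whether its device raised the
 * interrupt, silences it at the device level and defers the slow work
 * to the thread, which may sleep.
 */
static bool example_device_pending(void *dev_id) { return true; }	/* stub */
static void example_device_mask(void *dev_id) { }			/* stub */
static void example_device_unmask(void *dev_id) { }			/* stub */

static irqreturn_t example_primary(int irq, void *dev_id)
{
	if (!example_device_pending(dev_id))
		return IRQ_NONE;		/* not ours: shared line */
	example_device_mask(dev_id);		/* quiesce at the device */
	return IRQ_WAKE_THREAD;			/* run example_thread_fn */
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* May sleep: talk to slow buses, take mutexes, etc. */
	example_device_unmask(dev_id);
	return IRQ_HANDLED;
}

static int __maybe_unused example_request(unsigned int irq, void *dev)
{
	return request_threaded_irq(irq, example_primary, example_thread_fn,
				    IRQF_SHARED, "example", dev);
}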

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
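
/*
 * Example (illustrative sketch, hypothetical caller): any non-negative
 * return from request_any_context_irq() means success, since it is
 * IRQC_IS_HARDIRQ or IRQC_IS_NESTED rather than 0.
 */
static int __maybe_unused example_request_any(unsigned int irq,
					      irq_handler_t handler, void *dev)
{
	int ret = request_any_context_irq(irq, handler, 0, "example", dev);

	if (ret < 0)
		return ret;	/* genuine failure */
	return 0;		/* hardirq or nested thread, both fine */
}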

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq: Linux irq number to check for
 *
 * Must be called from a non-migratable context. Returns the enable
1756 * state of a per cpu interrupt on the current cpu.
1757 */
1758bool irq_percpu_is_enabled(unsigned int irq)
1759{
1760 unsigned int cpu = smp_processor_id();
1761 struct irq_desc *desc;
1762 unsigned long flags;
1763 bool is_enabled;
1764
1765 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1766 if (!desc)
1767 return false;
1768
1769 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1770 irq_put_desc_unlock(desc, flags);
1771
1772 return is_enabled;
1773}
1774EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1775
1776void disable_percpu_irq(unsigned int irq)
1777{
1778 unsigned int cpu = smp_processor_id();
1779 unsigned long flags;
1780 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1781
1782 if (!desc)
1783 return;
1784
1785 irq_percpu_disable(desc, cpu);
1786 irq_put_desc_unlock(desc, flags);
1787}
1788EXPORT_SYMBOL_GPL(disable_percpu_irq);
1789
1790/*
1791 * Internal function to unregister a percpu irqaction.
1792 */
1793static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1794{
1795 struct irq_desc *desc = irq_to_desc(irq);
1796 struct irqaction *action;
1797 unsigned long flags;
1798
1799 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1800
1801 if (!desc)
1802 return NULL;
1803
1804 raw_spin_lock_irqsave(&desc->lock, flags);
1805
1806 action = desc->action;
1807 if (!action || action->percpu_dev_id != dev_id) {
1808 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1809 goto bad;
1810 }
1811
1812 if (!cpumask_empty(desc->percpu_enabled)) {
1813 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1814 irq, cpumask_first(desc->percpu_enabled));
1815 goto bad;
1816 }
1817
1818 /* Found it - now remove it from the list of entries: */
1819 desc->action = NULL;
1820
1821 raw_spin_unlock_irqrestore(&desc->lock, flags);
1822
1823 unregister_handler_proc(irq, action);
1824
1825 module_put(desc->owner);
1826 return action;
1827
1828bad:
1829 raw_spin_unlock_irqrestore(&desc->lock, flags);
1830 return NULL;
1831}
1832
1833/**
1834 * remove_percpu_irq - free a per-cpu interrupt
1835 * @irq: Interrupt line to free
1836 * @act: irqaction for the interrupt
1837 *
1838 * Used to remove interrupts statically setup by the early boot process.
1839 */
1840void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1841{
1842 struct irq_desc *desc = irq_to_desc(irq);
1843
1844 if (desc && irq_settings_is_per_cpu_devid(desc))
1845 __free_percpu_irq(irq, act->percpu_dev_id);
1846}
1847
1848/**
1849 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
1850 * @irq: Interrupt line to free
1851 * @dev_id: Device identity to free
1852 *
1853 * Remove a percpu interrupt handler. The handler is removed, but
1854 * the interrupt line is not disabled. This must be done on each
1855 * CPU before calling this function. The function does not return
1856 * until any executing interrupts for this IRQ have completed.
1857 *
1858 * This function must not be called from interrupt context.
1859 */
1860void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1861{
1862 struct irq_desc *desc = irq_to_desc(irq);
1863
1864 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1865 return;
1866
1867 chip_bus_lock(desc);
1868 kfree(__free_percpu_irq(irq, dev_id));
1869 chip_bus_sync_unlock(desc);
1870}
1871EXPORT_SYMBOL_GPL(free_percpu_irq);
1872
1873/**
1874 * setup_percpu_irq - setup a per-cpu interrupt
1875 * @irq: Interrupt line to setup
1876 * @act: irqaction for the interrupt
1877 *
1878 * Used to statically setup per-cpu interrupts in the early boot process.
1879 */
1880int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1881{
1882 struct irq_desc *desc = irq_to_desc(irq);
1883 int retval;
1884
1885 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1886 return -EINVAL;
1887 chip_bus_lock(desc);
1888 retval = __setup_irq(irq, desc, act);
1889 chip_bus_sync_unlock(desc);
1890
1891 return retval;
1892}

/**
 * request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt on the local CPU. If the interrupt is supposed to be
 * enabled on other CPUs, it has to be done on each CPU using
 * enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}
EXPORT_SYMBOL_GPL(request_percpu_irq);
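
/*
 * Usage sketch (a minimal, hypothetical driver; "struct my_state" and
 * "my_irq" are assumptions): the cookie is a percpu allocation, the
 * handler receives the invoking CPU's instance of it, and each CPU
 * enables its own copy of the line:
 *
 *	struct my_state {
 *		unsigned long count;
 *	};
 *	static struct my_state __percpu *my_state;
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		struct my_state *s = dev_id;	// this CPU's instance
 *
 *		s->count++;			// e.g. a percpu counter
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_init(unsigned int my_irq)
 *	{
 *		int err;
 *
 *		my_state = alloc_percpu(struct my_state);
 *		if (!my_state)
 *			return -ENOMEM;
 *		err = request_percpu_irq(my_irq, my_handler, "my-dev",
 *					 my_state);
 *		if (err) {
 *			free_percpu(my_state);
 *			return err;
 *		}
 *		enable_percpu_irq(my_irq, IRQ_TYPE_NONE);  // local CPU only
 *		return 0;
 *	}
 */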

/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an
 * interrupt, returning into @state the bit corresponding to
 * state @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
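
/*
 * Usage sketch (hedged; "host_irq" is a hypothetical interrupt that
 * has been forwarded to a guest): snapshot whether the line is
 * currently pending at the irqchip, e.g. before saving VM state:
 *
 *	bool pending;
 *	int err;
 *
 *	err = irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING,
 *				    &pending);
 *	if (err)
 *		return err;
 *	// "pending" now mirrors the chip-level pending bit
 */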

/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt,
 * depending on the value of @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
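
/*
 * Usage sketch (hedged; the counterpart of the irq_get_irqchip_state()
 * example above, with the same hypothetical names): re-inject a
 * previously saved pending bit when the interrupt is handed back to
 * the VM:
 *
 *	err = irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING,
 *				    saved_pending);
 */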