/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this
 *	function while holding a resource the IRQ handler may need you
 *	will deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
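
/*
 * Example: a minimal teardown sketch for a hypothetical driver showing why
 * synchronize_irq() (and not synchronize_hardirq()) must be used before
 * freeing state that a threaded handler may still touch. All "foo_" names
 * are illustrative, not part of this API.
 *
 *	static void foo_shutdown(struct foo_dev *foo)
 *	{
 *		disable_irq_nosync(foo->irq);
 *		synchronize_irq(foo->irq);	// waits for hardirq AND thread
 *		kfree(foo->dma_buf);		// now safe to free
 *	}
 */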

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static int __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;
	return 1;
}

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
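
/*
 * Usage sketch (hypothetical multiqueue driver): publish a per-queue
 * affinity hint so userspace balancers can spread queue interrupts, and
 * clear it again before freeing the interrupt.
 *
 *	err = irq_set_affinity_hint(queue->irq, cpumask_of(queue->cpu));
 *	...
 *	irq_set_affinity_hint(queue->irq, NULL);	// before free_irq()
 *	free_irq(queue->irq, queue);
 */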

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
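
/*
 * Registration sketch, assuming a hypothetical foo_dev that embeds a
 * struct irq_affinity_notify. Only the function pointers need filling in;
 * irq, kref and work are initialised by irq_set_affinity_notifier().
 *
 *	static void foo_affinity_notify(struct irq_affinity_notify *notify,
 *					const cpumask_t *mask)
 *	{
 *		struct foo_dev *foo = container_of(notify, struct foo_dev,
 *						   affinity_notify);
 *
 *		foo_retarget_queues(foo, mask);		// hypothetical helper
 *	}
 *
 *	foo->affinity_notify.notify = foo_affinity_notify;
 *	foo->affinity_notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
 *	...
 *	irq_set_affinity_notifier(foo->irq, NULL);	// before free_irq()
 */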

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = irq_desc_get_node(desc);

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq_desc_get_irq(d));
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

/**
 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 *	@irq: interrupt number to set affinity
 *	@vcpu_info: vCPU specific data
 *
 *	This function uses the vCPU specific data to set the vCPU
 *	affinity for an irq. The vCPU specific data is passed from
 *	outside, such as KVM. One example code path is as below:
 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	chip = irq_data_get_irq_chip(data);
	if (chip && chip->irq_set_vcpu_affinity)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
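
/*
 * Because disables nest via desc->depth, overlapping disable/enable pairs
 * from independent code paths are safe. A sketch:
 *
 *	disable_irq(irq);	// depth 0 -> 1, line masked
 *	disable_irq(irq);	// depth 1 -> 2, no hardware change
 *	enable_irq(irq);	// depth 2 -> 1, still masked
 *	enable_irq(irq);	// depth 1 -> 0, line unmasked again
 */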

/**
 *	disable_hardirq - disables an irq and waits for hardirq completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this function while
 *	holding a resource the hard IRQ handler may need you will deadlock.
 *
 *	When used to optimistically disable an interrupt from atomic context
 *	the return value must be checked.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
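
/*
 * Typical suspend/resume pairing in a hypothetical driver. The
 * enable_irq_wake()/disable_irq_wake() helpers in <linux/interrupt.h> are
 * thin wrappers around irq_set_irq_wake(irq, 1) and irq_set_irq_wake(irq, 0).
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */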

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect against
	 * the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (desc->irq_common_data.affinity)
		cpumask_copy(mask, desc->irq_common_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
		struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					&action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
 *	@irq:		Interrupt line
 *	@dev_id:	Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
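
/*
 * Sketch: kicking the irq thread from a path that cannot return
 * IRQ_WAKE_THREAD, e.g. a recovery work item in a hypothetical driver.
 * The dev_id must match the one passed to request_threaded_irq().
 *
 *	static void foo_recover_work(struct work_struct *work)
 *	{
 *		struct foo_dev *foo = container_of(work, struct foo_dev,
 *						   recover_work);
 *
 *		if (foo_device_has_work(foo))		// hypothetical check
 *			irq_wake_thread(foo->irq, foo);
 *	}
 */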

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler != irq_default_primary_handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}

	if (!shared) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_mask;
		}

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, nmsk, omsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		irq_shutdown(desc);
		irq_release_resources(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	kfree(__free_irq(irq, dev_id));
}
EXPORT_SYMBOL(free_irq);

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval) {
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
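
/*
 * Usage sketch for a hypothetical device on a slow bus: the primary
 * handler only checks and quiesces the interrupt source; the threaded
 * handler does the work that may sleep. All "foo_" helpers are
 * illustrative.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))	// shared line: not ours
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);	// stop the device asserting
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(foo->irq, foo_hardirq, foo_thread,
 *				   IRQF_SHARED, "foo", foo);
 */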

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
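
/*
 * Sketch: a driver whose device may sit behind an i2c/gpio expander
 * (nested thread context) or a regular irqchip checks the positive
 * return value to learn which context it got (hypothetical names):
 *
 *	ret = request_any_context_irq(foo->irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	foo->irq_is_nested = (ret == IRQC_IS_NESTED);
 */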
1723
1724void enable_percpu_irq(unsigned int irq, unsigned int type)
1725{
1726 unsigned int cpu = smp_processor_id();
1727 unsigned long flags;
1728 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1729
1730 if (!desc)
1731 return;
1732
1733 type &= IRQ_TYPE_SENSE_MASK;
1734 if (type != IRQ_TYPE_NONE) {
1735 int ret;
1736
1737 ret = __irq_set_trigger(desc, type);
1738
1739 if (ret) {
1740 WARN(1, "failed to set type for IRQ%d\n", irq);
1741 goto out;
1742 }
1743 }
1744
1745 irq_percpu_enable(desc, cpu);
1746out:
1747 irq_put_desc_unlock(desc, flags);
1748}
1749EXPORT_SYMBOL_GPL(enable_percpu_irq);
1750
1751/**
1752 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1753 * @irq: Linux irq number to check for
1754 *
1755 * Must be called from a non migratable context. Returns the enable
1756 * state of a per cpu interrupt on the current cpu.
1757 */
1758bool irq_percpu_is_enabled(unsigned int irq)
1759{
1760 unsigned int cpu = smp_processor_id();
1761 struct irq_desc *desc;
1762 unsigned long flags;
1763 bool is_enabled;
1764
1765 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1766 if (!desc)
1767 return false;
1768
1769 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1770 irq_put_desc_unlock(desc, flags);
1771
1772 return is_enabled;
1773}
1774EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1775
1776void disable_percpu_irq(unsigned int irq)
1777{
1778 unsigned int cpu = smp_processor_id();
1779 unsigned long flags;
1780 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1781
1782 if (!desc)
1783 return;
1784
1785 irq_percpu_disable(desc, cpu);
1786 irq_put_desc_unlock(desc, flags);
1787}
1788EXPORT_SYMBOL_GPL(disable_percpu_irq);
1789
1790/*
1791 * Internal function to unregister a percpu irqaction.
1792 */
1793static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1794{
1795 struct irq_desc *desc = irq_to_desc(irq);
1796 struct irqaction *action;
1797 unsigned long flags;
1798
1799 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1800
1801 if (!desc)
1802 return NULL;
1803
1804 raw_spin_lock_irqsave(&desc->lock, flags);
1805
1806 action = desc->action;
1807 if (!action || action->percpu_dev_id != dev_id) {
1808 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1809 goto bad;
1810 }
1811
1812 if (!cpumask_empty(desc->percpu_enabled)) {
1813 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1814 irq, cpumask_first(desc->percpu_enabled));
1815 goto bad;
1816 }
1817
1818 /* Found it - now remove it from the list of entries: */
1819 desc->action = NULL;
1820
1821 raw_spin_unlock_irqrestore(&desc->lock, flags);
1822
1823 unregister_handler_proc(irq, action);
1824
1825 module_put(desc->owner);
1826 return action;
1827
1828bad:
1829 raw_spin_unlock_irqrestore(&desc->lock, flags);
1830 return NULL;
1831}
1832
1833/**
1834 * remove_percpu_irq - free a per-cpu interrupt
1835 * @irq: Interrupt line to free
1836 * @act: irqaction for the interrupt
1837 *
1838 * Used to remove interrupts statically setup by the early boot process.
1839 */
1840void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1841{
1842 struct irq_desc *desc = irq_to_desc(irq);
1843
1844 if (desc && irq_settings_is_per_cpu_devid(desc))
1845 __free_percpu_irq(irq, act->percpu_dev_id);
1846}
1847
1848/**
1849 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
1850 * @irq: Interrupt line to free
1851 * @dev_id: Device identity to free
1852 *
1853 * Remove a percpu interrupt handler. The handler is removed, but
1854 * the interrupt line is not disabled. This must be done on each
1855 * CPU before calling this function. The function does not return
1856 * until any executing interrupts for this IRQ have completed.
1857 *
1858 * This function must not be called from interrupt context.
1859 */
1860void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1861{
1862 struct irq_desc *desc = irq_to_desc(irq);
1863
1864 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1865 return;
1866
1867 chip_bus_lock(desc);
1868 kfree(__free_percpu_irq(irq, dev_id));
1869 chip_bus_sync_unlock(desc);
1870}
1871EXPORT_SYMBOL_GPL(free_percpu_irq);
1872
1873/**
1874 * setup_percpu_irq - setup a per-cpu interrupt
1875 * @irq: Interrupt line to setup
1876 * @act: irqaction for the interrupt
1877 *
1878 * Used to statically setup per-cpu interrupts in the early boot process.
1879 */
1880int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1881{
1882 struct irq_desc *desc = irq_to_desc(irq);
1883 int retval;
1884
1885 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1886 return -EINVAL;
1887 chip_bus_lock(desc);
1888 retval = __setup_irq(irq, desc, act);
1889 chip_bus_sync_unlock(desc);
1890
1891 return retval;
1892}

/**
 * request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt on the local CPU. If the interrupt is supposed to be
 * enabled on other CPUs, it has to be done on each CPU using
 * enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}
EXPORT_SYMBOL_GPL(request_percpu_irq);
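
/*
 * Usage sketch (illustrative only; the "foo_*" identifiers are invented):
 *
 *	static DEFINE_PER_CPU(struct foo_percpu, foo_cpu_data);
 *
 *	static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
 *	{
 *		struct foo_percpu *p = dev_id;	// this CPU's instance
 *
 *		return foo_handle_events(p) ? IRQ_HANDLED : IRQ_NONE;
 *	}
 *
 *	err = request_percpu_irq(irq, foo_percpu_handler, "foo",
 *				 &foo_cpu_data);
 *
 * followed by enable_percpu_irq(irq, IRQ_TYPE_NONE) on every CPU that
 * should receive the interrupt.
 */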

/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an
 * interrupt, returning into @state the bit corresponding to
 * state @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
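
/*
 * Typical use is saving the pending bit of an interrupt that is
 * forwarded to a guest before the VM is paused. A hedged sketch
 * ("vgic_state" is a hypothetical structure, error handling elided):
 *
 *	bool pending;
 *
 *	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
 *		vgic_state->pending = pending;
 */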

/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt,
 * depending on the value of @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
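
/*
 * Matching restore sketch for the snapshot taken above (again with the
 * hypothetical "vgic_state"): replay the saved pending bit when the VM
 * is resumed.
 *
 *	irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
 *			      vgic_state->pending);
 */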

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	bool inprogress;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We cannot call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = chip->irq_set_affinity(data, mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			cpumask_copy(data->affinity, mask);
			/* fall through */
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
			ret = 0;
		}
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @mask: cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
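
/*
 * Example sketch: a driver that wants all completions handled on CPU 0
 * could do the following (illustrative only):
 *
 *	if (irq_can_set_affinity(irq))
 *		irq_set_affinity(irq, cpumask_of(0));
 */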

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *	    notification. Function pointers must be initialised;
 *	    the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
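
/*
 * Registration sketch (hypothetical "foo" driver): only the notify()
 * and release() callbacks must be supplied, the remaining fields are
 * initialised by irq_set_affinity_notifier() itself.
 *
 *	static void foo_notify(struct irq_affinity_notify *n,
 *			       const cpumask_t *mask)
 *	{
 *		...	re-steer per-queue resources to @mask
 *	}
 *
 *	static void foo_release(struct kref *ref)
 *	{
 *		...	last reference gone, release the context
 *	}
 *
 *	foo->notify.notify = foo_notify;
 *	foo->notify.release = foo_release;
 *	irq_set_affinity_notifier(irq, &foo->notify);
 */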

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cpumask *set = irq_default_affinity;
	int ret;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->istate |= IRQS_SUSPENDED;
	}

	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq, false);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume) {
		if (!(desc->istate & IRQS_SUSPENDED)) {
			if (!desc->action)
				return;
			if (!(desc->action->flags & IRQF_FORCE_RESUME))
				return;
			/* Pretend that it got disabled! */
			desc->depth++;
		}
		desc->istate &= ~IRQS_SUSPENDED;
	}

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL!
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq, false);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
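
/*
 * Because disables and enables nest, the classic pattern below is safe
 * even if other code has disabled the same line (illustrative sketch):
 *
 *	disable_irq(irq);	// waits for running handlers to finish
 *	...			// touch data the handler also uses
 *	enable_irq(irq);	// unmasked again once depth drops to 0
 */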

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/*
	 * Wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
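
/*
 * Wakeup sketch (hypothetical "foo" driver): enables and disables must
 * balance, so the calls typically live in the suspend/resume hooks.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 * with the mirror-image irq_set_irq_wake(foo->irq, 0) in foo_resume().
 */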

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (desc->action)
			if (irqflags & desc->action->flags & IRQF_SHARED)
				canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action, bool force)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect ourselves
	 * against the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again, mask the line and then leave
	 * because of IRQS_INPROGRESS, and the irq line stays masked
	 * forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->irq_data.affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action, false);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action, false);
	return ret;
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);
	int wake;

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irqaction = action;

	while (!irq_wait_for_interrupt(action)) {

		irq_thread_check_affinity(desc, action);

		atomic_inc(&desc->threads_active);

		raw_spin_lock_irq(&desc->lock);
		if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
			/*
			 * CHECKME: We might need a dedicated
			 * IRQ_THREAD_PENDING flag here, which
			 * retriggers the thread in check_irq_resend()
			 * but AFAICT IRQS_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
			desc->istate |= IRQS_PENDING;
			raw_spin_unlock_irq(&desc->lock);
		} else {
			irqreturn_t action_ret;

			raw_spin_unlock_irq(&desc->lock);
			action_ret = handler_fn(desc, action);
			if (!noirqdebug)
				note_interrupt(action->irq, desc, action_ret);
		}

		wake = atomic_dec_and_test(&desc->threads_active);

		if (wake && waitqueue_active(&desc->wait_for_threads))
			wake_up(&desc->wait_for_threads);
	}

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action, true);

	/*
	 * Clear irqaction. Otherwise exit_irq_thread() would make
	 * fuzz about an active irq thread going into nirvana.
	 */
	current->irqaction = NULL;
	return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;

	if (!tsk->irqaction)
		return;

	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

	desc = irq_to_desc(tsk->irqaction->irq);

	/*
	 * Prevent a stale desc->threads_oneshot. Must be called
	 * before setting the IRQTF_DIED flag.
	 */
	irq_finalize_oneshot(desc, tsk->irqaction, true);

	/*
	 * Set the THREAD DIED flag to prevent further wakeups of the
	 * soon to be gone threaded handler.
	 */
	set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
}

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is loaded without actually installing a new
		 * handler, but is this really a problem? Only the
		 * sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc))
			irq_setup_forced_threading(new);
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto out_mput;
		}
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
			old_name = old->name;
			goto mismatch;
		}

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction. Unlikely to have
	 * 32 or 64 irqs sharing one line, but who knows.
	 */
	if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
		ret = -EBUSY;
		goto out_mask;
	}
	new->thread_mask = 1 << ffz(thread_mask);

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge, polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
				   irq, nmsk, omsk);
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
			kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically set up interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->irq_data.chip->release)
		desc->irq_data.chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action)
		irq_shutdown(desc);

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically set up by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
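
/*
 * Teardown sketch for a shared line (illustrative; the "foo_*" names
 * are invented): quiesce the device first, then free with the same
 * dev_id cookie that was passed to request_irq().
 *
 *	foo_hw_mask_interrupts(foo);
 *	free_irq(foo->irq, foo);
 */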

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed
 * @thread_fn: Function called from the irq handler thread
 *	       If NULL, no irq thread is created
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
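
/*
 * Split-handler sketch (hypothetical "foo" driver): the primary handler
 * runs in hard interrupt context, checks ownership of the (possibly
 * shared) line, quiesces the device and defers the real work.
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_raised(foo))	// shared line: not ours
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);	// stop it from re-firing
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep here
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, foo_quick_check, foo_thread,
 *				   IRQF_SHARED, "foo", foo);
 */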

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
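
/*
 * Caller sketch (illustrative): a positive return value tells the
 * driver which flavour it got, errors remain negative.
 *
 *	ret = request_any_context_irq(irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	foo->handler_is_threaded = (ret == IRQC_IS_NESTED);
 */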