// SPDX-License-Identifier: GPL-2.0

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

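/*
 * Generic pending interrupt migration (kernel/irq/migration.c in the
 * mainline kernel tree). When an affinity change cannot be applied
 * immediately, it is recorded in desc->pending_mask and carried out by
 * the helpers below from interrupt context, while the interrupt is
 * safely masked.
 */
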
/**
 * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
 * @desc:	Interrupt descriptor to clean up
 * @force_clear:	If set, clear the move pending bit unconditionally.
 *			If not set, clear it only when the dying CPU is the
 *			last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case, clear the pending move bit.
	 */
	if (!cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}
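
/*
 * Illustrative sketch (not verbatim from this file): a CPU hotplug
 * handler migrating interrupts off an outgoing CPU can use the return
 * value to pick the target mask for the move:
 *
 *	const struct cpumask *affinity = irq_data_get_affinity_mask(data);
 *
 *	if (irq_fixup_move_pending(desc, true))
 *		affinity = irq_desc_get_pending_mask(desc);
 */
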
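/**
 * irq_move_masked_irq - Apply a pending affinity change to a masked interrupt
 * @idata:	Interrupt data of the interrupt to move
 *
 * Applies the target mask recorded in desc->pending_mask via
 * irq_do_set_affinity() and clears the pending state. Expects desc->lock
 * to be held and the interrupt to be masked by the caller.
 */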
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_data *data = &desc->irq_data;
	struct irq_chip *chip = data->chip;

	if (likely(!irqd_is_setaffinity_pending(data)))
		return;

	irqd_clr_move_pending(data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there is a valid mask to work with, do the disable,
	 * re-program, enable sequence. This is *not* particularly
	 * important for level-triggered interrupts, but in an
	 * edge-triggered case we might be setting the RTE while an
	 * active trigger is coming in, which could cause some IOAPICs
	 * to malfunction. Being paranoid, I guess.
	 *
	 * For correct operation this depends on the caller masking
	 * the irqs.
	 */
	if (cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
		int ret;

		ret = irq_do_set_affinity(data, desc->pending_mask, false);
		/*
		 * If there is a cleanup pending in the underlying
		 * vector management, reschedule the move for the next
		 * interrupt. Leave desc->pending_mask intact.
		 */
		if (ret == -EBUSY) {
			irqd_set_move_pending(data);
			return;
		}
	}
	cpumask_clear(desc->pending_mask);
}
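
/**
 * __irq_move_irq - Perform a pending interrupt move from interrupt context
 * @idata:	Interrupt data of the interrupt to move
 *
 * Masks the interrupt if it is not already masked, performs the deferred
 * affinity change and unmasks it again. Respecting an existing mask
 * avoids the interrupt storm described in the comment below for ONESHOT
 * threaded interrupts.
 */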
void __irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get the top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * enabled. The lookup should be optimized away when
	 * CONFIG_IRQ_DOMAIN_HIERARCHY is disabled, which avoids an
	 * "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
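
/*
 * Note: callers normally do not invoke __irq_move_irq() directly. In the
 * mainline tree a small inline wrapper in kernel/irq/internals.h keeps
 * the common "no move pending" case cheap, along these lines:
 *
 *	static inline void irq_move_irq(struct irq_data *data)
 *	{
 *		if (unlikely(irqd_is_setaffinity_pending(data)))
 *			__irq_move_irq(data);
 *	}
 */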