kernel/irq/cpuhotplug.c (v5.4)
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips,
	 * which do not implement effective affinity, but the architecture has
	 * enabled the config switch. Use the general affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}
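The cpumask helpers used above turn set queries into index lookups: cpumask_any_but(m, cpu) returns the first CPU in m other than cpu, cpumask_any_and() returns the first CPU in the intersection of two masks, and both return a value >= nr_cpu_ids when no such CPU exists. A minimal sketch of the "is any CPU in the mask still online?" test, with mask_has_online() as a hypothetical helper name:

/* Hypothetical helper: true if at least one CPU in @m is still online. */
static inline bool mask_has_online(const struct cpumask *m)
{
	return cpumask_any_and(m, cpu_online_mask) < nr_cpu_ids;
}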

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * IRQ chip might be already torn down, but the irq descriptor is
	 * still in the radix tree. Also if the chip has no affinity setter,
	 * nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required, if:
	 * - Interrupt is per cpu
	 * - Interrupt is not started
	 * - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete an eventually pending irq move cleanup. If this
	 * interrupt was moved in hard irq context, then the vectors need
	 * to be cleaned up. It can't wait until this interrupt actually
	 * happens and this CPU was involved.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}
	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}
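migrate_one_irq() can only make progress when the underlying irq chip implements the .irq_set_affinity callback it checks for above. A hypothetical skeleton of such a callback (chip name and hardware access invented for illustration):

static int demo_chip_set_affinity(struct irq_data *d,
				  const struct cpumask *mask, bool force)
{
	/* Pick one online CPU out of the requested mask. */
	unsigned int cpu = cpumask_any_and(mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* A real chip would program its routing registers here. */

	/* Record what was actually programmed into the hardware. */
	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	return IRQ_SET_MASK_OK;
}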

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}
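Architectures that select GENERIC_IRQ_MIGRATION call this from their CPU-offline path, with interrupts disabled and after the dying CPU has already been cleared from cpu_online_mask (irq_needs_fixup() relies on that ordering). A simplified caller sketch, loosely modeled on the arm64 __cpu_disable() flow; real implementations do additional arch-specific teardown:

int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/* Remove this CPU from the online mask first ... */
	set_cpu_online(cpu, false);

	/* ... then push every interrupt it still services elsewhere. */
	irq_migrate_all_off_this_cpu();

	return 0;
}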

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around.
	 */
	if (!irqd_is_single_target(data))
		irq_set_affinity_locked(data, affinity, false);
}
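Managed affinity, which this helper restores, typically comes from drivers that let the kernel spread their queue interrupts across CPUs. A sketch of how such interrupts come into being, assuming a PCI driver with hypothetical names; the PCI_IRQ_AFFINITY flag is what marks the resulting vectors as managed:

#include <linux/pci.h>

static int demo_setup_queue_irqs(struct pci_dev *pdev, unsigned int nr_queues)
{
	int nvec;

	/* Managed MSI-X vectors, spread across the online CPUs. */
	nvec = pci_alloc_irq_vectors(pdev, 1, nr_queues,
				     PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (nvec < 0)
		return nvec;

	/* request_irq() for each vector would follow here. */
	return nvec;
}

When the last online CPU of a managed mask goes down, migrate_one_irq() shuts the vector down instead of breaking its affinity, and irq_restore_affinity_of_irq() restarts it once a CPU from the mask comes back.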

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}
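irq_affinity_online_cpu() runs as a CPU hotplug callback on the incoming CPU rather than being called from this file. Its registration lives in the cpuhp state table in kernel/cpu.c and looks roughly like the following excerpt (simplified from v5.4-era kernels):

#ifdef CONFIG_GENERIC_IRQ_MIGRATION
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
#endif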
kernel/irq/cpuhotplug.c (v4.6)
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->common->affinity;
	struct irq_chip *c;
	bool ret = false;

	/*
	 * If this is a per-CPU interrupt, or the affinity does not
	 * include this CPU, then we have nothing to do.
	 */
	if (irqd_is_per_cpu(d) ||
	    !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity) {
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	} else {
		int r = irq_do_set_affinity(d, affinity, false);
		if (r)
			pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
					    d->irq, r);
	}

	return ret;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	unsigned int irq;
	struct irq_desc *desc;
	unsigned long flags;

	local_irq_save(flags);

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken)
			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
					    irq, smp_processor_id());
	}

	local_irq_restore(flags);
}