v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
#include <linux/sched/isolation.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at the general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips
	 * which do not implement effective affinity although the
	 * architecture has enabled the config switch. Use the general
	 * affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce the fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}

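/*
 * Migrate one interrupt away from the outgoing CPU. Returns true if the
 * configured affinity had to be broken, i.e. no online CPU was left in
 * the pending or current affinity mask and the interrupt was forced onto
 * cpu_online_mask.
 */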
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * The IRQ chip might already be torn down, but the irq descriptor
	 * is still in the radix tree. Also, if the chip has no affinity
	 * setter, nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required if:
	 * - the interrupt is per CPU
	 * - the interrupt is not started
	 * - the affinity mask does not include this CPU
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete a possibly pending irq move cleanup. If this interrupt
	 * was moved in hard irq context, the vectors need to be cleaned up
	 * now; the cleanup cannot wait until the interrupt next fires, as
	 * this CPU is on its way out.
	 */
	irq_force_complete_move(desc);

	/*
	 * If a setaffinity is pending, try to reuse the pending mask so
	 * the last change of the affinity does not get lost. If there is
	 * no move pending or the pending mask does not contain any online
	 * CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}
	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}

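/*
 * Whether a managed interrupt should be moved off an isolated CPU onto
 * the upcoming housekeeping CPU @cpu: true if @cpu is part of the
 * HK_FLAG_MANAGED_IRQ housekeeping mask and the effective affinity of
 * @data still contains at least one isolated CPU.
 */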
static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
{
	const struct cpumask *hk_mask;

	if (!housekeeping_enabled(HK_FLAG_MANAGED_IRQ))
		return false;

	hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
	if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
		return false;

	return cpumask_test_cpu(cpu, hk_mask);
}

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around unless the
	 * isolation mechanism requests to move it to an upcoming
	 * housekeeping CPU.
	 */
	if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}
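
The file above only defines the two entry points; the wiring lives elsewhere in the tree. As a minimal sketch of how they are hooked up in v5.9 (details vary by architecture): kernel/cpu.c registers irq_affinity_online_cpu() as the startup callback of the CPUHP_AP_IRQ_AFFINITY_ONLINE hotplug state, and architectures that select GENERIC_IRQ_MIGRATION call irq_migrate_all_off_this_cpu() from their CPU-disable path, with interrupts disabled on the dying CPU:

	/* Sketch: the relevant cpuhp_hp_states[] entry in kernel/cpu.c */
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},

	/* Sketch: an arch CPU-disable path, modelled on arch/arm64/kernel/smp.c */
	int __cpu_disable(void)
	{
		/* ... arch-specific teardown: remove the CPU from the online mask ... */

		/* Migrate all IRQs away from the dying CPU */
		irq_migrate_all_off_this_cpu();
		return 0;
	}
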
v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at the general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips
	 * which do not implement effective affinity although the
	 * architecture has enabled the config switch. Use the general
	 * affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce the fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
	return cpumask_test_cpu(cpu, m);
}

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * The IRQ chip might already be torn down, but the irq descriptor
	 * is still in the radix tree. Also, if the chip has no affinity
	 * setter, nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required if:
	 * - the interrupt is per CPU
	 * - the interrupt is not started
	 * - the affinity mask does not include this CPU
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete a possibly pending irq move cleanup. If this interrupt
	 * was moved in hard irq context, the vectors need to be cleaned up
	 * now; the cleanup cannot wait until the interrupt next fires, as
	 * this CPU is on its way out.
	 */
	irq_force_complete_move(desc);

	/*
	 * If a setaffinity is pending, try to reuse the pending mask so
	 * the last change of the affinity does not get lost. If there is
	 * no move pending or the pending mask does not contain any online
	 * CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}
	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around.
	 */
	if (!irqd_is_single_target(data))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}
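
Relative to v5.4, the v5.9 version pulls in linux/sched/isolation.h and adds the hk_should_isolate() helper: when a CPU comes back online, irq_restore_affinity_of_irq() now also moves single-target managed interrupts onto the upcoming CPU if it is a housekeeping CPU and the interrupt is currently stuck on an isolated one. This ties in with the managed_irq housekeeping flag; as an illustrative sketch (the CPU range is made up), booting with

	isolcpus=managed_irq,2-7

excludes CPUs 2-7 from the HK_FLAG_MANAGED_IRQ housekeeping mask, so managed interrupts are steered onto CPUs 0-1 whenever their affinity masks allow it.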