arch/x86/kernel/apic/ipi.c (v6.13.7)

// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/smp.h>

#include <asm/io_apic.h>

#include "local.h"

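/*
 * Static key controlling whether IPI shorthands (broadcast to all / all
 * but self) may be used instead of per-destination IPIs. It is flipped
 * at runtime by apic_smt_update() below.
 */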
DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;

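/*
 * "no_ipi_broadcast=1" on the kernel command line keeps shorthand
 * broadcasts disabled and forces targeted per-destination IPIs.
 */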
static __init int apic_ipi_shorthand(char *str)
{
	get_option(&str, &apic_ipi_shorthand_off);
	return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);

static int __init print_ipi_mode(void)
{
	pr_info("IPI shorthand broadcast: %s\n",
		apic_ipi_shorthand_off ? "disabled" : "enabled");
	return 0;
}
late_initcall(print_ipi_mode);

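/*
 * Re-evaluate whether IPI shorthand broadcasts are safe to use; invoked
 * from the CPU hotplug paths whenever the set of online CPUs changes.
 */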
void apic_smt_update(void)
{
	/*
	 * Do not switch to broadcast mode if:
	 * - Disabled on the command line
	 * - Only a single CPU is online
	 * - Not all present CPUs have been at least booted once
	 *
	 * The latter is important as the local APIC might be in some
	 * random state and a broadcast might cause havoc. That's
	 * especially true for NMI broadcasting.
	 */
	if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
	    !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
		static_branch_disable(&apic_use_ipi_shorthand);
	} else {
		static_branch_enable(&apic_use_ipi_shorthand);
	}
}

void apic_send_IPI_allbutself(unsigned int vector)
{
	if (num_online_cpus() < 2)
		return;

	if (static_branch_likely(&apic_use_ipi_shorthand))
		__apic_send_IPI_allbutself(vector);
	else
		__apic_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	__apic_send_IPI(cpu, RESCHEDULE_VECTOR);
}

void native_send_call_func_single_ipi(int cpu)
{
	__apic_send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

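/*
 * With shorthands enabled, check whether @mask plus the sending CPU
 * covers all online CPUs (cpumask_or_equal). If so, broadcast: "all"
 * when the sender is itself in @mask, "all but self" otherwise. Any
 * other mask falls back to a targeted send.
 */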
void native_send_call_func_ipi(const struct cpumask *mask)
{
	if (static_branch_likely(&apic_use_ipi_shorthand)) {
		unsigned int cpu = smp_processor_id();

		if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
			goto sendmask;

		if (cpumask_test_cpu(cpu, mask))
			__apic_send_IPI_all(CALL_FUNCTION_VECTOR);
		else if (num_online_cpus() > 1)
			__apic_send_IPI_allbutself(CALL_FUNCTION_VECTOR);
		return;
	}

sendmask:
	__apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}

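/*
 * Send an NMI to an offline CPU. Only valid if the APIC driver supports
 * it (apic->nmi_to_offline_cpu) and the target CPU has been booted at
 * least once, so that its local APIC is in a known state.
 */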
void apic_send_nmi_to_offline_cpu(unsigned int cpu)
{
	if (WARN_ON_ONCE(!apic->nmi_to_offline_cpu))
		return;
	if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, &cpus_booted_once_mask)))
		return;
	apic->send_IPI(cpu, NMI_VECTOR);
}
#endif /* CONFIG_SMP */

static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_XAPIC_DEST_FIELD(mask);
}

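/*
 * Bounded wait for the ICR BUSY bit to clear: up to 1000 polls, 100us
 * apart (about 100ms total). Returns 0 once idle, APIC_ICR_BUSY on
 * timeout. Used for NMI vectors, where waiting forever could hang the
 * panic/kdump path.
 */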
u32 apic_mem_wait_icr_idle_timeout(void)
{
	int cnt;

	for (cnt = 0; cnt < 1000; cnt++) {
		if (!(apic_read(APIC_ICR) & APIC_ICR_BUSY))
			return 0;
		inc_irq_stat(icr_read_retry_count);
		udelay(100);
	}
	return APIC_ICR_BUSY;
}

void apic_mem_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

/*
 * This is safe against interruption because it only writes the lower 32
 * bits of the APIC_ICR register. The destination field is ignored for
 * shorthand IPIs.
 *
 *  wait_icr_idle()
 *  write(ICR2, dest)
 *  NMI
 *	wait_icr_idle()
 *	write(ICR)
 *	wait_icr_idle()
 *  write(ICR)
 *
 * This function does not need to disable interrupts as there is no ICR2
 * interaction. The memory write is direct except when the machine is
 * affected by the 11AP Pentium erratum, which turns the plain write into
 * an XCHG operation.
 */
static void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Wait for the previous ICR command to complete.  Use the bounded
	 * apic_mem_wait_icr_idle_timeout() for the NMI vector as there have
	 * been issues where otherwise the system hangs when the panic CPU
	 * tries to stop the others before launching the kdump kernel.
	 */
	if (unlikely(vector == NMI_VECTOR))
		apic_mem_wait_icr_idle_timeout();
	else
		apic_mem_wait_icr_idle();

	/* Destination field (ICR2) and the destination mode are ignored */
	native_apic_mem_write(APIC_ICR, __prepare_ICR(shortcut, vector, 0));
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int dest_mask, int vector,
				   unsigned int dest_mode)
{
	/* See comment in __default_send_IPI_shortcut() */
	if (unlikely(vector == NMI_VECTOR))
		apic_mem_wait_icr_idle_timeout();
	else
		apic_mem_wait_icr_idle();

	/* Set the IPI destination field in the ICR */
	native_apic_mem_write(APIC_ICR2, __prepare_ICR2(dest_mask));
	/* Send it with the proper destination mode */
	native_apic_mem_write(APIC_ICR, __prepare_ICR(0, vector, dest_mode));
}

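/*
 * The ICR2/ICR write pair in __default_send_IPI_dest_field() is not
 * atomic, so the callers below disable interrupts around it; an IPI sent
 * from an interrupt in between would clobber the destination field (see
 * the interleaving diagram above __default_send_IPI_shortcut()).
 */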
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

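/*
 * Physical destination mode addresses a single APIC per ICR write, so
 * sending to a mask is done as one unicast IPI per CPU in the mask.
 */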
void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	unsigned long cpu;

	local_irq_save(flags);
	for_each_cpu(cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int cpu, this_cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);
	for_each_cpu(cpu, mask) {
		if (cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				 cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	__apic_send_IPI_mask(cpumask_of(cpu), vector);
}

void default_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

#ifdef CONFIG_X86_32
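/*
 * 32-bit flat logical mode: CPU n has logical APIC ID (1 << n), so an
 * ICR destination bitmask can be built directly from CPU numbers. The
 * 8-bit logical destination field limits this to eight CPUs.
 */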
void default_send_IPI_mask_sequence_logical(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);
	for_each_cpu(cpu, mask)
		__default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned int cpu, this_cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);
	for_each_cpu(cpu, mask) {
		if (cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(1U << cpu, vector, APIC_DEST_LOGICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

#ifdef CONFIG_SMP
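/*
 * Map a hardware APIC ID back to a Linux CPU number by scanning the
 * per-CPU x86_cpu_to_apicid table; returns -1 if nothing matches.
 */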
static int convert_apicid_to_cpu(u32 apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

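/*
 * Like smp_processor_id(), but usable in contexts (e.g. crash shutdown)
 * where the per-CPU machinery may not be reliable: the CPU number is
 * derived from the hardware APIC ID instead, falling back to 0.
 */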
int safe_smp_processor_id(void)
{
	u32 apicid;
	int cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = read_apic_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif /* CONFIG_SMP */
#endif /* CONFIG_X86_32 */
arch/x86/kernel/apic/ipi.c (v5.9)

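For comparison, the v5.9 version of the same file follows. Differences visible between the two listings: IPI sends still go through the apic->send_IPI*() callbacks rather than the __apic_send_IPI*() wrappers; the ICR-idle helpers are __xapic_wait_icr_idle() and safe_apic_wait_icr_idle() rather than apic_mem_wait_icr_idle() and apic_mem_wait_icr_idle_timeout(); the destination-field macro is SET_APIC_DEST_FIELD() rather than SET_XAPIC_DEST_FIELD(); the 32-bit logical-mode senders look up x86_cpu_to_logical_apicid and apic->dest_logical instead of using 1U << cpu with APIC_DEST_LOGICAL; and apic_send_nmi_to_offline_cpu() does not exist yet.
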
// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/io_apic.h>

#include "local.h"

DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;

static __init int apic_ipi_shorthand(char *str)
{
	get_option(&str, &apic_ipi_shorthand_off);
	return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);

static int __init print_ipi_mode(void)
{
	pr_info("IPI shorthand broadcast: %s\n",
		apic_ipi_shorthand_off ? "disabled" : "enabled");
	return 0;
}
late_initcall(print_ipi_mode);

void apic_smt_update(void)
{
	/*
	 * Do not switch to broadcast mode if:
	 * - Disabled on the command line
	 * - Only a single CPU is online
	 * - Not all present CPUs have been at least booted once
	 *
	 * The latter is important as the local APIC might be in some
	 * random state and a broadcast might cause havoc. That's
	 * especially true for NMI broadcasting.
	 */
	if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
	    !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
		static_branch_disable(&apic_use_ipi_shorthand);
	} else {
		static_branch_enable(&apic_use_ipi_shorthand);
	}
}

void apic_send_IPI_allbutself(unsigned int vector)
{
	if (num_online_cpus() < 2)
		return;

	if (static_branch_likely(&apic_use_ipi_shorthand))
		apic->send_IPI_allbutself(vector);
	else
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}

void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

void native_send_call_func_ipi(const struct cpumask *mask)
{
	if (static_branch_likely(&apic_use_ipi_shorthand)) {
		unsigned int cpu = smp_processor_id();

		if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
			goto sendmask;

		if (cpumask_test_cpu(cpu, mask))
			apic->send_IPI_all(CALL_FUNCTION_VECTOR);
		else if (num_online_cpus() > 1)
			apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
		return;
	}

sendmask:
	apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}

#endif /* CONFIG_SMP */

static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}

static inline void __xapic_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe.  As we don't care
	 * about the value read we use an atomic rmw access to avoid costly
	 * cli/sti.  Otherwise we use an even cheaper single atomic write
	 * to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field. Also the destination
	 * mode is ignored when a shorthand is used.
	 */
	cfg = __prepare_ICR(shortcut, vector, 0);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				 query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

void default_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif /* CONFIG_X86_32 */