v5.4
// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>

#include "local.h"

DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;

static __init int apic_ipi_shorthand(char *str)
{
	get_option(&str, &apic_ipi_shorthand_off);
	return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);

static int __init print_ipi_mode(void)
{
	pr_info("IPI shorthand broadcast: %s\n",
		apic_ipi_shorthand_off ? "disabled" : "enabled");
	return 0;
}
late_initcall(print_ipi_mode);

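A quick way to see the two states above: the __setup() hook parses the no_ipi_broadcast= boot parameter into apic_ipi_shorthand_off, and print_ipi_mode() reports the result once at late init. Assuming an SMP kernel, booting with no_ipi_broadcast=1 should yield:

	$ dmesg | grep 'IPI shorthand'
	IPI shorthand broadcast: disabled

Without the parameter, apic_ipi_shorthand_off stays 0 and the line reads "enabled".
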
void apic_smt_update(void)
{
	/*
	 * Do not switch to broadcast mode if:
	 * - Disabled on the command line
	 * - Only a single CPU is online
	 * - Not all present CPUs have been at least booted once
	 *
	 * The latter is important as the local APIC might be in some
	 * random state and a broadcast might cause havoc. That's
	 * especially true for NMI broadcasting.
	 */
	if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
	    !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
		static_branch_disable(&apic_use_ipi_shorthand);
	} else {
		static_branch_enable(&apic_use_ipi_shorthand);
	}
}
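
apic_smt_update() drives a static key rather than a plain global so the senders below can test it with static_branch_likely(), which compiles down to a patchable jump instead of a load and compare. A minimal, self-contained sketch of the same jump_label pattern as a hypothetical module (demo_key and everything around it is invented for illustration; the DEFINE_STATIC_KEY_FALSE/static_branch_* API is the one used in this file):

#include <linux/module.h>
#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(demo_key);

static void demo_path(void)
{
	/* Compiled as a patchable NOP/JMP, not a load and test. */
	if (static_branch_likely(&demo_key))
		pr_info("demo: fast path\n");
	else
		pr_info("demo: fallback path\n");
}

static int __init demo_init(void)
{
	demo_path();			 /* key is false: fallback */
	static_branch_enable(&demo_key); /* patch all branch sites */
	demo_path();			 /* now takes the likely path */
	return 0;
}

static void __exit demo_exit(void)
{
	static_branch_disable(&demo_key);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Flipping the key rewrites every branch site in the kernel text, which is comparatively expensive; that is why ipi.c only toggles it from the slow path here and never from the send paths.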

void apic_send_IPI_allbutself(unsigned int vector)
{
	if (num_online_cpus() < 2)
		return;

	if (static_branch_likely(&apic_use_ipi_shorthand))
		apic->send_IPI_allbutself(vector);
	else
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}

void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

void native_send_call_func_ipi(const struct cpumask *mask)
{
	if (static_branch_likely(&apic_use_ipi_shorthand)) {
		unsigned int cpu = smp_processor_id();

		if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
			goto sendmask;

		if (cpumask_test_cpu(cpu, mask))
			apic->send_IPI_all(CALL_FUNCTION_VECTOR);
		else if (num_online_cpus() > 1)
			apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
		return;
	}

sendmask:
	apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}
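
The cpumask_or_equal() test above asks: does mask plus the sending CPU cover exactly the online set? If so, a one-shot shorthand can replace the per-CPU mask send: send_IPI_all when the sender is itself a target, send_IPI_allbutself otherwise. A userspace sketch of that decision with plain bitmasks (all values hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned long online = 0xf;		/* CPUs 0-3 online */
	unsigned long mask   = 0xe;		/* targets: CPUs 1-3 */
	unsigned long self   = 1UL << 0;	/* sender is CPU 0 */

	/* cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask) */
	if ((mask | self) != online)
		puts("send_IPI_mask");		/* sendmask fallback */
	else if (mask & self)
		puts("send_IPI_all");		/* sender included */
	else
		puts("send_IPI_allbutself");	/* everyone but sender */
	return 0;
}

With these values the or-equal test passes and CPU 0 is not a target, so the allbutself shorthand is chosen.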

#endif /* CONFIG_SMP */

static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}

static inline void __xapic_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read, we use an atomic rmw access to avoid
	 * costly cli/sti. Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field. Also the destination
	 * mode is ignored when a shorthand is used.
	 */
	cfg = __prepare_ICR(shortcut, vector, 0);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}
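
For reference, the two MMIO writes above program the xAPIC's 64-bit ICR, which is exposed as two 32-bit registers (offsets and the shift as defined in apicdef.h):

	APIC_ICR2 (offset 0x310): ICR bits 63:32; the destination field sits
	                          in bits 63:56, which is why
	                          SET_APIC_DEST_FIELD(x) is ((x) << 24)
	APIC_ICR  (offset 0x300): ICR bits 31:0; shorthand, destination mode,
	                          delivery mode, vector, and APIC_ICR_BUSY

The order matters: the write to the low word (APIC_ICR) is what actually triggers delivery, so the destination must already be in APIC_ICR2.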

void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				 query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

void default_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}
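
The single-word flattening here (cpumask_bits(cpumask)[0]) is what restricts this path to "smaller machines": in xAPIC flat logical mode the destination field holds one bit per CPU and is only eight bits wide, so at most eight CPUs can be addressed in a single shot.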

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif
v6.2
// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/io_apic.h>

#include "local.h"

DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;

static __init int apic_ipi_shorthand(char *str)
{
	get_option(&str, &apic_ipi_shorthand_off);
	return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);

static int __init print_ipi_mode(void)
{
	pr_info("IPI shorthand broadcast: %s\n",
		apic_ipi_shorthand_off ? "disabled" : "enabled");
	return 0;
}
late_initcall(print_ipi_mode);

void apic_smt_update(void)
{
	/*
	 * Do not switch to broadcast mode if:
	 * - Disabled on the command line
	 * - Only a single CPU is online
	 * - Not all present CPUs have been at least booted once
	 *
	 * The latter is important as the local APIC might be in some
	 * random state and a broadcast might cause havoc. That's
	 * especially true for NMI broadcasting.
	 */
	if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
	    !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
		static_branch_disable(&apic_use_ipi_shorthand);
	} else {
		static_branch_enable(&apic_use_ipi_shorthand);
	}
}

void apic_send_IPI_allbutself(unsigned int vector)
{
	if (num_online_cpus() < 2)
		return;

	if (static_branch_likely(&apic_use_ipi_shorthand))
		apic->send_IPI_allbutself(vector);
	else
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}

void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

void native_send_call_func_ipi(const struct cpumask *mask)
{
	if (static_branch_likely(&apic_use_ipi_shorthand)) {
		unsigned int cpu = smp_processor_id();

		if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
			goto sendmask;

		if (cpumask_test_cpu(cpu, mask))
			apic->send_IPI_all(CALL_FUNCTION_VECTOR);
		else if (num_online_cpus() > 1)
			apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
		return;
	}

sendmask:
	apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}

#endif /* CONFIG_SMP */

static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_XAPIC_DEST_FIELD(mask);
}

static inline void __xapic_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read, we use an atomic rmw access to avoid
	 * costly cli/sti. Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field. Also the destination
	 * mode is ignored when a shorthand is used.
	 */
	cfg = __prepare_ICR(shortcut, vector, 0);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				 query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

void default_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, APIC_DEST_LOGICAL);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif
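
The v6.2 version differs from the v5.4 one above in three small ways: it adds the #include <asm/io_apic.h>, __prepare_ICR2() uses the renamed SET_XAPIC_DEST_FIELD() (presumably to distinguish the xAPIC ICR layout from the x2APIC one), and the three 32-bit logical-mode senders pass the APIC_DEST_LOGICAL constant directly instead of going through the apic->dest_logical indirection.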