v4.17: arch/x86/kernel/apic/ipi.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/interrupt.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/proto.h>
#include <asm/ipi.h>

void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe.  As we don't care
	 * about the value read, we use an atomic rmw access to avoid a
	 * costly cli/sti.  Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field
	 */
	cfg = __prepare_ICR(shortcut, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}
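/*
 * Note the ordering above: the destination goes into ICR2 before the
 * low ICR word is written, because it is the write to APIC_ICR that
 * actually dispatches the IPI (the "fires this off" comment).
 */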
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				 query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}
#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_allbutself(int vector)
{
	/*
	 * If there are no other CPUs in the system then we get an APIC send
	 * error if we try to broadcast, thus avoid sending IPIs in this case.
	 */
	if (num_online_cpus() < 2)
		return;

	__default_local_send_IPI_allbutself(vector);
}

void default_send_IPI_all(int vector)
{
	__default_local_send_IPI_all(vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif
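Both senders above delegate the actual ICR encoding to __prepare_ICR() and __prepare_ICR2(). In the v4.17 tree those helpers live in arch/x86/include/asm/ipi.h; by v5.9 that header was gone, __prepare_ICR had moved into the local APIC headers, and __prepare_ICR2()/__xapic_wait_icr_idle() are inlined in the file itself, as the second listing shows. The following is a sketch of the v4.17-era helpers, reconstructed from memory of those headers rather than copied verbatim: the low ICR word carries the shorthand, destination-mode, delivery-mode and vector bits, while ICR2 holds the 8-bit xAPIC destination ID in its top byte (bits 56 to 63 of the combined 64-bit register, as the comment on __default_send_IPI_dest_field() notes).

/* Sketch of the ICR helpers used above, after asm/ipi.h of this era */
static inline unsigned int __prepare_ICR(unsigned int shortcut, int vector,
					 unsigned int dest)
{
	unsigned int icr = shortcut | dest;	/* shorthand and dest-mode bits */

	switch (vector) {
	default:
		icr |= APIC_DM_FIXED | vector;	/* fixed delivery with vector */
		break;
	case NMI_VECTOR:
		icr |= APIC_DM_NMI;		/* NMI delivery; vector ignored */
		break;
	}
	return icr;
}

static inline unsigned int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);	/* destination into ICR2 bits 24-31 */
}

NMI delivery sets APIC_DM_NMI and ignores the vector field, which is why both versions of the file special-case NMI_VECTOR only when deciding how to wait for the ICR to go idle.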
v5.9: arch/x86/kernel/apic/ipi.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/io_apic.h>

#include "local.h"
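/*
 * Gates the use of IPI shorthands below. apic_smt_update() enables it
 * only once every present CPU has been booted at least once; see the
 * comment in that function.
 */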
DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;

static __init int apic_ipi_shorthand(char *str)
{
	get_option(&str, &apic_ipi_shorthand_off);
	return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);

static int __init print_ipi_mode(void)
{
	pr_info("IPI shorthand broadcast: %s\n",
		apic_ipi_shorthand_off ? "disabled" : "enabled");
	return 0;
}
late_initcall(print_ipi_mode);
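/*
 * Example: booting with "no_ipi_broadcast=1" sets apic_ipi_shorthand_off,
 * and the late initcall above then logs "IPI shorthand broadcast:
 * disabled"; without the parameter it logs "enabled".
 */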
void apic_smt_update(void)
{
	/*
	 * Do not switch to broadcast mode if:
	 * - Disabled on the command line
	 * - Only a single CPU is online
	 * - Not all present CPUs have been at least booted once
	 *
	 * The latter is important as the local APIC might be in some
	 * random state and a broadcast might cause havoc. That's
	 * especially true for NMI broadcasting.
	 */
	if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
	    !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
		static_branch_disable(&apic_use_ipi_shorthand);
	} else {
		static_branch_enable(&apic_use_ipi_shorthand);
	}
}

void apic_send_IPI_allbutself(unsigned int vector)
{
	if (num_online_cpus() < 2)
		return;

	if (static_branch_likely(&apic_use_ipi_shorthand))
		apic->send_IPI_allbutself(vector);
	else
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}

void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

void native_send_call_func_ipi(const struct cpumask *mask)
{
	if (static_branch_likely(&apic_use_ipi_shorthand)) {
		unsigned int cpu = smp_processor_id();

		if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
			goto sendmask;

		if (cpumask_test_cpu(cpu, mask))
			apic->send_IPI_all(CALL_FUNCTION_VECTOR);
		else if (num_online_cpus() > 1)
			apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
		return;
	}

sendmask:
	apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}

#endif /* CONFIG_SMP */
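/*
 * Stand-alone model of the shorthand decision in
 * native_send_call_func_ipi() above: a shorthand is only legal when the
 * requested mask plus the sending CPU covers exactly the online set.
 * pick_ipi() is hypothetical; plain bitmask words stand in for
 * struct cpumask, and a popcount replaces num_online_cpus().
 */
enum ipi_kind { IPI_NONE, IPI_ALL, IPI_ALLBUTSELF, IPI_MASK };

static enum ipi_kind pick_ipi(unsigned long mask, unsigned long online,
			      unsigned int self)
{
	if ((mask | (1UL << self)) != online)
		return IPI_MASK;	/* holes in the set: unicast the mask */
	if (mask & (1UL << self))
		return IPI_ALL;		/* sender included: ALLINC shorthand */
	if (__builtin_popcountl(online) > 1)
		return IPI_ALLBUTSELF;	/* all others: ALLBUT shorthand */
	return IPI_NONE;		/* no one else to notify */
}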
static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}

static inline void __xapic_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}

void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe.  As we don't care
	 * about the value read, we use an atomic rmw access to avoid a
	 * costly cli/sti.  Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field. Also the destination
	 * mode is ignored when a shorthand is used.
	 */
	cfg = __prepare_ICR(shortcut, vector, 0);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				 query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

void default_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}
#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}
void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif
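For context, these low-level senders are normally reached through the generic cross-call API rather than called directly: on x86, smp_call_function_single() funnels through smp_ops to native_send_call_func_single_ipi() above, and multi-CPU variants reach native_send_call_func_ipi(). A hypothetical caller, for illustration only (remote_hello() and kick_cpu_one() are made-up names):

/* Hypothetical example: run a function on CPU 1; the cross-CPU kick is
 * ultimately delivered by native_send_call_func_single_ipi() above. */
#include <linux/smp.h>
#include <linux/printk.h>

static void remote_hello(void *info)
{
	/* Runs on the target CPU, in IPI context */
	pr_info("hello from CPU %d\n", smp_processor_id());
}

static void kick_cpu_one(void)
{
	/* Last argument: wait for remote_hello() to complete */
	smp_call_function_single(1, remote_hello, NULL, 1);
}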