v4.17
// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/interrupt.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/apic.h>
#include <asm/proto.h>
#include <asm/ipi.h>

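/*
 * Send an IPI using one of the ICR shorthand destinations (APIC_DEST_SELF,
 * APIC_DEST_ALLINC or APIC_DEST_ALLBUT). With a shorthand the hardware
 * ignores the destination field in the high ICR dword, so APIC_ICR2 is
 * never written and a single write to APIC_ICR suffices to fire the IPI.
 */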
void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid
	 * costly cli/sti. Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field.
	 */
	cfg = __prepare_ICR(shortcut, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
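/*
 * Note that APIC_ICR2 and APIC_ICR below are two separate MMIO writes, so
 * the 64-bit ICR is not updated atomically. Callers must keep interrupts
 * disabled across this function; every caller in this file does so via
 * local_irq_save()/local_irq_restore().
 */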
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * Prepare the target chip field.
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * Program the ICR.
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

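/*
 * Send @vector to a single CPU, addressed by its physical APIC ID.
 */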
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

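/*
 * As default_send_IPI_mask_sequence_phys(), but skip the CPU doing the
 * sending.
 */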
void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
						 int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				 query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks.
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
						 int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, apic->dest_logical);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines.
 */
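/*
 * The low word of the cpumask is used directly as the ICR destination
 * field. That works because, in flat logical mode, each CPU's logical
 * APIC ID is a single bit, and it limits this path to machines whose
 * CPUs all fit into the 8-bit destination field (at most 8 CPUs), hence
 * the "smaller machines" note above.
 */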
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
	local_irq_restore(flags);
}

void default_send_IPI_allbutself(int vector)
{
	/*
	 * If there are no other CPUs in the system we would get an APIC
	 * send error if we tried to broadcast, so avoid sending IPIs in
	 * that case.
	 */
	if (num_online_cpus() <= 1)
		return;

	__default_local_send_IPI_allbutself(vector);
}

void default_send_IPI_all(int vector)
{
	__default_local_send_IPI_all(vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
}

/* Must come after the send_IPI functions above for inlining. */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

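/*
 * Determine the current CPU number from the hardware APIC ID rather than
 * via smp_processor_id(), so it can be used in contexts (e.g. crash or
 * reboot paths) where the normal per-CPU state may not be trusted.
 * Returns 0 if no usable APIC ID can be read.
 */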
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif
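
/*
 * Usage sketch (illustrative, not part of this file): an APIC driver hooks
 * these helpers into its struct apic callbacks, and the core then invokes
 * them through the global apic pointer (as default_send_IPI_single() does
 * above). The callback names match the v4.17 struct apic; the driver below
 * is hypothetical.
 *
 *	static struct apic apic_example = {
 *		.name			  = "example",
 *		.send_IPI		  = default_send_IPI_single,
 *		.send_IPI_mask		  = default_send_IPI_mask_logical,
 *		.send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical,
 *		.send_IPI_allbutself	  = default_send_IPI_allbutself,
 *		.send_IPI_all		  = default_send_IPI_all,
 *		.send_IPI_self		  = default_send_IPI_self,
 *	};
 */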