arch/x86/kernel/apic/x2apic_cluster.c - v6.13.7
// SPDX-License-Identifier: GPL-2.0

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/apic.h>

#include "local.h"

#define apic_cluster(apicid) ((apicid) >> 4)
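
/*
 * Illustrative note: in x2APIC cluster mode the 32-bit logical
 * destination ID is (cluster << 16) | (1 << (x2apic_id & 0xf)):
 * bits 31:16 select a cluster of up to 16 CPUs, bits 15:0 carry a
 * one-hot position within it. apic_cluster() therefore derives the
 * cluster number by shifting the physical APIC ID right by 4.
 */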

/*
 * __x2apic_send_IPI_mask() possibly needs to read
 * x86_cpu_to_logical_apicid for all online CPUs in a sequential way.
 * Using a per-CPU variable would cost one cache line per CPU.
 */
static u32 *x86_cpu_to_logical_apicid __read_mostly;

static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
static DEFINE_PER_CPU_READ_MOSTLY(struct cpumask *, cluster_masks);

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

static void x2apic_send_IPI(int cpu, int vector)
{
	u32 dest = x86_cpu_to_logical_apicid[cpu];

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}

static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned int cpu, clustercpu;
	struct cpumask *tmpmsk;
	unsigned long flags;
	u32 dest;

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	local_irq_save(flags);

	tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
	cpumask_copy(tmpmsk, mask);
	/* If the IPI should not be sent to self, clear the current CPU */
	if (apic_dest != APIC_DEST_ALLINC)
		__cpumask_clear_cpu(smp_processor_id(), tmpmsk);

	/* Collapse CPUs in a cluster so a single IPI per cluster is sent */
	for_each_cpu(cpu, tmpmsk) {
		struct cpumask *cmsk = per_cpu(cluster_masks, cpu);

		dest = 0;
		for_each_cpu_and(clustercpu, tmpmsk, cmsk)
			dest |= x86_cpu_to_logical_apicid[clustercpu];

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
		/* Remove this cluster's CPUs from tmpmsk */
		cpumask_andnot(tmpmsk, tmpmsk, cmsk);
	}

	local_irq_restore(flags);
}
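
/*
 * Worked example: with CPUs 0-3 in one cluster holding one-hot logical
 * IDs 0x1, 0x2, 0x4 and 0x8, a mask covering all four collapses to
 * dest = 0xf, and a single IPI reaches the whole cluster.
 */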

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static u32 x2apic_calc_apicid(unsigned int cpu)
{
	return x86_cpu_to_logical_apicid[cpu];
}

static void init_x2apic_ldr(void)
{
	struct cpumask *cmsk = this_cpu_read(cluster_masks);

	BUG_ON(!cmsk);

	cpumask_set_cpu(smp_processor_id(), cmsk);
}

/*
 * As an optimisation during boot, set the cluster_mask for all present
 * CPUs at once, to prevent each of them having to iterate over the others
 * to find the existing cluster_mask.
 */
static void prefill_clustermask(struct cpumask *cmsk, unsigned int cpu, u32 cluster)
{
	int cpu_i;

	for_each_present_cpu(cpu_i) {
		struct cpumask **cpu_cmsk = &per_cpu(cluster_masks, cpu_i);
		u32 apicid = apic->cpu_present_to_apicid(cpu_i);

		if (apicid == BAD_APICID || cpu_i == cpu || apic_cluster(apicid) != cluster)
			continue;

		if (WARN_ON_ONCE(*cpu_cmsk == cmsk))
			continue;

		BUG_ON(*cpu_cmsk);
		*cpu_cmsk = cmsk;
	}
}

static int alloc_clustermask(unsigned int cpu, u32 cluster, int node)
{
	struct cpumask *cmsk = NULL;
	unsigned int cpu_i;

	/*
	 * At boot time, the CPU present mask is stable. The cluster mask is
	 * allocated for the first CPU in the cluster and propagated to all
	 * present siblings in the cluster. If the cluster mask is already set
	 * on entry to this function for a given CPU, there is nothing to do.
	 */
	if (per_cpu(cluster_masks, cpu))
		return 0;

	if (system_state < SYSTEM_RUNNING)
		goto alloc;

	/*
	 * On post boot hotplug for a CPU which was not present at boot time,
	 * iterate over all possible CPUs (even those which are not present
	 * any more) to find any existing cluster mask.
	 */
	for_each_possible_cpu(cpu_i) {
		u32 apicid = apic->cpu_present_to_apicid(cpu_i);

		if (apicid != BAD_APICID && apic_cluster(apicid) == cluster) {
			cmsk = per_cpu(cluster_masks, cpu_i);
			/*
			 * If the cluster is already initialized, just store
			 * the mask and return. There's no need to propagate.
			 */
			if (cmsk) {
				per_cpu(cluster_masks, cpu) = cmsk;
				return 0;
			}
		}
	}
	/*
	 * No CPU in the cluster has ever been initialized, so fall through to
	 * the boot time code which will also populate the cluster mask for any
	 * other CPU in the cluster which is (now) present.
	 */
alloc:
	cmsk = kzalloc_node(sizeof(*cmsk), GFP_KERNEL, node);
	if (!cmsk)
		return -ENOMEM;
	per_cpu(cluster_masks, cpu) = cmsk;
	prefill_clustermask(cmsk, cpu, cluster);

	return 0;
}

static int x2apic_prepare_cpu(unsigned int cpu)
{
	u32 phys_apicid = apic->cpu_present_to_apicid(cpu);
	u32 cluster = apic_cluster(phys_apicid);
	u32 logical_apicid = (cluster << 16) | (1 << (phys_apicid & 0xf));
	int node = cpu_to_node(cpu);

	x86_cpu_to_logical_apicid[cpu] = logical_apicid;

	if (alloc_clustermask(cpu, cluster, node) < 0)
		return -ENOMEM;

	if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node))
		return -ENOMEM;

	return 0;
}
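
/*
 * Worked example for the encoding above: physical APIC ID 0x23 is
 * cluster 0x2, in-cluster position 3, so the logical ID becomes
 * (0x2 << 16) | (1 << 3) = 0x00020008.
 */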

static int x2apic_dead_cpu(unsigned int dead_cpu)
{
	struct cpumask *cmsk = per_cpu(cluster_masks, dead_cpu);

	if (cmsk)
		cpumask_clear_cpu(dead_cpu, cmsk);
	free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
	return 0;
}

static int x2apic_cluster_probe(void)
{
	u32 slots;

	if (!x2apic_mode)
		return 0;

	slots = max_t(u32, L1_CACHE_BYTES/sizeof(u32), nr_cpu_ids);
	x86_cpu_to_logical_apicid = kcalloc(slots, sizeof(u32), GFP_KERNEL);
	if (!x86_cpu_to_logical_apicid)
		return 0;

	if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
			      x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
		pr_err("Failed to register X2APIC_PREPARE\n");
		kfree(x86_cpu_to_logical_apicid);
		x86_cpu_to_logical_apicid = NULL;
		return 0;
	}
	init_x2apic_ldr();
	return 1;
}

static struct apic apic_x2apic_cluster __ro_after_init = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,

	.dest_mode_logical		= true,

	.disable_esr			= 0,

	.init_apic_ldr			= init_x2apic_ldr,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,

	.max_apic_id			= UINT_MAX,
	.x2apic_set_max_apicid		= true,
	.get_apic_id			= x2apic_get_apic_id,

	.calc_dest_apicid		= x2apic_calc_apicid,

	.send_IPI			= x2apic_send_IPI,
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,
	.nmi_to_offline_cpu		= true,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi				= native_apic_msr_eoi,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
};

apic_driver(apic_x2apic_cluster);
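
The cluster-collapse logic above can be tried outside the kernel. The
following minimal user-space sketch mirrors how __x2apic_send_IPI_mask()
ORs the one-hot logical IDs of same-cluster CPUs into one destination per
cluster; the CPU count, physical APIC IDs and pending mask below are
invented purely for illustration and are not kernel code:

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 8
/* Same helper as the kernel file: cluster = physical APIC ID >> 4. */
#define apic_cluster(apicid) ((apicid) >> 4)

int main(void)
{
	/* Hypothetical layout: CPUs 0-3 in cluster 0, CPUs 4-7 in cluster 1. */
	uint32_t phys[NR_CPUS] = { 0x0, 0x1, 0x2, 0x3, 0x10, 0x11, 0x12, 0x13 };
	uint32_t logical[NR_CPUS];
	uint8_t pending[NR_CPUS] = { 1, 1, 0, 1, 1, 0, 0, 1 }; /* the IPI mask */
	int cpu, sibling;

	/* Encoding used by x2apic_prepare_cpu(): cluster in bits 31:16,
	 * one-hot in-cluster position in bits 15:0. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		logical[cpu] = ((uint32_t)apic_cluster(phys[cpu]) << 16) |
			       (1u << (phys[cpu] & 0xf));

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		uint32_t dest = 0;

		if (!pending[cpu])
			continue;

		/* Collapse every still-pending CPU of this CPU's cluster
		 * into a single destination, then mark them handled. */
		for (sibling = 0; sibling < NR_CPUS; sibling++) {
			if (pending[sibling] &&
			    apic_cluster(phys[sibling]) == apic_cluster(phys[cpu])) {
				dest |= logical[sibling];
				pending[sibling] = 0;
			}
		}
		/* Prints 0x0000000b for cluster 0 and 0x00010009 for cluster 1. */
		printf("one IPI to dest 0x%08x\n", (unsigned int)dest);
	}
	return 0;
}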
arch/x86/kernel/apic/x2apic_cluster.c - v4.10.11
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/dmar.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/x2apic.h>

static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}

static inline u32 x2apic_cluster(int cpu)
{
	return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
}
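
/*
 * E.g. a logical APIC ID of 0x00020008 (cluster 2, in-cluster bit 3)
 * yields cluster 2 after the 16-bit shift.
 */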

static void x2apic_send_IPI(int cpu, int vector)
{
	u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);

	x2apic_wrmsr_fence();
	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}

static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	struct cpumask *cpus_in_cluster_ptr;
	struct cpumask *ipi_mask_ptr;
	unsigned int cpu, this_cpu;
	unsigned long flags;
	u32 dest;

	x2apic_wrmsr_fence();

	local_irq_save(flags);

	this_cpu = smp_processor_id();

	/*
	 * We are about to modify the mask, so we need our own copy and
	 * must be sure it is manipulated with interrupts disabled.
	 */
	ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask);
	cpumask_copy(ipi_mask_ptr, mask);

	/*
	 * The idea is to send one IPI per cluster.
	 */
	for_each_cpu(cpu, ipi_mask_ptr) {
		unsigned long i;

		cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
		dest = 0;

		/* Collect cpus in cluster. */
		for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
			if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
				dest |= per_cpu(x86_cpu_to_logical_apicid, i);
		}

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
		/*
		 * Discard the cluster's sibling CPUs now so that we do
		 * not send them an IPI a second time.
		 */
		cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
	}

	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}

static int
x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask,
			      unsigned int *apicid)
{
	u32 dest = 0;
	u16 cluster;
	int i;

	for_each_cpu_and(i, cpumask, andmask) {
		if (!cpumask_test_cpu(i, cpu_online_mask))
			continue;
		dest = per_cpu(x86_cpu_to_logical_apicid, i);
		cluster = x2apic_cluster(i);
		break;
	}

	if (!dest)
		return -EINVAL;

	for_each_cpu_and(i, cpumask, andmask) {
		if (!cpumask_test_cpu(i, cpu_online_mask))
			continue;
		if (cluster != x2apic_cluster(i))
			continue;
		dest |= per_cpu(x86_cpu_to_logical_apicid, i);
	}

	*apicid = dest;

	return 0;
}
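
/*
 * Note: the first loop anchors on the first online CPU in the
 * intersection of the two masks; the second loop then ORs in only the
 * logical IDs of that CPU's cluster siblings, because a single logical
 * destination can address CPUs in one cluster only.
 */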

static void init_x2apic_ldr(void)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);

	cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
	for_each_online_cpu(cpu) {
		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
			continue;
		cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
		cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
	}
}

/*
 * At CPU state changes, update the x2apic cluster sibling info.
 */
static int x2apic_prepare_cpu(unsigned int cpu)
{
	if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) {
		free_cpumask_var(per_cpu(cpus_in_cluster, cpu));
		return -ENOMEM;
	}

	return 0;
}

static int x2apic_dead_cpu(unsigned int this_cpu)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
			continue;
		cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
		cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
	}
	free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
	free_cpumask_var(per_cpu(ipi_mask, this_cpu));
	return 0;
}

static int x2apic_cluster_probe(void)
{
	int cpu = smp_processor_id();
	int ret;

	if (!x2apic_mode)
		return 0;

	ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
				x2apic_prepare_cpu, x2apic_dead_cpu);
	if (ret < 0) {
		pr_err("Failed to register X2APIC_PREPARE\n");
		return 0;
	}
	cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
	return 1;
}

static const struct cpumask *x2apic_cluster_target_cpus(void)
{
	return cpu_all_mask;
}

/*
 * Each x2apic cluster is an allocation domain.
 */
static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
					     const struct cpumask *mask)
{
	/*
	 * To minimize vector pressure, the default case (boot, device
	 * bringup, etc.) will use a single CPU as the interrupt destination.
	 *
	 * On explicit migration requests coming from irqbalance etc.,
	 * interrupts will be routed to the x2apic cluster (cluster-id
	 * derived from the first cpu in the mask) members specified
	 * in the mask.
	 */
	if (mask == x2apic_cluster_target_cpus())
		cpumask_copy(retmask, cpumask_of(cpu));
	else
		cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
}
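
/*
 * Concrete example: if an explicit affinity request names CPUs 2-5
 * while CPU 2's cluster contains CPUs 0-3, retmask becomes {2, 3};
 * the default boot-time case targets a single CPU instead.
 */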

static struct apic apic_x2apic_cluster __ro_after_init = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_valid			= x2apic_apic_id_valid,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	.irq_dest_mode			= 1, /* logical */

	.target_cpus			= x2apic_cluster_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,

	.vector_allocation_domain	= cluster_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.phys_pkg_id			= x2apic_phys_pkg_id,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,

	.cpu_mask_to_apicid_and		= x2apic_cpu_mask_to_apicid_and,

	.send_IPI			= x2apic_send_IPI,
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_cluster);