arch/x86/kernel/setup_percpu.c (v5.4)
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

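/*
 * Illustrative sketch, not part of this file: conceptually, reaching a
 * per-CPU variable for a given CPU adds that CPU's slot in
 * __per_cpu_offset to the variable's link-time address.  Ignoring
 * sparse address-space annotations, per_cpu() boils down to roughly
 * the following (my_per_cpu is a hypothetical name):
 *
 *	#define my_per_cpu(var, cpu) \
 *		(*(typeof(var) *)((unsigned long)&(var) + __per_cpu_offset[cpu]))
 *
 * The this_cpu_*() accessors instead go through the %gs (64-bit) or
 * %fs (32-bit) segment base plus this_cpu_off, which avoids the array
 * lookup on the fast path.
 */
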
/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif
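
/*
 * Example sketch, not part of this file: the reservation above is what
 * lets a module declare static percpu data and still reach it with
 * 32-bit relocations, because the allocation is served out of the
 * first chunk's reserved region.  A hypothetical module would simply
 * write (my_hits and bump are made-up names):
 *
 *	DEFINE_PER_CPU(u64, my_hits);	// placed in the reserved region
 *
 *	static void bump(void)
 *	{
 *		this_cpu_inc(my_hits);
 *	}
 */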

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif
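
/*
 * Note, editorial: the loop above returns true only once it has seen
 * two distinct online nodes with valid NODE_DATA among the possible
 * CPUs, so a single-node machine never forces the page-based
 * first-chunk allocator chosen in setup_per_cpu_areas() below.
 */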

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = memblock_alloc_from(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = memblock_alloc_try_nid(size, align, goal,
					     MEMBLOCK_ALLOC_ACCESSIBLE,
					     node);

		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return memblock_alloc_from(size, align, goal);
#endif
}
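
/*
 * Note, editorial: the "goal" of __pa(MAX_DMA_ADDRESS) above asks
 * memblock to place percpu data above the legacy ISA DMA region when
 * possible, so these early allocations do not consume the scarce low
 * memory that 24-bit DMA users may still need.
 */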

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}
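
/*
 * Note, editorial: LOCAL_DISTANCE (10) and REMOTE_DISTANCE (20) are the
 * default node-distance constants from <linux/topology.h>, following
 * the ACPI SLIT convention.  pcpu_embed_first_chunk() only compares the
 * returned distances to decide which CPUs may share an allocation
 * group, so the absolute values are not significant here.
 */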

static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
					      0xFFFFF);

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}
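
/*
 * Note, editorial: 0x8092 encodes a present, ring-0, read/write data
 * segment with 4 KiB granularity, so the 0xFFFFF limit spans the full
 * 4 GiB address space.  With the base set to per_cpu_offset(cpu), the
 * 32-bit this_cpu_*() accessors can reach this CPU's area through the
 * %fs segment register.
 */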

void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and vmalloc area isn't spacious enough
	 * on 32bit.  Use page in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
		size_t atom_size;

		/*
		 * On 64bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD.  This, in the future,
		 * can also allow using PMD mappings in vmalloc area.  Use
		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
		 * and large vmalloc area allocs can easily fail.
		 */
#ifdef CONFIG_X86_64
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif
		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_fc_alloc, pcpu_fc_free);
		if (rc < 0)
			pr_warning("%s allocator failed (%d), falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
					   pcpup_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
		per_cpu(x86_cpu_to_logical_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu numa_node is correct when the boot
		 * cpu is on a node that doesn't have memory installed.
		 * Also cpu_up() will call cpu_to_node() for APs when
		 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set
		 * up later with c_init aka intel_init/amd_init.
		 * So set them all (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using .init.data
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (!cpu)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
	early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();

	/*
	 * Sync back kernel address range again.  We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm.  We can't reliably pick up percpu mappings
	 * using vmalloc_fault(), because exception dispatch needs
	 * percpu data.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();
}
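
/*
 * Example sketch, not part of this file: once setup_per_cpu_areas() has
 * run, the ordinary per-CPU accessors work for every possible CPU, e.g.:
 *
 *	int me = this_cpu_read(cpu_number);	// what raw_smp_processor_id()
 *						// reads on x86
 *	int *p = per_cpu_ptr(&cpu_number, 1);	// CPU 1's copy
 */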
arch/x86/kernel/setup_percpu.c (v5.14.15)
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NUMA
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NUMA
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = memblock_alloc_from(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = memblock_alloc_try_nid(size, align, goal,
					     MEMBLOCK_ALLOC_ACCESSIBLE,
					     node);

		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return memblock_alloc_from(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NUMA
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}

static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
					      0xFFFFF);

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}

void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%u nr_node_ids:%u\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and vmalloc area isn't spacious enough
	 * on 32bit.  Use page in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
		size_t atom_size;

		/*
		 * On 64bit, use PMD_SIZE for atom_size so that embedded
		 * percpu areas are aligned to PMD.  This, in the future,
		 * can also allow using PMD mappings in vmalloc area.  Use
		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
		 * and large vmalloc area allocs can easily fail.
		 */
#ifdef CONFIG_X86_64
		atom_size = PMD_SIZE;
#else
		atom_size = PAGE_SIZE;
#endif
		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_fc_alloc, pcpu_fc_free);
		if (rc < 0)
			pr_warn("%s allocator failed (%d), falling back to page size\n",
				pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
					   pcpup_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
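		/*
		 * Note, editorial: relative to v5.4 above, the
		 * setup_stack_canary_segment(cpu) call is gone here; the
		 * 32-bit stack protector was reworked (around v5.13) to
		 * keep the canary in an ordinary percpu variable, so no
		 * dedicated GDT segment needs to be programmed anymore.
		 */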
 
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
		per_cpu(x86_cpu_to_acpiid, cpu) =
			early_per_cpu_map(x86_cpu_to_acpiid, cpu);
#endif
#ifdef CONFIG_X86_32
		per_cpu(x86_cpu_to_logical_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
#endif
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
		/*
		 * Ensure that the boot cpu numa_node is correct when the boot
		 * cpu is on a node that doesn't have memory installed.
		 * Also cpu_up() will call cpu_to_node() for APs when
		 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set
		 * up later with c_init aka intel_init/amd_init.
		 * So set them all (boot cpu and all APs).
		 */
		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
#endif
		/*
		 * Up to this point, the boot CPU has been using .init.data
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (!cpu)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
	early_per_cpu_ptr(x86_cpu_to_acpiid) = NULL;
#endif
#ifdef CONFIG_X86_32
	early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
#endif
#ifdef CONFIG_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();

	/*
	 * Sync back kernel address range again.  We already did this in
	 * setup_arch(), but percpu data also needs to be available in
	 * the smpboot asm and arch_sync_kernel_mappings() doesn't sync to
	 * swapper_pg_dir on 32-bit. The per-cpu mappings need to be available
	 * there too.
	 *
	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
	 * this call?
	 */
	sync_initial_page_table();
}