// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/slab.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>

/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

pg_data_t *pgdat_list[MAX_NUMNODES];

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define MAX_NODE_ALIGN_OFFSET	(32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)					\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +			\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
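
/*
 * For illustration, assuming PERCPU_PAGE_SIZE is 64KB (0x10000; the real
 * value is configuration dependent):
 *
 *	NODEDATA_ALIGN(0x4213000, 2)
 *		= roundup(0x4213000, 1MB) + ((2 * 0x10000) & (32MB - 1))
 *		= 0x4300000 + 0x20000
 *		= 0x4320000
 *
 * i.e. the candidate address is rounded up to a 1MB boundary and each node
 * is then pushed out by a further node * PERCPU_PAGE_SIZE (wrapping every
 * 32MB), so per-node structures on different nodes do not alias to the same
 * cache lines.
 */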

/**
 * build_node_maps - callback to setup mem_data structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Detect extents of each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary. Any non-existent pages will simply be part of the virtual
 * memmap.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long spfn, epfn, end = start + len;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;

	if (!mem_data[node].min_pfn) {
		mem_data[node].min_pfn = spfn;
		mem_data[node].max_pfn = epfn;
	} else {
		mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn);
		mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn);
	}

	return 0;
}
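
/*
 * As a worked example, assuming a 16MB IA64_GRANULE_SIZE and 16KB pages
 * (both are configuration dependent): a block [0x1234000, 0x5678000) is
 * widened to the granule-aligned range [0x1000000, 0x6000000), i.e.
 * spfn = 0x400 and epfn = 0x1800. Pages inside the widened range with no
 * backing memory are simply covered by the virtual memmap.
 */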

/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet. Note that node 0 will also count all non-existent cpus.
 */
static int __meminit early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for_each_possible_early_cpu(cpu)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}

/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __meminit compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize = PAGE_ALIGN(pernodesize);
	return pernodesize;
}

/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node. Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

	for_each_possible_early_cpu(cpu) {
		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;

		if (node != node_cpuid[cpu].nid)
			continue;

		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
			__per_cpu_start;

		/*
		 * The percpu area for cpu0 is moved from the __init area,
		 * which is set up by head.S and used until this point.
		 * Update ar.k3. This move ensures that the percpu area for
		 * cpu0 is on the correct node and that its virtual address
		 * isn't insanely far from the other percpu areas, which is
		 * important for the congruent percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA,
				    (unsigned long)cpu_data -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
#endif
	return cpu_data;
}

#ifdef CONFIG_SMP
/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas. All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int *cpu_map;
	void *base;
	unsigned long base_offset;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int node, prev_node, unit, nr_units;

	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	cpu_map = ai->groups[0].cpu_map;

	/* determine base */
	base = (void *)ULONG_MAX;
	for_each_possible_cpu(cpu)
		base = min(base,
			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
	base_offset = (void *)__per_cpu_start - base;

	/* build cpu_map, units are grouped by node */
	unit = 0;
	for_each_node(node)
		for_each_possible_cpu(cpu)
			if (node == node_cpuid[cpu].nid)
				cpu_map[unit++] = cpu;
	nr_units = unit;

	/* set basic parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = PERCPU_PAGE_SIZE;
	ai->atom_size = PAGE_SIZE;
	ai->alloc_size = PERCPU_PAGE_SIZE;

	/*
	 * CPUs are put into groups according to node. Walk cpu_map
	 * and create new groups at node boundaries.
	 */
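	/*
	 * For example, with a hypothetical cpu_map of {0, 1, 4, 5, 2, 3},
	 * where cpus 0, 1, 4 and 5 sit on node 0 and cpus 2 and 3 on node 1,
	 * the loop below creates two groups: group 0 covering units 0-3 and
	 * group 1 covering units 4-5, each taking its base_offset from the
	 * first cpu in the group.
	 */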
	prev_node = NUMA_NO_NODE;
	ai->nr_groups = 0;
	for (unit = 0; unit < nr_units; unit++) {
		cpu = cpu_map[unit];
		node = node_cpuid[cpu].nid;

		if (node == prev_node) {
			gi->nr_units++;
			continue;
		}
		prev_node = node;

		gi = &ai->groups[ai->nr_groups++];
		gi->nr_units = 1;
		gi->base_offset = __per_cpu_offset[cpu] + base_offset;
		gi->cpu_map = &cpu_map[unit];
	}

	pcpu_setup_first_chunk(ai, base);
	pcpu_free_alloc_info(ai);
}
#endif

/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
				unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	pgdat_list[node] = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);

	return;
}

/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct. Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |    local pg_data_t *   |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized. We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long spfn, epfn;
	unsigned long pernodesize = 0, pernode;

	spfn = start >> PAGE_SHIFT;
	epfn = (start + len) >> PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_maps().
	 */
	if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}

/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size;
	int node;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		memblock_reserve(base, size);
	}
}

static void __meminit scatter_node_data(void)
{
	pg_data_t **dst;
	int node;

	/*
	 * for_each_online_node() can't be used here: node_online_map is not
	 * yet set for hot-added nodes at this point, because we are halfway
	 * through initialization of the new node's structures. If
	 * for_each_online_node() were used, a new node's pg_data_ptrs would
	 * not be initialized. Check pgdat_list[] instead.
	 */
	for_each_node(node) {
		if (pgdat_list[node]) {
			dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
			memcpy(dst, pgdat_list, sizeof(pgdat_list));
		}
	}
}
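
/*
 * Each node thus ends up with a node-local copy of the whole pgdat_list in
 * its ia64_node_data->pg_data_ptrs[] array, so NODE_DATA()-style lookups can
 * be satisfied from local memory instead of chasing a pointer into another
 * node's memory (see asm/nodedata.h for the exact definition).
 */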

/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;

	scatter_node_data();

#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for_each_possible_early_cpu(cpu) {
		node = node_cpuid[cpu].nid;
		per_cpu(ia64_cpu_info, cpu).node_data =
			mem_data[node].node_data;
	}
#else
	{
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&ia64_cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}

/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 * node, but fall back to any other online node when the allocation for the
 * best node fails.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = NUMA_NO_NODE, node, anynode = 0;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == NUMA_NO_NODE)
		bestnode = anynode;

	ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
				     __pa(MAX_DMA_ADDRESS),
				     MEMBLOCK_ALLOC_ACCESSIBLE,
				     bestnode);
	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n",
		      __func__, pernodesize, PERCPU_PAGE_SIZE, bestnode,
		      __pa(MAX_DMA_ADDRESS));

	return ptr;
}

/**
 * memory_less_nodes - allocate and initialize CPU only nodes pernode
 *	information.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}

	return;
}

/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();
	efi_memmap_walk(filter_memory, register_active_ranges);

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
	efi_memmap_walk(find_max_min_low_pfn, NULL);

	for_each_online_node(node)
		if (mem_data[node].min_pfn)
			node_clear(node, memory_less_mask);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}

#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void *per_cpu_init(void)
{
	int cpu;
	static int first_time = 1;

	if (first_time) {
		first_time = 0;
		for_each_possible_early_cpu(cpu)
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */

/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs. Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}
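
/*
 * As a sketch of the splitting above: if node_memblk[] described, say,
 * [0x0, 0x4000000) on node 0 and [0x4000000, 0x8000000) on node 1 (purely
 * hypothetical values), then call_pernode_memory(0x3f00000, 0x200000, func)
 * would invoke func(0x3f00000, 0x100000, 0) followed by
 * func(0x4000000, 0x100000, 1).
 */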

/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long pfn_offset = 0;
	unsigned long max_pfn = 0;
	int node;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	sparse_init();

#ifdef CONFIG_VIRTUAL_MEM_MAP
	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
		sizeof(struct page));
	vmem_map = (struct page *) VMALLOC_END;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif
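
	/*
	 * To give a feel for the sizing above: with 16KB pages and a 64-byte
	 * struct page (illustrative values only), covering 4GB of address
	 * space (max_low_pfn = 0x40000) costs roughly 0x40000 * 64 = 16MB of
	 * virtual space, carved out of the top of the vmalloc area and later
	 * backed by page tables only where memory actually exists.
	 */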

	for_each_online_node(node) {
		pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
		if (mem_data[node].max_pfn > max_pfn)
			max_pfn = mem_data[node].max_pfn;
	}

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_pfn;
	free_area_init(max_zone_pfns);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}

#ifdef CONFIG_MEMORY_HOTPLUG
pg_data_t *arch_alloc_nodedata(int nid)
{
	unsigned long size = compute_pernodesize(nid);

	return kzalloc(size, GFP_KERNEL);
}

void arch_free_nodedata(pg_data_t *pgdat)
{
	kfree(pgdat);
}

void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
{
	pgdat_list[update_node] = update_pgdat;
	scatter_node_data();
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
}
#endif