/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>

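/*
 * Size, in bytes, of the largest hole between contiguous chunks of
 * physical memory, as found by walking the EFI memmap.  paging_init()
 * compares it against LARGE_GAP to decide whether mem_map should be
 * virtually mapped.
 */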
#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long max_gap;
#endif

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(unsigned int filter)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas(filter);
	printk(KERN_INFO "Node memory in pages:\n");
	for_each_online_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;
		int nid = pgdat->node_id;

		if (skip_free_areas_node(filter, nid))
			continue;
		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
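			/* a full pgdat scan can take long enough to trip the NMI watchdog */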
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else {
#ifdef CONFIG_VIRTUAL_MEM_MAP
				if (max_gap < LARGE_GAP)
					continue;
#endif
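				/* jump past the hole; the -1 compensates for the loop's i++ */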
				i = vmemmap_find_next_valid_pfn(nid, i) - 1;
				continue;
			}
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
		       "shrd: %10d, swpd: %10d\n", nid,
		       present, reserved, shared, cached);
	}
	printk(KERN_INFO "%ld pages of RAM\n", total_present);
	printk(KERN_INFO "%d reserved pages\n", total_reserved);
	printk(KERN_INFO "%d pages shared\n", total_shared);
	printk(KERN_INFO "%d pages swap cached\n", total_cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}

/* physical address where the bootmem map is located */
unsigned long bootmap_start;

/**
 * find_bootmap_location - callback to find a memory area for the bootmap
 * @start: start of region
 * @end: end of region
 * @arg: pointer to the number of bytes needed for the bootmap
 *
 * Find a place to put the bootmap and return its starting address in
 * bootmap_start. This address must be page-aligned.
 */
static int __init
find_bootmap_location (u64 start, u64 end, void *arg)
{
	u64 needed = *(unsigned long *)arg;
	u64 range_start, range_end, free_start;
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif

	free_start = PAGE_OFFSET;

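	/*
	 * Walk the reserved regions (assumed sorted by start address)
	 * and look at the free window in front of each one, clipped to
	 * [start..end); take the first window large enough to hold the
	 * bootmap.
	 */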
	for (i = 0; i < num_rsvd_regions; i++) {
		range_start = max(start, free_start);
		range_end = min(end, rsvd_region[i].start & PAGE_MASK);

		free_start = PAGE_ALIGN(rsvd_region[i].end);

		if (range_end <= range_start)
			continue;	/* skip over empty range */

		if (range_end - range_start >= needed) {
			bootmap_start = __pa(range_start);
			return -1;	/* done */
		}

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;
	}
	return 0;
}

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void * __cpuinit
per_cpu_init (void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
	 * up front so that APs don't need to call get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * The percpu area for cpu0 is moved from the __init area,
		 * which is set up by head.S and used until this point.
		 * Update ar.k3.  This move ensures that the percpu area
		 * for cpu0 is on the correct node and that its virtual
		 * address isn't insanely far from the other percpu areas,
		 * which is important for the congruent percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

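		/* advance to the next CPU's PERCPU_PAGE_SIZE slot */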
		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

static inline void
alloc_per_cpu_data(void)
{
	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}

/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas. All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int rc;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/*
	 * Set parameters: each percpu unit is a single PERCPU_PAGE_SIZE
	 * page, carved into the static area, the module reserve, and
	 * whatever remains for dynamic allocations.
	 */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = PERCPU_PAGE_SIZE;
	ai->atom_size = PAGE_SIZE;
	ai->alloc_size = PERCPU_PAGE_SIZE;

	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);

	pcpu_free_alloc_info(ai);
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	unsigned long bootmap_size;

	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;
	/* how many bytes to cover all the pages */
	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;

	/* look for a location to hold the bootmap */
	bootmap_start = ~0UL;
	efi_memmap_walk(find_bootmap_location, &bootmap_size);
	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	bootmap_size = init_bootmem_node(NODE_DATA(0),
			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);

	/* Free all available memory, then mark bootmem-map as being in use. */
	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
	reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);

	find_initrd();

	alloc_per_cpu_data();
}

static int count_pages(u64 start, u64 end, void *arg)
{
	unsigned long *count = arg;

	*count += (end - start) >> PAGE_SHIFT;
	return 0;
}

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	num_physpages = 0;
	efi_memmap_walk(count_pages, &num_physpages);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
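	/*
	 * If the largest hole is small, a flat mem_map spanning the whole
	 * range is cheap enough; otherwise map mem_map virtually so that
	 * the hole costs no physical memory.
	 */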
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
		free_area_init_nodes(max_zone_pfns);
	} else {
		unsigned long map_size;

		/* allocate virtual_mem_map */

		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
			sizeof(struct page));
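		/* carve the virtual mem_map out of the top of the vmalloc area */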
		VMALLOC_END -= map_size;
		vmem_map = (struct page *) VMALLOC_END;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map() makes an adjustment for mem_map
		 * which isn't compatible with vmem_map.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();
		free_area_init_nodes(max_zone_pfns);

		printk(KERN_INFO "Virtual mem_map starts at 0x%p\n", mem_map);
	}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
	add_active_range(0, 0, max_low_pfn);
	free_area_init_nodes(max_zone_pfns);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/sizes.h>

#include <asm/efi.h>
#include <asm/meminit.h>
#include <asm/sections.h>
#include <asm/mca.h>

/* physical address where the bootmem map is located */
unsigned long bootmap_start;

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void *per_cpu_init(void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
	 * up front so that APs don't need to call get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * The percpu area for cpu0 is moved from the __init area,
		 * which is set up by head.S and used until this point.
		 * Update ar.k3.  This move ensures that the percpu area
		 * for cpu0 is on the correct node and that its virtual
		 * address isn't insanely far from the other percpu areas,
		 * which is important for the congruent percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

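		/* advance to the next CPU's PERCPU_PAGE_SIZE slot */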
		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

static inline void
alloc_per_cpu_data(void)
{
	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();

	cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
				       __pa(MAX_DMA_ADDRESS));
	if (!cpu_data)
		panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
		      __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}

/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas. All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/*
	 * Set parameters: each percpu unit is a single PERCPU_PAGE_SIZE
	 * page, carved into the static area, the module reserve, and
	 * whatever remains for dynamic allocations.
	 */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = PERCPU_PAGE_SIZE;
	ai->atom_size = PAGE_SIZE;
	ai->alloc_size = PERCPU_PAGE_SIZE;

	pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	pcpu_free_alloc_info(ai);
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;

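	/* register the whole contiguous range with memblock on node 0 */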
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0, MEMBLOCK_NONE);

	find_initrd();

	alloc_per_cpu_data();
}

static int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

static void __init verify_gap_absence(void)
{
	unsigned long max_gap = 0;

	/* Forbid FLATMEM if the largest hole is bigger than 1G */
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap >= SZ_1G)
		panic("Cannot use FLATMEM with %ldMB hole\n"
		      "Please switch over to SPARSEMEM\n",
		      (max_gap >> 20));
}

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA32] = max_dma;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	verify_gap_absence();

	free_area_init(max_zone_pfns);
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}