/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>

#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mca.h>

#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long max_gap;
#endif

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(unsigned int filter)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas(filter);
	printk(KERN_INFO "Node memory in pages:\n");
	for_each_online_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;
		int nid = pgdat->node_id;

		if (skip_free_areas_node(filter, nid))
			continue;
		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
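			/*
			 * An invalid pfn is a hole in the memory map.
			 * With only small holes (max_gap < LARGE_GAP)
			 * the map is walked one pfn at a time; across
			 * a large hole, skip ahead to the next valid
			 * pfn instead.
			 */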
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else {
#ifdef CONFIG_VIRTUAL_MEM_MAP
				if (max_gap < LARGE_GAP)
					continue;
#endif
				i = vmemmap_find_next_valid_pfn(nid, i) - 1;
				continue;
			}
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
		       "shrd: %10d, swpd: %10d\n", nid,
		       present, reserved, shared, cached);
	}
	printk(KERN_INFO "%ld pages of RAM\n", total_present);
	printk(KERN_INFO "%d reserved pages\n", total_reserved);
	printk(KERN_INFO "%d pages shared\n", total_shared);
	printk(KERN_INFO "%d pages swap cached\n", total_cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}


/* physical address where the bootmem map is located */
unsigned long bootmap_start;

/**
 * find_bootmap_location - callback to find a memory area for the bootmap
 * @start: start of region
 * @end: end of region
 * @arg: pointer to the number of bytes needed for the bootmap
 *
 * Find a place to put the bootmap and return its starting address in
 * bootmap_start.  This address must be page-aligned.
 * Returns -1 to stop the EFI memory map walk once a suitable range has
 * been found, 0 to keep looking.
 */
static int __init
find_bootmap_location (u64 start, u64 end, void *arg)
{
	u64 needed = *(unsigned long *)arg;
	u64 range_start, range_end, free_start;
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif

	free_start = PAGE_OFFSET;

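	/*
	 * Walk the reserved regions in address order; the free range
	 * between the end of the previous region and the start of the
	 * current one is a candidate for the bootmap.
	 */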
	for (i = 0; i < num_rsvd_regions; i++) {
		range_start = max(start, free_start);
		range_end   = min(end, rsvd_region[i].start & PAGE_MASK);

		free_start = PAGE_ALIGN(rsvd_region[i].end);

		if (range_end <= range_start)
			continue; /* skip over empty range */

		if (range_end - range_start >= needed) {
			bootmap_start = __pa(range_start);
			return -1;	/* done */
		}

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;
	}
	return 0;
}
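
/*
 * Illustrative sketch (not part of this file): efi_memmap_walk() is
 * assumed to call its callback for each chunk of usable memory and to
 * abort the walk when the callback returns a negative value, roughly:
 *
 *	for each usable region [start, end) in the EFI memory map:
 *		if ((*callback)(start, end, arg) < 0)
 *			break;
 *
 * which is why find_bootmap_location() returns -1 as soon as a large
 * enough range has been found, and 0 to keep searching.
 */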

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void * __cpuinit
per_cpu_init (void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
	 * so that the APs don't have to call get_zeroed_page().
	 */
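	/*
	 * cpu_data points at the contiguous chunk reserved by
	 * alloc_per_cpu_data(); hand out one PERCPU_PAGE_SIZE unit
	 * from it to each possible cpu.
	 */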
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * The percpu area for cpu0 is moved from the __init area,
		 * which is set up by head.S and used until this point.
		 * Update ar.k3.  This move ensures that the percpu area
		 * for cpu0 is on the correct node and that its virtual
		 * address isn't insanely far from the other percpu areas,
		 * which is important for the congruent percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

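/*
 * Reserve one percpu page per possible cpu from bootmem, aligned to
 * PERCPU_PAGE_SIZE; the __pa(MAX_DMA_ADDRESS) goal steers the
 * allocation above the DMA zone when possible.
 */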
static inline void
alloc_per_cpu_data(void)
{
	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}

/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;
	int rc;

	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
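	/*
	 * Each unit is exactly one percpu page, so the static,
	 * reserved and dynamic portions together must fit in
	 * PERCPU_PAGE_SIZE.
	 */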
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;

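	/*
	 * The first chunk sits in the already-initialized unit of
	 * cpu0, so pass its base address rather than allocating a
	 * new area.
	 */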
	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	if (rc)
		panic("failed to setup percpu area (err=%d)", rc);

	pcpu_free_alloc_info(ai);
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	unsigned long bootmap_size;

	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;
	/*
	 * how many bytes to cover all the pages: the bootmem bitmap
	 * uses one bit per page frame
	 */
	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;

	/* look for a location to hold the bootmap */
	bootmap_start = ~0UL;
	efi_memmap_walk(find_bootmap_location, &bootmap_size);
	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	bootmap_size = init_bootmem_node(NODE_DATA(0),
			(bootmap_start >> PAGE_SHIFT), 0, max_pfn);

	/* Free all available memory, then mark bootmem-map as being in use. */
	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
	reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);

	find_initrd();

	alloc_per_cpu_data();
}

static int count_pages(u64 start, u64 end, void *arg)
{
	unsigned long *count = arg;

	*count += (end - start) >> PAGE_SHIFT;
	return 0;
}

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	num_physpages = 0;
	efi_memmap_walk(count_pages, &num_physpages);

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	efi_memmap_walk(filter_memory, register_active_ranges);
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
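	/*
	 * If the largest hole in physical memory is small, a flat
	 * mem_map wastes little space; otherwise build a virtually
	 * mapped mem_map so that holes need no backing pages.
	 */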
	if (max_gap < LARGE_GAP) {
		vmem_map = (struct page *) 0;
		free_area_init_nodes(max_zone_pfns);
	} else {
		unsigned long map_size;

		/* allocate virtual_mem_map */

		map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
			sizeof(struct page));
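		/*
		 * Steal map_size bytes from the top of the vmalloc
		 * area for the virtually mapped mem_map; the pfn
		 * count is rounded up to MAX_ORDER_NR_PAGES so the
		 * array covers whole MAX_ORDER blocks, as the buddy
		 * allocator expects.
		 */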
		VMALLOC_END -= map_size;
		vmem_map = (struct page *) VMALLOC_END;
		efi_memmap_walk(create_mem_map_page_table, NULL);

		/*
		 * alloc_node_mem_map makes an adjustment for mem_map
		 * which isn't compatible with vmem_map.
		 */
		NODE_DATA(0)->node_mem_map = vmem_map +
			find_min_pfn_with_active_regions();
		free_area_init_nodes(max_zone_pfns);

		printk("Virtual mem_map starts at 0x%p\n", mem_map);
	}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
	free_area_init_nodes(max_zone_pfns);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}