v3.15
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>

#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	phys_addr_t start;
	unsigned long size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
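
/*
 * Example (hypothetical values): booting with "initrd=0x60000000,8M"
 * makes memparse() set phys_initrd_start = 0x60000000 and
 * phys_initrd_size = 0x800000, since memparse() accepts K/M/G suffixes.
 */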

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo *mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas(filter);

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	/* This assumes the meminfo array is properly sorted */
	*min = bank_pfn_start(&mi->bank[0]);
	for_each_bank (i, mi)
		if (mi->bank[i].highmem)
			break;
	*max_low = bank_pfn_end(&mi->bank[i - 1]);
	*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}
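
/*
 * Worked example (hypothetical bank layout): with
 * bank[0] = 0x80000000..0xa0000000 (lowmem) and
 * bank[1] = 0xa0000000..0xc0000000 marked highmem, find_limits()
 * yields min = PFN of 0x80000000, max_low = PFN of 0xa0000000 and
 * max_high = PFN of 0xc0000000.
 */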

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
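
/*
 * Worked example (hypothetical numbers): with size[0] = 0x40000 pages
 * of lowmem and dma_size = 0x10000 pages, the split above leaves
 * size[ZONE_DMA] = 0x10000 and size[ZONE_NORMAL] = 0x30000, with any
 * holes accounted entirely to ZONE_NORMAL.
 */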
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
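
/*
 * Example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): a
 * dma_zone_size of SZ_64M with PHYS_OFFSET 0x80000000 gives
 * arm_dma_limit = 0x83ffffff and arm_dma_pfn_limit = 0x83fff.
 */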

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

void __init arm_memblock_init(struct meminfo *mi,
	const struct machine_desc *mdesc)
{
	int i;

	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	/* FDT scan will populate initrd_start */
	if (initrd_start && !phys_initrd_size) {
		phys_initrd_start = __virt_to_phys(initrd_start);
		phys_initrd_size = initrd_end - initrd_start;
	}
	initrd_start = initrd_end = 0;
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();
	arm_dt_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_scan_reserved_mem();

	/*
	 * reserve memory for DMA contiguous allocations,
	 * must come from DMA area inside low memory
	 */
	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	memblock_allow_resize();
	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
	min_low_pfn = min;
	max_low_pfn = max_low;
	max_pfn = max_high;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}
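
/*
 * On the poison value (our reading): 0xe7fddef0 lies in the ARM
 * architecturally-undefined space, while its low halfword 0xdef0 is a
 * permanently undefined Thumb encoding, so execution traps in either
 * instruction set.
 */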

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		bank_start = min(bank_start,
				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
		free_memmap(prev_bank_end,
			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);
	free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

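	/*
	 * Example (hypothetical addresses): MLM(0xc0000000, 0xf0000000)
	 * expands to the three printk arguments 0xc0000000, 0xf0000000
	 * and 768, i.e. a 768 MB range; MLK does the same in KiB.
	 */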
	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#endif

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
#endif

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

#ifdef CONFIG_ZONE_DMA
	max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
	max_zone_pfn[ZONE_NORMAL] = max_low;
#ifdef CONFIG_HIGHMEM
	max_zone_pfn[ZONE_HIGHMEM] = max_high;
#endif
	free_area_init(max_zone_pfn);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);

	if (__phys_to_pfn(addr) != pfn)
		return 0;

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif
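
/*
 * Note on the round-trip check above (our reading of the code): for a
 * PFN beyond the width of phys_addr_t, __pfn_to_phys() truncates, so
 * __phys_to_pfn(addr) no longer equals pfn and the PFN is rejected
 * before memblock is consulted.
 */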

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}
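
/*
 * Worked example of the rounding above (hypothetical values, 4 KiB
 * pages): phys_initrd_start = 0x60000800 and phys_initrd_size = 0x1000
 * give start = 0x60000000 and size = 0x1800 rounded up to 0x2000, so
 * both pages touched by the initrd end up reserved.
 */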

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif
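
/*
 * Example (hypothetical CTR value): if the low nibble of the cache
 * type register reads 3, the line size is 1 << (3 + 2) = 32 bytes;
 * the smallest line size seen across CPUs is kept in icache_size.
 */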

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void __init
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
	swiotlb_init(1);
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start  = (unsigned long)__start_rodata_section_aligned,
		.end    = (unsigned long)__init_begin,
		.mask   = ~PMD_SECT_XN,
		.prot   = PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start  = (unsigned long)_stext,
		.end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear  = PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. It is only
 * safe to be called with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}
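
/*
 * Background note (general ARM detail, not stated in this file): with
 * the classic non-LPAE page tables, SECTION_SIZE is 1 MiB and Linux
 * folds two adjacent 1 MiB hardware sections into each 2 MiB pmd_t,
 * which is why the odd-MiB case above updates pmd[1] rather than pmd[0].
 */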

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

static void set_section_perms(struct section_perm *perms, int n, bool set,
			      struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework and executed by only one CPU while all other
 * CPUs will spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif