v3.5.6
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>

#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);
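
/*
 * memparse() accepts hex values and K/M/G suffixes, so a boot loader
 * could pass, for example (illustrative values, not from this file):
 *
 *	initrd=0x60800000,8M
 *
 * which records an 8 MiB image at physical address 0x60800000 for the
 * code below to validate and reserve.
 */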

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
{
	phys_initrd_start = start;
	phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */
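
/*
 * The FDT core invokes this hook when /chosen carries initrd properties,
 * e.g. (an illustrative device-tree fragment, not from this file):
 *
 *	chosen {
 *		linux,initrd-start = <0x60800000>;
 *		linux,initrd-end   = <0x61000000>;
 *	};
 */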

/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo *mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas(filter);

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}
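
/*
 * This report can also be triggered at run time through the SysRq
 * interface, e.g. from a root shell:
 *
 *	echo m > /proc/sysrq-trigger
 *
 * which dumps the per-bank page accounting above to the kernel log.
 */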

static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	/* This assumes the meminfo array is properly sorted */
	*min = bank_pfn_start(&mi->bank[0]);
	for_each_bank (i, mi)
		if (mi->bank[i].highmem)
			break;
	*max_low = bank_pfn_end(&mi->bank[i - 1]);
	*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}
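
/*
 * A worked example with 4 KiB pages (hypothetical banks): bank 0 covering
 * 0x60000000-0x7fffffff (lowmem) and bank 1 covering 0x80000000-0x9fffffff
 * (highmem) yield *min = 0x60000, *max_low = 0x80000 and *max_high =
 * 0xa0000, all expressed as page-frame numbers.
 */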

static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				__pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}

	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		reserve_bootmem(__pfn_to_phys(start),
			        (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}
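
/*
 * The bootmem bitmap tracks one bit per page frame.  For example, 512 MiB
 * of lowmem is 131072 4 KiB pages, so the bitmap needs 16 KiB, and
 * bootmem_bootmap_pages() returns 4 pages for the allocation above.
 */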

#ifdef CONFIG_ZONE_DMA

unsigned long arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
#endif
}
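
/*
 * A platform whose bus masters can only address the first part of RAM
 * would set this in its machine_desc, e.g. (hypothetical board code):
 *
 *	MACHINE_START(MYBOARD, "My Board")
 *		.dma_zone_size	= SZ_64M,
 *		...
 *	MACHINE_END
 */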

static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}
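
/*
 * Continuing the earlier example: with min = 0x60000, max_low = 0x80000
 * and max_high = 0xa0000, zone_size[0] and zone_size[ZONE_HIGHMEM] each
 * start at 0x20000 pages (512 MiB); each memblock range then subtracts
 * its span from zhole_size[], so only genuine gaps are counted as holes.
 */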

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc(size, align);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}
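
/*
 * Stealing is only legal before arm_memblock_init() finishes; a typical
 * caller is a platform's mdesc->reserve() hook, e.g. (a hypothetical
 * carveout, not from this file):
 *
 *	static phys_addr_t fb_base;
 *
 *	static void __init myboard_reserve(void)
 *	{
 *		fb_base = arm_memblock_steal(SZ_4M, SZ_1M);
 *	}
 *
 * The pages are removed from memblock entirely, so the kernel never
 * maps or manages them.
 */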

void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	int i;

	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();
	arm_dt_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	/*
	 * reserve memory for DMA contiguous allocations,
	 * must come from DMA area inside low memory
	 */
	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

	arm_memblock_steal_permitted = false;
	memblock_allow_resize();
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	arm_bootmem_init(min, max_low);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	arm_bootmem_free(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}

static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}
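
/*
 * Decoded, 0xe7fddef0 sits in the permanently-undefined ARM encoding
 * space; read as Thumb halfwords it is 0xdef0 (an undefined instruction)
 * followed by 0xe7fd (a branch back into it), so stale jumps into freed
 * init memory fault immediately in either instruction set.
 */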

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		bank_start = min(bank_start,
				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
		free_memmap(prev_bank_end,
			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}
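
/*
 * Example (hypothetical layout): with banks at PFNs 0x60000-0x70000 and
 * 0x78000-0x80000, the struct page entries covering the 0x70000-0x78000
 * gap serve no purpose; after rounding both edges to MAX_ORDER_NR_PAGES,
 * the span between them is handed back by free_memmap().
 */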

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				totalhigh_pages += free_area(start, res_start,
							     NULL);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			totalhigh_pages += free_area(start, end, NULL);
	}
	totalram_pages += totalhigh_pages;
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;
	int i;
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

	free_highpages();

	reserved_pages = free_pages = 0;

	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end  = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#endif

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
				    __phys_to_pfn(__pa(&__tcm_end)),
				    "TCM link");
#endif

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
					    __phys_to_pfn(__pa(__init_end)),
					    "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}
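
/*
 * Passing "keepinitrd" on the kernel command line sets keep_initrd and
 * suppresses the free above, leaving the initrd image resident, e.g. so
 * it can still be inspected or reused after boot.
 */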
__setup("keepinitrd", keepinitrd_setup);
#endif
v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);

	if (__phys_to_pfn(addr) != pfn)
		return 0;

	return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
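
/*
 * The round-trip check guards against truncation: if phys_addr_t is
 * 32-bit, a PFN at or above 0x100000 would overflow in __pfn_to_phys()
 * and alias a lower address, so such PFNs are rejected before the
 * memblock lookup.
 */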

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif
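
/*
 * CTR (the Cache Type Register, read via CP15 c0 above) encodes the
 * smallest I-cache line in bits [3:0] as log2 of the line size in words,
 * hence size = 1 << ((ctr & 0xf) + 2) bytes; e.g. a field value of 4
 * means 16 words, i.e. a 64-byte line.
 */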

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	memblocks_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
				 ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
	swiotlb_init(1);
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start  = (unsigned long)__start_rodata_section_aligned,
		.end    = (unsigned long)__init_begin,
		.mask   = ~PMD_SECT_XN,
		.prot   = PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start  = (unsigned long)_stext,
		.end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear  = PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. This is
 * only safe to call with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}
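
/*
 * CR_XP is the SCTLR.XP ("extended page tables") bit introduced with
 * ARMv6; only with it set do the AP/APX and XN bits carry the meanings
 * that section_update() relies on, so older configurations simply skip
 * the permission rewrite.
 */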

void set_section_perms(struct section_perm *perms, int n, bool set,
			struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine framework and executed by only one CPU while all other
 * CPUs spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif