v5.9 (arch/riscv/mm/init.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2012 Regents of the University of California
  4 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
  5 */
  6
  7#include <linux/init.h>
  8#include <linux/mm.h>
  9#include <linux/memblock.h>
 10#include <linux/initrd.h>
 11#include <linux/swap.h>
 12#include <linux/sizes.h>
 13#include <linux/of_fdt.h>
 14#include <linux/libfdt.h>
 15#include <linux/set_memory.h>
 16
 17#include <asm/fixmap.h>
 18#include <asm/tlbflush.h>
 19#include <asm/sections.h>
 20#include <asm/soc.h>
 21#include <asm/io.h>
 22#include <asm/ptdump.h>
 23
 24#include "../kernel/head.h"
 25
 26unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 27							__page_aligned_bss;
 28EXPORT_SYMBOL(empty_zero_page);
 29
 30extern char _start[];
 31void *dtb_early_va;
 32
 33static void __init zone_sizes_init(void)
 34{
 35	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
 36
 37#ifdef CONFIG_ZONE_DMA32
 38	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
 39			(unsigned long) PFN_PHYS(max_low_pfn)));
 40#endif
 41	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 42
 43	free_area_init(max_zone_pfns);
 44}
 45
 46static void setup_zero_page(void)
 47{
 48	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 49}
 50
 51#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
 52static inline void print_mlk(char *name, unsigned long b, unsigned long t)
 53{
 54	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
 55		  (((t) - (b)) >> 10));
 56}
 57
 58static inline void print_mlm(char *name, unsigned long b, unsigned long t)
 59{
 60	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
 61		  (((t) - (b)) >> 20));
 62}
 63
 64static void print_vm_layout(void)
 65{
 66	pr_notice("Virtual kernel memory layout:\n");
 67	print_mlk("fixmap", (unsigned long)FIXADDR_START,
 68		  (unsigned long)FIXADDR_TOP);
 69	print_mlm("pci io", (unsigned long)PCI_IO_START,
 70		  (unsigned long)PCI_IO_END);
 71	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
 72		  (unsigned long)VMEMMAP_END);
 73	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
 74		  (unsigned long)VMALLOC_END);
 75	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
 76		  (unsigned long)high_memory);
 77}
 78#else
 79static void print_vm_layout(void) { }
 80#endif /* CONFIG_MMU && CONFIG_DEBUG_VM */
 81
 82void __init mem_init(void)
 83{
 84#ifdef CONFIG_FLATMEM
 85	BUG_ON(!mem_map);
 86#endif /* CONFIG_FLATMEM */
 87
 88	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
 89	memblock_free_all();
 90
 91	mem_init_print_info(NULL);
 92	print_vm_layout();
 93}
 94
 95#ifdef CONFIG_BLK_DEV_INITRD
 96static void __init setup_initrd(void)
 97{
 98	phys_addr_t start;
 99	unsigned long size;
100
101	/* Ignore the virtual address computed during device tree parsing */
102	initrd_start = initrd_end = 0;
103
104	if (!phys_initrd_size)
105		return;
106	/*
107	 * Round the memory region to page boundaries as per free_initrd_mem()
108	 * This allows us to detect whether the pages overlapping the initrd
109	 * are in use, but more importantly, reserves the entire set of pages
110	 * as we don't want these pages allocated for other purposes.
111	 */
112	start = round_down(phys_initrd_start, PAGE_SIZE);
113	size = phys_initrd_size + (phys_initrd_start - start);
114	size = round_up(size, PAGE_SIZE);
115
116	if (!memblock_is_region_memory(start, size)) {
117		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
118		       (u64)start, size);
119		goto disable;
120	}
121
122	if (memblock_is_region_reserved(start, size)) {
123		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
124		       (u64)start, size);
125		goto disable;
126	}
127
128	memblock_reserve(start, size);
129	/* Now convert initrd to virtual addresses */
130	initrd_start = (unsigned long)__va(phys_initrd_start);
131	initrd_end = initrd_start + phys_initrd_size;
132	initrd_below_start_ok = 1;
133
134	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
135		(void *)(initrd_start), size);
136	return;
137disable:
138	pr_cont(" - disabling initrd\n");
139	initrd_start = 0;
140	initrd_end = 0;
141}
142#endif /* CONFIG_BLK_DEV_INITRD */
143
144static phys_addr_t dtb_early_pa __initdata;
145
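/*
 * Trim memblock to what the kernel linear mapping can cover, reserve the
 * kernel image, initrd and DTB, then record max_pfn and per-region node
 * information for the core mm.
 */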
146void __init setup_bootmem(void)
147{
148	struct memblock_region *reg;
149	phys_addr_t mem_size = 0;
150	phys_addr_t total_mem = 0;
151	phys_addr_t mem_start, end = 0;
152	phys_addr_t vmlinux_end = __pa_symbol(&_end);
153	phys_addr_t vmlinux_start = __pa_symbol(&_start);
154
155	/* Find the memory region containing the kernel */
156	for_each_memblock(memory, reg) {
157		end = reg->base + reg->size;
158		if (!total_mem)
159			mem_start = reg->base;
160		if (reg->base <= vmlinux_start && vmlinux_end <= end)
161			BUG_ON(reg->size == 0);
162		total_mem = total_mem + reg->size;
163	}
164
165	/*
166	 * Remove memblock from the end of usable area to the
167	 * end of region
168	 */
169	mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
170	if (mem_start + mem_size < end)
171		memblock_remove(mem_start + mem_size,
172				end - mem_start - mem_size);
173
174	/* Reserve from the start of the kernel to the end of the kernel */
175	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
176
177	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
178	max_low_pfn = max_pfn;
179	set_max_mapnr(max_low_pfn);
180
181#ifdef CONFIG_BLK_DEV_INITRD
182	setup_initrd();
183#endif /* CONFIG_BLK_DEV_INITRD */
184
185	/*
186	 * Avoid using early_init_fdt_reserve_self() since __pa() does
187	 * not work for DTB pointers that are fixmap addresses
188	 */
189	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
190
191	early_init_fdt_scan_reserved_mem();
192	memblock_allow_resize();
193	memblock_dump_all();
194
195	for_each_memblock(memory, reg) {
196		unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
197		unsigned long end_pfn = memblock_region_memory_end_pfn(reg);
198
199		memblock_set_node(PFN_PHYS(start_pfn),
200				  PFN_PHYS(end_pfn - start_pfn),
201				  &memblock.memory, 0);
202	}
203}
204
205#ifdef CONFIG_MMU
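/* Linear-map offset and the PFN the kernel was loaded at; both set in setup_vm(). */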
206unsigned long va_pa_offset;
207EXPORT_SYMBOL(va_pa_offset);
208unsigned long pfn_base;
209EXPORT_SYMBOL(pfn_base);
210
211pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
212pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
213pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
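/*
 * Set in setup_vm_final(); until then page-table pages are referenced by
 * physical address and new tables come from the static early arrays.
 */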
214static bool mmu_enabled;
215
216#define MAX_EARLY_MAPPING_SIZE	SZ_128M
217
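/* Page directory used from setup_vm() until setup_vm_final() switches SATP to swapper_pg_dir. */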
218pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
219
220void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
221{
222	unsigned long addr = __fix_to_virt(idx);
223	pte_t *ptep;
224
225	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
226
227	ptep = &fixmap_pte[pte_index(addr)];
228
229	if (pgprot_val(prot))
230		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
231	else
232		pte_clear(&init_mm, addr, ptep);
233	local_flush_tlb_page(addr);
234}
235
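/*
 * Return an address through which the page-table page at @pa can be written:
 * the FIX_PTE fixmap slot once the MMU is on, the physical address itself
 * before that.
 */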
236static pte_t *__init get_pte_virt(phys_addr_t pa)
237{
238	if (mmu_enabled) {
239		clear_fixmap(FIX_PTE);
240		return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
241	} else {
242		return (pte_t *)((uintptr_t)pa);
243	}
244}
245
246static phys_addr_t __init alloc_pte(uintptr_t va)
247{
248	/*
249	 * We only create PMD or PGD early mappings so we
250	 * should never reach here with MMU disabled.
251	 */
252	BUG_ON(!mmu_enabled);
253
254	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
255}
256
257static void __init create_pte_mapping(pte_t *ptep,
258				      uintptr_t va, phys_addr_t pa,
259				      phys_addr_t sz, pgprot_t prot)
260{
261	uintptr_t pte_idx = pte_index(va);
262
263	BUG_ON(sz != PAGE_SIZE);
264
265	if (pte_none(ptep[pte_idx]))
266		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
267}
268
269#ifndef __PAGETABLE_PMD_FOLDED
270
271pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
272pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
273
274#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
275#define NUM_EARLY_PMDS		1UL
276#else
277#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
278#endif
279pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
280
281static pmd_t *__init get_pmd_virt(phys_addr_t pa)
282{
283	if (mmu_enabled) {
284		clear_fixmap(FIX_PMD);
285		return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
286	} else {
287		return (pmd_t *)((uintptr_t)pa);
288	}
289}
290
291static phys_addr_t __init alloc_pmd(uintptr_t va)
292{
293	uintptr_t pmd_num;
294
295	if (mmu_enabled)
296		return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
297
298	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
299	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
300	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
301}
302
303static void __init create_pmd_mapping(pmd_t *pmdp,
304				      uintptr_t va, phys_addr_t pa,
305				      phys_addr_t sz, pgprot_t prot)
306{
307	pte_t *ptep;
308	phys_addr_t pte_phys;
309	uintptr_t pmd_idx = pmd_index(va);
310
311	if (sz == PMD_SIZE) {
312		if (pmd_none(pmdp[pmd_idx]))
313			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
314		return;
315	}
316
317	if (pmd_none(pmdp[pmd_idx])) {
318		pte_phys = alloc_pte(va);
319		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
320		ptep = get_pte_virt(pte_phys);
321		memset(ptep, 0, PAGE_SIZE);
322	} else {
323		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
324		ptep = get_pte_virt(pte_phys);
325	}
326
327	create_pte_mapping(ptep, va, pa, sz, prot);
328}
329
330#define pgd_next_t		pmd_t
331#define alloc_pgd_next(__va)	alloc_pmd(__va)
332#define get_pgd_next_virt(__pa)	get_pmd_virt(__pa)
333#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
334	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
335#define fixmap_pgd_next		fixmap_pmd
336#else
337#define pgd_next_t		pte_t
338#define alloc_pgd_next(__va)	alloc_pte(__va)
339#define get_pgd_next_virt(__pa)	get_pte_virt(__pa)
340#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
341	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
342#define fixmap_pgd_next		fixmap_pte
343#endif
344
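/*
 * Install a mapping for @va in @pgdp: a leaf entry when @sz is PGDIR_SIZE,
 * otherwise allocate or look up the next-level table and continue the walk
 * there.
 */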
345static void __init create_pgd_mapping(pgd_t *pgdp,
346				      uintptr_t va, phys_addr_t pa,
347				      phys_addr_t sz, pgprot_t prot)
348{
349	pgd_next_t *nextp;
350	phys_addr_t next_phys;
351	uintptr_t pgd_idx = pgd_index(va);
352
353	if (sz == PGDIR_SIZE) {
354		if (pgd_val(pgdp[pgd_idx]) == 0)
355			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
356		return;
357	}
358
359	if (pgd_val(pgdp[pgd_idx]) == 0) {
360		next_phys = alloc_pgd_next(va);
361		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
362		nextp = get_pgd_next_virt(next_phys);
363		memset(nextp, 0, PAGE_SIZE);
364	} else {
365		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
366		nextp = get_pgd_next_virt(next_phys);
367	}
368
369	create_pgd_next_mapping(nextp, va, pa, sz, prot);
370}
371
372static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
373{
374	/* Upgrade to PMD_SIZE mappings whenever possible */
375	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
376		return PAGE_SIZE;
377
378	return PMD_SIZE;
379}
380
381/*
382 * setup_vm() is called from head.S with MMU-off.
383 *
384 * The following requirements should be honoured for setup_vm() to work
385 * correctly:
386 * 1) It should use PC-relative addressing for accessing kernel symbols.
387 *    To achieve this we always use GCC cmodel=medany.
388 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
389 *    so disable compiler instrumentation when FTRACE is enabled.
390 *
391 * Currently, the above requirements are honoured by using custom CFLAGS
392 * for init.o in mm/Makefile.
393 */
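/*
 * For reference, a sketch of the Makefile arrangement mentioned above
 * (paraphrased, not quoted verbatim from arch/riscv/mm/Makefile):
 *
 *   CFLAGS_init.o := -mcmodel=medany
 *   ifdef CONFIG_FTRACE
 *   CFLAGS_REMOVE_init.o = -pg
 *   endif
 */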
394
395#ifndef __riscv_cmodel_medany
396#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
397#endif
398
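/*
 * Build the trampoline and early page tables: map the kernel image at
 * PAGE_OFFSET and the FDT through the fixmap so that paging can be enabled
 * from head.S.
 */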
399asmlinkage void __init setup_vm(uintptr_t dtb_pa)
400{
401	uintptr_t va, end_va;
402	uintptr_t load_pa = (uintptr_t)(&_start);
403	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
404	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
405
406	va_pa_offset = PAGE_OFFSET - load_pa;
407	pfn_base = PFN_DOWN(load_pa);
408
409	/*
410	 * Enforce boot alignment requirements of RV32 and
411	 * RV64 by only allowing PMD or PGD mappings.
412	 */
413	BUG_ON(map_size == PAGE_SIZE);
414
415	/* Sanity check alignment and size */
416	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
417	BUG_ON((load_pa % map_size) != 0);
418	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);
419
420	/* Setup early PGD for fixmap */
421	create_pgd_mapping(early_pg_dir, FIXADDR_START,
422			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
423
424#ifndef __PAGETABLE_PMD_FOLDED
425	/* Setup fixmap PMD */
426	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
427			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
428	/* Setup trampoline PGD and PMD */
429	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
430			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
431	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
432			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
433#else
434	/* Setup trampoline PGD */
435	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
436			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
437#endif
438
439	/*
440	 * Setup early PGD covering the entire kernel, which allows
441	 * us to reach paging_init(). We map all memory banks later
442	 * in setup_vm_final() below.
443	 */
444	end_va = PAGE_OFFSET + load_sz;
445	for (va = PAGE_OFFSET; va < end_va; va += map_size)
446		create_pgd_mapping(early_pg_dir, va,
447				   load_pa + (va - PAGE_OFFSET),
448				   map_size, PAGE_KERNEL_EXEC);
449
450	/* Create fixed mapping for early FDT parsing */
451	end_va = __fix_to_virt(FIX_FDT) + FIX_FDT_SIZE;
452	for (va = __fix_to_virt(FIX_FDT); va < end_va; va += PAGE_SIZE)
453		create_pte_mapping(fixmap_pte, va,
454				   dtb_pa + (va - __fix_to_virt(FIX_FDT)),
455				   PAGE_SIZE, PAGE_KERNEL);
456
457	/* Save pointer to DTB for early FDT parsing */
458	dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
459	/* Save physical address for memblock reservation */
460	dtb_early_pa = dtb_pa;
461}
462
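/*
 * Rebuild the final kernel mapping in swapper_pg_dir, covering every
 * memblock memory bank, then switch SATP over to it.
 */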
463static void __init setup_vm_final(void)
464{
465	uintptr_t va, map_size;
466	phys_addr_t pa, start, end;
467	struct memblock_region *reg;
468
469	/* Set mmu_enabled flag */
470	mmu_enabled = true;
471
472	/* Setup swapper PGD for fixmap */
473	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
474			   __pa_symbol(fixmap_pgd_next),
475			   PGDIR_SIZE, PAGE_TABLE);
476
477	/* Map all memory banks */
478	for_each_memblock(memory, reg) {
479		start = reg->base;
480		end = start + reg->size;
481
482		if (start >= end)
483			break;
484		if (memblock_is_nomap(reg))
485			continue;
486		if (start <= __pa(PAGE_OFFSET) &&
487		    __pa(PAGE_OFFSET) < end)
488			start = __pa(PAGE_OFFSET);
489
490		map_size = best_map_size(start, end - start);
491		for (pa = start; pa < end; pa += map_size) {
492			va = (uintptr_t)__va(pa);
493			create_pgd_mapping(swapper_pg_dir, va, pa,
494					   map_size, PAGE_KERNEL_EXEC);
495		}
496	}
497
498	/* Clear fixmap PTE and PMD mappings */
499	clear_fixmap(FIX_PTE);
500	clear_fixmap(FIX_PMD);
501
502	/* Move to swapper page table */
503	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
504	local_flush_tlb_all();
505}
506#else
507asmlinkage void __init setup_vm(uintptr_t dtb_pa)
508{
509#ifdef CONFIG_BUILTIN_DTB
510	dtb_early_va = soc_lookup_builtin_dtb();
511	if (!dtb_early_va) {
512		/* Fallback to first available DTS */
513		dtb_early_va = (void *) __dtb_start;
514	}
515#else
516	dtb_early_va = (void *)dtb_pa;
517#endif
518	dtb_early_pa = dtb_pa;
519}
520
521static inline void setup_vm_final(void)
522{
523}
524#endif /* CONFIG_MMU */
525
526#ifdef CONFIG_STRICT_KERNEL_RWX
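/*
 * Make kernel text read-only, rodata read-only and non-executable, and
 * data/bss up to the end of lowmem non-executable.
 */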
527void mark_rodata_ro(void)
528{
529	unsigned long text_start = (unsigned long)_text;
530	unsigned long text_end = (unsigned long)_etext;
531	unsigned long rodata_start = (unsigned long)__start_rodata;
532	unsigned long data_start = (unsigned long)_data;
533	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
534
535	set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
536	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
537	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
538	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
539
540	debug_checkwx();
541}
542#endif
543
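/*
 * Register each memblock memory region in the resource tree: "System RAM",
 * or "reserved" for nomap regions.
 */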
544static void __init resource_init(void)
545{
546	struct memblock_region *region;
547
548	for_each_memblock(memory, region) {
549		struct resource *res;
550
551		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
552		if (!res)
553			panic("%s: Failed to allocate %zu bytes\n", __func__,
554			      sizeof(struct resource));
555
556		if (memblock_is_nomap(region)) {
557			res->name = "reserved";
558			res->flags = IORESOURCE_MEM;
559		} else {
560			res->name = "System RAM";
561			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
562		}
563		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
564		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
565
566		request_resource(&iomem_resource, res);
567	}
568}
569
570void __init paging_init(void)
571{
572	setup_vm_final();
573	sparse_init();
574	setup_zero_page();
575	zone_sizes_init();
576	resource_init();
577}
578
579#ifdef CONFIG_SPARSEMEM_VMEMMAP
580int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
581			       struct vmem_altmap *altmap)
582{
583	return vmemmap_populate_basepages(start, end, node, NULL);
584}
585#endif
v5.4 (arch/riscv/mm/init.c)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2012 Regents of the University of California
  4 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
  5 */
  6
  7#include <linux/init.h>
  8#include <linux/mm.h>
  9#include <linux/memblock.h>
 10#include <linux/initrd.h>
 11#include <linux/swap.h>
 12#include <linux/sizes.h>
 13#include <linux/of_fdt.h>
 14#include <linux/libfdt.h>
 15
 16#include <asm/fixmap.h>
 17#include <asm/tlbflush.h>
 18#include <asm/sections.h>
 19#include <asm/pgtable.h>
 20#include <asm/io.h>
 21
 22#include "../kernel/head.h"
 23
 24unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 25							__page_aligned_bss;
 26EXPORT_SYMBOL(empty_zero_page);
 27
 28extern char _start[];
 29
 30static void __init zone_sizes_init(void)
 31{
 32	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
 33
 34#ifdef CONFIG_ZONE_DMA32
 35	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
 36			(unsigned long) PFN_PHYS(max_low_pfn)));
 37#endif
 38	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 39
 40	free_area_init_nodes(max_zone_pfns);
 41}
 42
 43void setup_zero_page(void)
 44{
 45	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 46}
 47
 48void __init mem_init(void)
 49{
 50#ifdef CONFIG_FLATMEM
 51	BUG_ON(!mem_map);
 52#endif /* CONFIG_FLATMEM */
 53
 54	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
 55	memblock_free_all();
 56
 57	mem_init_print_info(NULL);
 58}
 59
 60#ifdef CONFIG_BLK_DEV_INITRD
 61static void __init setup_initrd(void)
 62{
 63	unsigned long size;
 64
 65	if (initrd_start >= initrd_end) {
 66		pr_info("initrd not found or empty");
 67		goto disable;
 68	}
 69	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
 70		pr_err("initrd extends beyond end of memory");
 71		goto disable;
 72	}
 73
 74	size = initrd_end - initrd_start;
 75	memblock_reserve(__pa(initrd_start), size);
 76	initrd_below_start_ok = 1;
 77
 78	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
 79		(void *)(initrd_start), size);
 80	return;
 81disable:
 82	pr_cont(" - disabling initrd\n");
 83	initrd_start = 0;
 84	initrd_end = 0;
 85}
 86#endif /* CONFIG_BLK_DEV_INITRD */
 87
 88static phys_addr_t dtb_early_pa __initdata;
 89
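/*
 * Trim the region containing the kernel to what the linear mapping can
 * cover, then reserve the kernel image, initrd and DTB.
 */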
 90void __init setup_bootmem(void)
 91{
 92	struct memblock_region *reg;
 93	phys_addr_t mem_size = 0;
 94	phys_addr_t vmlinux_end = __pa(&_end);
 95	phys_addr_t vmlinux_start = __pa(&_start);
 96
 97	/* Find the memory region containing the kernel */
 98	for_each_memblock(memory, reg) {
 99		phys_addr_t end = reg->base + reg->size;
100
101		if (reg->base <= vmlinux_end && vmlinux_end <= end) {
102			mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
103
104			/*
105			 * Remove memblock from the end of usable area to the
106			 * end of region
107			 */
108			if (reg->base + mem_size < end)
109				memblock_remove(reg->base + mem_size,
110						end - reg->base - mem_size);
111		}
112	}
113	BUG_ON(mem_size == 0);
114
115	/* Reserve from the start of the kernel to the end of the kernel */
116	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
117
118	set_max_mapnr(PFN_DOWN(mem_size));
119	max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
120
121#ifdef CONFIG_BLK_DEV_INITRD
122	setup_initrd();
123#endif /* CONFIG_BLK_DEV_INITRD */
124
125	/*
126	 * Avoid using early_init_fdt_reserve_self() since __pa() does
127	 * not work for DTB pointers that are fixmap addresses
128	 */
129	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
130
131	early_init_fdt_scan_reserved_mem();
132	memblock_allow_resize();
133	memblock_dump_all();
134
135	for_each_memblock(memory, reg) {
136		unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
137		unsigned long end_pfn = memblock_region_memory_end_pfn(reg);
138
139		memblock_set_node(PFN_PHYS(start_pfn),
140				  PFN_PHYS(end_pfn - start_pfn),
141				  &memblock.memory, 0);
142	}
143}
144
145unsigned long va_pa_offset;
146EXPORT_SYMBOL(va_pa_offset);
147unsigned long pfn_base;
148EXPORT_SYMBOL(pfn_base);
149
150void *dtb_early_va;
151pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
152pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
153pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
154static bool mmu_enabled;
155
156#define MAX_EARLY_MAPPING_SIZE	SZ_128M
157
158pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
159
160void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
161{
162	unsigned long addr = __fix_to_virt(idx);
163	pte_t *ptep;
164
165	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
166
167	ptep = &fixmap_pte[pte_index(addr)];
168
169	if (pgprot_val(prot)) {
170		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
171	} else {
172		pte_clear(&init_mm, addr, ptep);
173		local_flush_tlb_page(addr);
174	}
175}
176
177static pte_t *__init get_pte_virt(phys_addr_t pa)
178{
179	if (mmu_enabled) {
180		clear_fixmap(FIX_PTE);
181		return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
182	} else {
183		return (pte_t *)((uintptr_t)pa);
184	}
185}
186
187static phys_addr_t __init alloc_pte(uintptr_t va)
188{
189	/*
190	 * We only create PMD or PGD early mappings so we
191	 * should never reach here with MMU disabled.
192	 */
193	BUG_ON(!mmu_enabled);
194
195	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
196}
197
198static void __init create_pte_mapping(pte_t *ptep,
199				      uintptr_t va, phys_addr_t pa,
200				      phys_addr_t sz, pgprot_t prot)
201{
202	uintptr_t pte_index = pte_index(va);
203
204	BUG_ON(sz != PAGE_SIZE);
205
206	if (pte_none(ptep[pte_index]))
207		ptep[pte_index] = pfn_pte(PFN_DOWN(pa), prot);
208}
209
210#ifndef __PAGETABLE_PMD_FOLDED
211
212pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
213pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
214
215#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
216#define NUM_EARLY_PMDS		1UL
217#else
218#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
219#endif
220pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
221
222static pmd_t *__init get_pmd_virt(phys_addr_t pa)
223{
224	if (mmu_enabled) {
225		clear_fixmap(FIX_PMD);
226		return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
227	} else {
228		return (pmd_t *)((uintptr_t)pa);
229	}
230}
231
232static phys_addr_t __init alloc_pmd(uintptr_t va)
233{
234	uintptr_t pmd_num;
235
236	if (mmu_enabled)
237		return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
238
239	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
240	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
241	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
242}
243
244static void __init create_pmd_mapping(pmd_t *pmdp,
245				      uintptr_t va, phys_addr_t pa,
246				      phys_addr_t sz, pgprot_t prot)
247{
248	pte_t *ptep;
249	phys_addr_t pte_phys;
250	uintptr_t pmd_index = pmd_index(va);
251
252	if (sz == PMD_SIZE) {
253		if (pmd_none(pmdp[pmd_index]))
254			pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pa), prot);
255		return;
256	}
257
258	if (pmd_none(pmdp[pmd_index])) {
259		pte_phys = alloc_pte(va);
260		pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
261		ptep = get_pte_virt(pte_phys);
262		memset(ptep, 0, PAGE_SIZE);
263	} else {
264		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_index]));
265		ptep = get_pte_virt(pte_phys);
266	}
267
268	create_pte_mapping(ptep, va, pa, sz, prot);
269}
270
271#define pgd_next_t		pmd_t
272#define alloc_pgd_next(__va)	alloc_pmd(__va)
273#define get_pgd_next_virt(__pa)	get_pmd_virt(__pa)
274#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
275	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
276#define PTE_PARENT_SIZE		PMD_SIZE
277#define fixmap_pgd_next		fixmap_pmd
278#else
279#define pgd_next_t		pte_t
280#define alloc_pgd_next(__va)	alloc_pte(__va)
281#define get_pgd_next_virt(__pa)	get_pte_virt(__pa)
282#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
283	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
284#define PTE_PARENT_SIZE		PGDIR_SIZE
285#define fixmap_pgd_next		fixmap_pte
286#endif
287
288static void __init create_pgd_mapping(pgd_t *pgdp,
289				      uintptr_t va, phys_addr_t pa,
290				      phys_addr_t sz, pgprot_t prot)
291{
292	pgd_next_t *nextp;
293	phys_addr_t next_phys;
294	uintptr_t pgd_index = pgd_index(va);
295
296	if (sz == PGDIR_SIZE) {
297		if (pgd_val(pgdp[pgd_index]) == 0)
298			pgdp[pgd_index] = pfn_pgd(PFN_DOWN(pa), prot);
299		return;
300	}
301
302	if (pgd_val(pgdp[pgd_index]) == 0) {
303		next_phys = alloc_pgd_next(va);
304		pgdp[pgd_index] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
305		nextp = get_pgd_next_virt(next_phys);
306		memset(nextp, 0, PAGE_SIZE);
307	} else {
308		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_index]));
309		nextp = get_pgd_next_virt(next_phys);
310	}
311
312	create_pgd_next_mapping(nextp, va, pa, sz, prot);
313}
314
315static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
316{
317	uintptr_t map_size = PAGE_SIZE;
318
319	/* Upgrade to PMD/PGDIR mappings whenever possible */
320	if (!(base & (PTE_PARENT_SIZE - 1)) &&
321	    !(size & (PTE_PARENT_SIZE - 1)))
322		map_size = PTE_PARENT_SIZE;
323
324	return map_size;
325}
326
327/*
328 * setup_vm() is called from head.S with MMU-off.
329 *
330 * The following requirements should be honoured for setup_vm() to work
331 * correctly:
332 * 1) It should use PC-relative addressing for accessing kernel symbols.
333 *    To achieve this we always use GCC cmodel=medany.
334 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
335 *    so disable compiler instrumentation when FTRACE is enabled.
336 *
337 * Currently, the above requirements are honoured by using custom CFLAGS
338 * for init.o in mm/Makefile.
339 */
340
341#ifndef __riscv_cmodel_medany
342#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
343#endif
344
345asmlinkage void __init setup_vm(uintptr_t dtb_pa)
346{
347	uintptr_t va, end_va;
348	uintptr_t load_pa = (uintptr_t)(&_start);
349	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
350	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
351
352	va_pa_offset = PAGE_OFFSET - load_pa;
353	pfn_base = PFN_DOWN(load_pa);
354
355	/*
356	 * Enforce boot alignment requirements of RV32 and
357	 * RV64 by only allowing PMD or PGD mappings.
358	 */
359	BUG_ON(map_size == PAGE_SIZE);
360
361	/* Sanity check alignment and size */
362	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
363	BUG_ON((load_pa % map_size) != 0);
364	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);
365
366	/* Setup early PGD for fixmap */
367	create_pgd_mapping(early_pg_dir, FIXADDR_START,
368			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
369
370#ifndef __PAGETABLE_PMD_FOLDED
371	/* Setup fixmap PMD */
372	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
373			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
374	/* Setup trampoline PGD and PMD */
375	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
376			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
377	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
378			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
379#else
380	/* Setup trampoline PGD */
381	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
382			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
383#endif
384
385	/*
386	 * Setup early PGD covering the entire kernel, which allows
387	 * us to reach paging_init(). We map all memory banks later
388	 * in setup_vm_final() below.
389	 */
390	end_va = PAGE_OFFSET + load_sz;
391	for (va = PAGE_OFFSET; va < end_va; va += map_size)
392		create_pgd_mapping(early_pg_dir, va,
393				   load_pa + (va - PAGE_OFFSET),
394				   map_size, PAGE_KERNEL_EXEC);
395
396	/* Create fixed mapping for early FDT parsing */
397	end_va = __fix_to_virt(FIX_FDT) + FIX_FDT_SIZE;
398	for (va = __fix_to_virt(FIX_FDT); va < end_va; va += PAGE_SIZE)
399		create_pte_mapping(fixmap_pte, va,
400				   dtb_pa + (va - __fix_to_virt(FIX_FDT)),
401				   PAGE_SIZE, PAGE_KERNEL);
402
403	/* Save pointer to DTB for early FDT parsing */
404	dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
405	/* Save physical address for memblock reservation */
406	dtb_early_pa = dtb_pa;
407}
408
409static void __init setup_vm_final(void)
410{
411	uintptr_t va, map_size;
412	phys_addr_t pa, start, end;
413	struct memblock_region *reg;
414
415	/* Set mmu_enabled flag */
416	mmu_enabled = true;
417
418	/* Setup swapper PGD for fixmap */
419	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
420			   __pa(fixmap_pgd_next),
421			   PGDIR_SIZE, PAGE_TABLE);
422
423	/* Map all memory banks */
424	for_each_memblock(memory, reg) {
425		start = reg->base;
426		end = start + reg->size;
427
428		if (start >= end)
429			break;
430		if (memblock_is_nomap(reg))
431			continue;
432		if (start <= __pa(PAGE_OFFSET) &&
433		    __pa(PAGE_OFFSET) < end)
434			start = __pa(PAGE_OFFSET);
435
436		map_size = best_map_size(start, end - start);
437		for (pa = start; pa < end; pa += map_size) {
438			va = (uintptr_t)__va(pa);
439			create_pgd_mapping(swapper_pg_dir, va, pa,
440					   map_size, PAGE_KERNEL_EXEC);
441		}
442	}
443
444	/* Clear fixmap PTE and PMD mappings */
445	clear_fixmap(FIX_PTE);
446	clear_fixmap(FIX_PMD);
447
448	/* Move to swapper page table */
449	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
450	local_flush_tlb_all();
451}
452
453void __init paging_init(void)
454{
455	setup_vm_final();
456	memblocks_present();
457	sparse_init();
458	setup_zero_page();
459	zone_sizes_init();
460}
461
462#ifdef CONFIG_SPARSEMEM_VMEMMAP
463int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
464			       struct vmem_altmap *altmap)
465{
466	return vmemmap_populate_basepages(start, end, node);
467}
468#endif