v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2012 Regents of the University of California
  4 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
  5 */
  6
  7#include <linux/init.h>
  8#include <linux/mm.h>
  9#include <linux/memblock.h>
 10#include <linux/initrd.h>
 11#include <linux/swap.h>
 12#include <linux/sizes.h>
 13#include <linux/of_fdt.h>
 14#include <linux/libfdt.h>
 15#include <linux/set_memory.h>
 16
 17#include <asm/fixmap.h>
 18#include <asm/tlbflush.h>
 19#include <asm/sections.h>
 20#include <asm/soc.h>
 21#include <asm/io.h>
 22#include <asm/ptdump.h>
 23
 24#include "../kernel/head.h"
 25
 26unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 27							__page_aligned_bss;
 28EXPORT_SYMBOL(empty_zero_page);
 29
 30extern char _start[];
 31void *dtb_early_va;
 32
 33static void __init zone_sizes_init(void)
 34{
 35	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
 36
 37#ifdef CONFIG_ZONE_DMA32
 38	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
 39			(unsigned long) PFN_PHYS(max_low_pfn)));
 40#endif
 41	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 42
 43	free_area_init(max_zone_pfns);
 44}
 45
 46static void setup_zero_page(void)
 47{
 48	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 49}
 50
 51#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
 52static inline void print_mlk(char *name, unsigned long b, unsigned long t)
 53{
 54	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
 55		  (((t) - (b)) >> 10));
 56}
 57
 58static inline void print_mlm(char *name, unsigned long b, unsigned long t)
 59{
 60	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
 61		  (((t) - (b)) >> 20));
 62}
 63
 64static void print_vm_layout(void)
 65{
 66	pr_notice("Virtual kernel memory layout:\n");
 67	print_mlk("fixmap", (unsigned long)FIXADDR_START,
 68		  (unsigned long)FIXADDR_TOP);
 69	print_mlm("pci io", (unsigned long)PCI_IO_START,
 70		  (unsigned long)PCI_IO_END);
 71	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
 72		  (unsigned long)VMEMMAP_END);
 73	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
 74		  (unsigned long)VMALLOC_END);
 75	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
 76		  (unsigned long)high_memory);
 77}
 78#else
 79static void print_vm_layout(void) { }
 80#endif /* CONFIG_DEBUG_VM */
 81
 82void __init mem_init(void)
 83{
 84#ifdef CONFIG_FLATMEM
 85	BUG_ON(!mem_map);
 86#endif /* CONFIG_FLATMEM */
 87
 88	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
 89	memblock_free_all();
 90
 91	mem_init_print_info(NULL);
 92	print_vm_layout();
 93}
 94
 95#ifdef CONFIG_BLK_DEV_INITRD
 96static void __init setup_initrd(void)
 97{
 98	phys_addr_t start;
 99	unsigned long size;
100
101	/* Ignore the virtual address computed during device tree parsing */
102	initrd_start = initrd_end = 0;
103
104	if (!phys_initrd_size)
105		return;
106	/*
107	 * Round the memory region to page boundaries as per free_initrd_mem()
108	 * This allows us to detect whether the pages overlapping the initrd
109	 * are in use, but more importantly, reserves the entire set of pages
110	 * as we don't want these pages allocated for other purposes.
111	 */
112	start = round_down(phys_initrd_start, PAGE_SIZE);
113	size = phys_initrd_size + (phys_initrd_start - start);
114	size = round_up(size, PAGE_SIZE);
115
116	if (!memblock_is_region_memory(start, size)) {
117		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
118		       (u64)start, size);
119		goto disable;
120	}
121
122	if (memblock_is_region_reserved(start, size)) {
123		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
124		       (u64)start, size);
125		goto disable;
126	}
127
128	memblock_reserve(start, size);
129	/* Now convert initrd to virtual addresses */
130	initrd_start = (unsigned long)__va(phys_initrd_start);
131	initrd_end = initrd_start + phys_initrd_size;
132	initrd_below_start_ok = 1;
133
134	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
135		(void *)(initrd_start), size);
136	return;
137disable:
138	pr_cont(" - disabling initrd\n");
139	initrd_start = 0;
140	initrd_end = 0;
141}
142#endif /* CONFIG_BLK_DEV_INITRD */
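A worked illustration of the rounding done in setup_initrd() above; the addresses and size are hypothetical, not taken from any real boot:

/*
 * Hypothetical initrd placement, PAGE_SIZE = 4 KiB:
 *
 *   phys_initrd_start = 0x80200abc, phys_initrd_size = 0x12345
 *   start = round_down(0x80200abc, PAGE_SIZE)   = 0x80200000
 *   size  = 0x12345 + (0x80200abc - 0x80200000) = 0x12e01
 *   size  = round_up(0x12e01, PAGE_SIZE)        = 0x13000
 *
 * so the memblock_reserve() above covers every page the initrd touches,
 * including the partial pages at both ends.
 */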
143
144static phys_addr_t dtb_early_pa __initdata;
145
146void __init setup_bootmem(void)
147{
148	struct memblock_region *reg;
149	phys_addr_t mem_size = 0;
150	phys_addr_t total_mem = 0;
151	phys_addr_t mem_start, end = 0;
152	phys_addr_t vmlinux_end = __pa_symbol(&_end);
153	phys_addr_t vmlinux_start = __pa_symbol(&_start);
154
155	/* Find the memory region containing the kernel */
156	for_each_memblock(memory, reg) {
157		end = reg->base + reg->size;
158		if (!total_mem)
159			mem_start = reg->base;
160		if (reg->base <= vmlinux_start && vmlinux_end <= end)
161			BUG_ON(reg->size == 0);
162		total_mem = total_mem + reg->size;
163	}
164
165	/*
166	 * Remove memblock from the end of usable area to the
167	 * end of region
168	 */
169	mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
170	if (mem_start + mem_size < end)
171		memblock_remove(mem_start + mem_size,
172				end - mem_start - mem_size);
173
174	/* Reserve from the start of the kernel to the end of the kernel */
175	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
176
177	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
178	max_low_pfn = max_pfn;
179	set_max_mapnr(max_low_pfn);
180
181#ifdef CONFIG_BLK_DEV_INITRD
182	setup_initrd();
183#endif /* CONFIG_BLK_DEV_INITRD */
184
185	/*
186	 * Avoid using early_init_fdt_reserve_self() since __pa() does
187	 * not work for DTB pointers that are fixmap addresses
188	 */
189	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
190
191	early_init_fdt_scan_reserved_mem();
192	memblock_allow_resize();
193	memblock_dump_all();
194
195	for_each_memblock(memory, reg) {
196		unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
197		unsigned long end_pfn = memblock_region_memory_end_pfn(reg);
198
199		memblock_set_node(PFN_PHYS(start_pfn),
200				  PFN_PHYS(end_pfn - start_pfn),
201				  &memblock.memory, 0);
202	}
203}
204
205#ifdef CONFIG_MMU
206unsigned long va_pa_offset;
207EXPORT_SYMBOL(va_pa_offset);
208unsigned long pfn_base;
209EXPORT_SYMBOL(pfn_base);
210
211pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
212pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
213pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
214static bool mmu_enabled;
215
216#define MAX_EARLY_MAPPING_SIZE	SZ_128M
217
218pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
219
220void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
221{
222	unsigned long addr = __fix_to_virt(idx);
223	pte_t *ptep;
224
225	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
226
227	ptep = &fixmap_pte[pte_index(addr)];
228
229	if (pgprot_val(prot))
230		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
231	else
232		pte_clear(&init_mm, addr, ptep);
233	local_flush_tlb_page(addr);
234}
235
236static pte_t *__init get_pte_virt(phys_addr_t pa)
237{
238	if (mmu_enabled) {
239		clear_fixmap(FIX_PTE);
240		return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
241	} else {
242		return (pte_t *)((uintptr_t)pa);
243	}
244}
245
246static phys_addr_t __init alloc_pte(uintptr_t va)
247{
248	/*
249	 * We only create PMD or PGD early mappings so we
250	 * should never reach here with MMU disabled.
251	 */
252	BUG_ON(!mmu_enabled);
253
254	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
255}
256
257static void __init create_pte_mapping(pte_t *ptep,
258				      uintptr_t va, phys_addr_t pa,
259				      phys_addr_t sz, pgprot_t prot)
260{
261	uintptr_t pte_idx = pte_index(va);
262
263	BUG_ON(sz != PAGE_SIZE);
264
265	if (pte_none(ptep[pte_idx]))
266		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
267}
268
269#ifndef __PAGETABLE_PMD_FOLDED
270
271pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
272pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
273
274#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
275#define NUM_EARLY_PMDS		1UL
276#else
277#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
278#endif
279pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
280
281static pmd_t *__init get_pmd_virt(phys_addr_t pa)
282{
283	if (mmu_enabled) {
284		clear_fixmap(FIX_PMD);
285		return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
286	} else {
287		return (pmd_t *)((uintptr_t)pa);
288	}
289}
290
291static phys_addr_t __init alloc_pmd(uintptr_t va)
292{
293	uintptr_t pmd_num;
294
295	if (mmu_enabled)
296		return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
297
298	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
299	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
300	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
301}
302
303static void __init create_pmd_mapping(pmd_t *pmdp,
304				      uintptr_t va, phys_addr_t pa,
305				      phys_addr_t sz, pgprot_t prot)
306{
307	pte_t *ptep;
308	phys_addr_t pte_phys;
309	uintptr_t pmd_idx = pmd_index(va);
310
311	if (sz == PMD_SIZE) {
312		if (pmd_none(pmdp[pmd_idx]))
313			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
314		return;
315	}
316
317	if (pmd_none(pmdp[pmd_idx])) {
318		pte_phys = alloc_pte(va);
319		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
320		ptep = get_pte_virt(pte_phys);
321		memset(ptep, 0, PAGE_SIZE);
322	} else {
323		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
324		ptep = get_pte_virt(pte_phys);
325	}
326
327	create_pte_mapping(ptep, va, pa, sz, prot);
328}
329
330#define pgd_next_t		pmd_t
331#define alloc_pgd_next(__va)	alloc_pmd(__va)
332#define get_pgd_next_virt(__pa)	get_pmd_virt(__pa)
333#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
334	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
335#define fixmap_pgd_next		fixmap_pmd
336#else
337#define pgd_next_t		pte_t
338#define alloc_pgd_next(__va)	alloc_pte(__va)
339#define get_pgd_next_virt(__pa)	get_pte_virt(__pa)
340#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
341	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
342#define fixmap_pgd_next		fixmap_pte
343#endif
344
345static void __init create_pgd_mapping(pgd_t *pgdp,
346				      uintptr_t va, phys_addr_t pa,
347				      phys_addr_t sz, pgprot_t prot)
348{
349	pgd_next_t *nextp;
350	phys_addr_t next_phys;
351	uintptr_t pgd_idx = pgd_index(va);
352
353	if (sz == PGDIR_SIZE) {
354		if (pgd_val(pgdp[pgd_idx]) == 0)
355			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
356		return;
357	}
358
359	if (pgd_val(pgdp[pgd_idx]) == 0) {
360		next_phys = alloc_pgd_next(va);
361		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
362		nextp = get_pgd_next_virt(next_phys);
363		memset(nextp, 0, PAGE_SIZE);
364	} else {
365		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
366		nextp = get_pgd_next_virt(next_phys);
367	}
368
369	create_pgd_next_mapping(nextp, va, pa, sz, prot);
370}
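To make the table walk above concrete, here is a hedged sketch of one call with hypothetical arguments, assuming the PGD slot for va is still empty:

/*
 * create_pgd_mapping(early_pg_dir, va, pa, PMD_SIZE, PAGE_KERNEL_EXEC)
 *   - sz != PGDIR_SIZE, so alloc_pgd_next(va) hands back a PMD table
 *     (a slot of early_pmd before the MMU is on, a memblock page after),
 *   - the empty PGD entry is pointed at that table with PAGE_TABLE,
 *   - create_pmd_mapping() then installs a 2 MiB leaf mapping va -> pa.
 * A later call for a va inside the same PGD region (1 GiB under Sv39)
 * reuses the existing PMD table via _pgd_pfn() instead of allocating
 * a new one.
 */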
371
372static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
373{
374	/* Upgrade to PMD_SIZE mappings whenever possible */
375	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
376		return PAGE_SIZE;
377
378	return PMD_SIZE;
379}
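A minimal illustration of the alignment check above, assuming 4 KiB pages and a 2 MiB PMD_SIZE (all values hypothetical):

/*
 *   best_map_size(0x80200000, 0x00400000) -> PMD_SIZE  (base and size 2 MiB aligned)
 *   best_map_size(0x80201000, 0x00400000) -> PAGE_SIZE (base not 2 MiB aligned)
 *   best_map_size(0x80200000, 0x003ff000) -> PAGE_SIZE (size not a 2 MiB multiple)
 */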
380
381/*
382 * setup_vm() is called from head.S with MMU-off.
383 *
384 * Following requirements should be honoured for setup_vm() to work
385 * correctly:
386 * 1) It should use PC-relative addressing for accessing kernel symbols.
387 *    To achieve this we always use GCC cmodel=medany.
388 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
389 *    so disable compiler instrumentation when FTRACE is enabled.
390 *
391 * Currently, the above requirements are honoured by using custom CFLAGS
392 * for init.o in mm/Makefile.
393 */
394
395#ifndef __riscv_cmodel_medany
396#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
397#endif
398
399asmlinkage void __init setup_vm(uintptr_t dtb_pa)
400{
401	uintptr_t va, end_va;
402	uintptr_t load_pa = (uintptr_t)(&_start);
403	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
404	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
405
406	va_pa_offset = PAGE_OFFSET - load_pa;
407	pfn_base = PFN_DOWN(load_pa);
408
409	/*
410	 * Enforce boot alignment requirements of RV32 and
411	 * RV64 by only allowing PMD or PGD mappings.
412	 */
413	BUG_ON(map_size == PAGE_SIZE);
414
415	/* Sanity check alignment and size */
416	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
417	BUG_ON((load_pa % map_size) != 0);
418	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);
419
420	/* Setup early PGD for fixmap */
421	create_pgd_mapping(early_pg_dir, FIXADDR_START,
422			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
423
424#ifndef __PAGETABLE_PMD_FOLDED
425	/* Setup fixmap PMD */
426	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
427			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
428	/* Setup trampoline PGD and PMD */
429	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
430			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
431	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
432			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
433#else
434	/* Setup trampoline PGD */
435	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
436			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
437#endif
438
439	/*
440	 * Setup early PGD covering entire kernel which will allow
441	 * us to reach paging_init(). We map all memory banks later
442	 * in setup_vm_final() below.
443	 */
444	end_va = PAGE_OFFSET + load_sz;
445	for (va = PAGE_OFFSET; va < end_va; va += map_size)
446		create_pgd_mapping(early_pg_dir, va,
447				   load_pa + (va - PAGE_OFFSET),
448				   map_size, PAGE_KERNEL_EXEC);
449
450	/* Create fixed mapping for early FDT parsing */
451	end_va = __fix_to_virt(FIX_FDT) + FIX_FDT_SIZE;
452	for (va = __fix_to_virt(FIX_FDT); va < end_va; va += PAGE_SIZE)
453		create_pte_mapping(fixmap_pte, va,
454				   dtb_pa + (va - __fix_to_virt(FIX_FDT)),
455				   PAGE_SIZE, PAGE_KERNEL);
456
457	/* Save pointer to DTB for early FDT parsing */
458	dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
459	/* Save physical address for memblock reservation */
460	dtb_early_pa = dtb_pa;
461}
462
463static void __init setup_vm_final(void)
464{
465	uintptr_t va, map_size;
466	phys_addr_t pa, start, end;
467	struct memblock_region *reg;
468
469	/* Set mmu_enabled flag */
470	mmu_enabled = true;
471
472	/* Setup swapper PGD for fixmap */
473	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
474			   __pa_symbol(fixmap_pgd_next),
475			   PGDIR_SIZE, PAGE_TABLE);
476
477	/* Map all memory banks */
478	for_each_memblock(memory, reg) {
479		start = reg->base;
480		end = start + reg->size;
481
482		if (start >= end)
483			break;
484		if (memblock_is_nomap(reg))
485			continue;
486		if (start <= __pa(PAGE_OFFSET) &&
487		    __pa(PAGE_OFFSET) < end)
488			start = __pa(PAGE_OFFSET);
489
490		map_size = best_map_size(start, end - start);
491		for (pa = start; pa < end; pa += map_size) {
492			va = (uintptr_t)__va(pa);
493			create_pgd_mapping(swapper_pg_dir, va, pa,
494					   map_size, PAGE_KERNEL_EXEC);
495		}
496	}
497
498	/* Clear fixmap PTE and PMD mappings */
499	clear_fixmap(FIX_PTE);
500	clear_fixmap(FIX_PMD);
501
502	/* Move to swapper page table */
503	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
504	local_flush_tlb_all();
505}
506#else
507asmlinkage void __init setup_vm(uintptr_t dtb_pa)
508{
509#ifdef CONFIG_BUILTIN_DTB
510	dtb_early_va = soc_lookup_builtin_dtb();
511	if (!dtb_early_va) {
512		/* Fallback to first available DTS */
513		dtb_early_va = (void *) __dtb_start;
514	}
515#else
516	dtb_early_va = (void *)dtb_pa;
517#endif
518	dtb_early_pa = dtb_pa;
519}
520
521static inline void setup_vm_final(void)
522{
523}
524#endif /* CONFIG_MMU */
525
526#ifdef CONFIG_STRICT_KERNEL_RWX
527void mark_rodata_ro(void)
528{
529	unsigned long text_start = (unsigned long)_text;
530	unsigned long text_end = (unsigned long)_etext;
531	unsigned long rodata_start = (unsigned long)__start_rodata;
532	unsigned long data_start = (unsigned long)_data;
533	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
534
535	set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
536	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
537	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
538	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
539
540	debug_checkwx();
541}
542#endif
543
544static void __init resource_init(void)
545{
546	struct memblock_region *region;
547
548	for_each_memblock(memory, region) {
549		struct resource *res;
550
551		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
552		if (!res)
553			panic("%s: Failed to allocate %zu bytes\n", __func__,
554			      sizeof(struct resource));
555
556		if (memblock_is_nomap(region)) {
557			res->name = "reserved";
558			res->flags = IORESOURCE_MEM;
559		} else {
560			res->name = "System RAM";
561			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
562		}
563		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
564		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
565
566		request_resource(&iomem_resource, res);
567	}
568}
569
570void __init paging_init(void)
571{
572	setup_vm_final();
573	sparse_init();
574	setup_zero_page();
575	zone_sizes_init();
576	resource_init();
577}
578
579#ifdef CONFIG_SPARSEMEM_VMEMMAP
580int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
581			       struct vmem_altmap *altmap)
582{
583	return vmemmap_populate_basepages(start, end, node, NULL);
584}
585#endif
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2012 Regents of the University of California
   4 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
   5 * Copyright (C) 2020 FORTH-ICS/CARV
   6 *  Nick Kossifidis <mick@ics.forth.gr>
   7 */
   8
   9#include <linux/init.h>
  10#include <linux/mm.h>
  11#include <linux/memblock.h>
  12#include <linux/initrd.h>
  13#include <linux/swap.h>
  14#include <linux/swiotlb.h>
  15#include <linux/sizes.h>
  16#include <linux/of_fdt.h>
  17#include <linux/of_reserved_mem.h>
  18#include <linux/libfdt.h>
  19#include <linux/set_memory.h>
  20#include <linux/dma-map-ops.h>
  21#include <linux/crash_dump.h>
  22#include <linux/hugetlb.h>
  23
  24#include <asm/fixmap.h>
  25#include <asm/tlbflush.h>
  26#include <asm/sections.h>
  27#include <asm/soc.h>
  28#include <asm/io.h>
  29#include <asm/ptdump.h>
  30#include <asm/numa.h>
  31
  32#include "../kernel/head.h"
  33
  34struct kernel_mapping kernel_map __ro_after_init;
  35EXPORT_SYMBOL(kernel_map);
  36#ifdef CONFIG_XIP_KERNEL
  37#define kernel_map	(*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
  38#endif
  39
  40#ifdef CONFIG_64BIT
  41u64 satp_mode __ro_after_init = !IS_ENABLED(CONFIG_XIP_KERNEL) ? SATP_MODE_57 : SATP_MODE_39;
  42#else
  43u64 satp_mode __ro_after_init = SATP_MODE_32;
  44#endif
  45EXPORT_SYMBOL(satp_mode);
  46
  47bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
  48bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
  49EXPORT_SYMBOL(pgtable_l4_enabled);
  50EXPORT_SYMBOL(pgtable_l5_enabled);
  51
  52phys_addr_t phys_ram_base __ro_after_init;
  53EXPORT_SYMBOL(phys_ram_base);
  54
  55unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
  56							__page_aligned_bss;
  57EXPORT_SYMBOL(empty_zero_page);
  58
  59extern char _start[];
  60#define DTB_EARLY_BASE_VA      PGDIR_SIZE
  61void *_dtb_early_va __initdata;
  62uintptr_t _dtb_early_pa __initdata;
  63
  64static phys_addr_t dma32_phys_limit __initdata;
  65
  66static void __init zone_sizes_init(void)
  67{
  68	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
  69
  70#ifdef CONFIG_ZONE_DMA32
  71	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
  72#endif
  73	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
  74
  75	free_area_init(max_zone_pfns);
  76}
  77
  78#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
  79
  80#define LOG2_SZ_1K  ilog2(SZ_1K)
  81#define LOG2_SZ_1M  ilog2(SZ_1M)
  82#define LOG2_SZ_1G  ilog2(SZ_1G)
  83#define LOG2_SZ_1T  ilog2(SZ_1T)
  84
  85static inline void print_mlk(char *name, unsigned long b, unsigned long t)
  86{
  87	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
  88		  (((t) - (b)) >> LOG2_SZ_1K));
  89}
  90
  91static inline void print_mlm(char *name, unsigned long b, unsigned long t)
  92{
  93	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
  94		  (((t) - (b)) >> LOG2_SZ_1M));
  95}
  96
  97static inline void print_mlg(char *name, unsigned long b, unsigned long t)
  98{
  99	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld GB)\n", name, b, t,
 100		   (((t) - (b)) >> LOG2_SZ_1G));
 101}
 102
 103#ifdef CONFIG_64BIT
 104static inline void print_mlt(char *name, unsigned long b, unsigned long t)
 105{
 106	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld TB)\n", name, b, t,
 107		   (((t) - (b)) >> LOG2_SZ_1T));
 108}
 109#else
 110#define print_mlt(n, b, t) do {} while (0)
 111#endif
 112
 113static inline void print_ml(char *name, unsigned long b, unsigned long t)
 114{
 115	unsigned long diff = t - b;
 116
 117	if (IS_ENABLED(CONFIG_64BIT) && (diff >> LOG2_SZ_1T) >= 10)
 118		print_mlt(name, b, t);
 119	else if ((diff >> LOG2_SZ_1G) >= 10)
 120		print_mlg(name, b, t);
 121	else if ((diff >> LOG2_SZ_1M) >= 10)
 122		print_mlm(name, b, t);
 123	else
 124		print_mlk(name, b, t);
 125}
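A quick sketch of how print_ml() picks its unit (range sizes hypothetical); a range is only promoted to the next unit once it spans at least 10 of them:

/*
 *   t - b =  8 MB  -> print_mlk()  (only 8 in MB units, < 10, so print in kB)
 *   t - b = 64 MB  -> print_mlm()
 *   t - b = 32 GB  -> print_mlg()
 *   t - b = 16 TB  -> print_mlt()  (CONFIG_64BIT only)
 */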
 126
 127static void __init print_vm_layout(void)
 128{
 129	pr_notice("Virtual kernel memory layout:\n");
 130	print_ml("fixmap", (unsigned long)FIXADDR_START,
 131		(unsigned long)FIXADDR_TOP);
 132	print_ml("pci io", (unsigned long)PCI_IO_START,
 133		(unsigned long)PCI_IO_END);
 134	print_ml("vmemmap", (unsigned long)VMEMMAP_START,
 135		(unsigned long)VMEMMAP_END);
 136	print_ml("vmalloc", (unsigned long)VMALLOC_START,
 137		(unsigned long)VMALLOC_END);
 138#ifdef CONFIG_64BIT
 139	print_ml("modules", (unsigned long)MODULES_VADDR,
 140		(unsigned long)MODULES_END);
 141#endif
 142	print_ml("lowmem", (unsigned long)PAGE_OFFSET,
 143		(unsigned long)high_memory);
 144	if (IS_ENABLED(CONFIG_64BIT)) {
 145#ifdef CONFIG_KASAN
 146		print_ml("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
 147#endif
 148
 149		print_ml("kernel", (unsigned long)KERNEL_LINK_ADDR,
 150			 (unsigned long)ADDRESS_SPACE_END);
 151	}
 152}
 153#else
 154static void print_vm_layout(void) { }
 155#endif /* CONFIG_DEBUG_VM */
 156
 157void __init mem_init(void)
 158{
 159#ifdef CONFIG_FLATMEM
 160	BUG_ON(!mem_map);
 161#endif /* CONFIG_FLATMEM */
 162
 163	swiotlb_init(max_pfn > PFN_DOWN(dma32_phys_limit), SWIOTLB_VERBOSE);
 164	memblock_free_all();
 165
 166	print_vm_layout();
 167}
 168
 169/* Limit the memory size via mem. */
 170static phys_addr_t memory_limit;
 171
 172static int __init early_mem(char *p)
 173{
 174	u64 size;
 175
 176	if (!p)
 177		return 1;
 178
 179	size = memparse(p, &p) & PAGE_MASK;
 180	memory_limit = min_t(u64, size, memory_limit);
 181
 182	pr_notice("Memory limited to %lldMB\n", (u64)memory_limit >> 20);
 183
 184	return 0;
 185}
 186early_param("mem", early_mem);
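For reference, this is the hook behind a boot-time memory cap such as the following (the size is only an example): memparse() accepts the usual K/M/G suffixes, and setup_bootmem() applies the result via memblock_enforce_memory_limit().

    mem=512M        (hypothetical: cap usable RAM at 512 MiB)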
 187
 188static void __init setup_bootmem(void)
 189{
 190	phys_addr_t vmlinux_end = __pa_symbol(&_end);
 191	phys_addr_t max_mapped_addr;
 192	phys_addr_t phys_ram_end, vmlinux_start;
 193
 194	if (IS_ENABLED(CONFIG_XIP_KERNEL))
 195		vmlinux_start = __pa_symbol(&_sdata);
 196	else
 197		vmlinux_start = __pa_symbol(&_start);
 198
 199	memblock_enforce_memory_limit(memory_limit);
 200
 201	/*
 202	 * Make sure we align the reservation on PMD_SIZE since we will
 203	 * map the kernel in the linear mapping as read-only: we do not want
 204	 * any allocation to happen between _end and the next pmd aligned page.
 205	 */
 206	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
 207		vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
 208	/*
 209	 * Reserve from the start of the kernel to the end of the kernel
 210	 */
 211	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 212
 213	phys_ram_end = memblock_end_of_DRAM();
 214	if (!IS_ENABLED(CONFIG_XIP_KERNEL))
 215		phys_ram_base = memblock_start_of_DRAM();
 216	/*
 217	 * memblock allocator is not aware of the fact that last 4K bytes of
 218	 * the addressable memory can not be mapped because of IS_ERR_VALUE
 219	 * macro. Make sure that last 4k bytes are not usable by memblock
 220	 * if end of dram is equal to maximum addressable memory.  For 64-bit
 221	 * kernel, this problem can't happen here as the end of the virtual
 222	 * address space is occupied by the kernel mapping then this check must
 223	 * be done as soon as the kernel mapping base address is determined.
 224	 */
 225	if (!IS_ENABLED(CONFIG_64BIT)) {
 226		max_mapped_addr = __pa(~(ulong)0);
 227		if (max_mapped_addr == (phys_ram_end - 1))
 228			memblock_set_current_limit(max_mapped_addr - 4096);
 229	}
 230
 231	min_low_pfn = PFN_UP(phys_ram_base);
 232	max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
 233	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
 234
 235	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
 236	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
 237
 238	reserve_initrd_mem();
 239	/*
 240	 * If DTB is built in, no need to reserve its memblock.
 241	 * Otherwise, do reserve it but avoid using
 242	 * early_init_fdt_reserve_self() since __pa() does
 243	 * not work for DTB pointers that are fixmap addresses
 244	 */
 245	if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) {
 246		/*
 247		 * In case the DTB is not located in a memory region we won't
 248		 * be able to locate it later on via the linear mapping and
 249		 * get a segfault when accessing it via __va(dtb_early_pa).
 250		 * To avoid this situation copy DTB to a memory region.
 251		 * Note that memblock_phys_alloc will also reserve DTB region.
 252		 */
 253		if (!memblock_is_memory(dtb_early_pa)) {
 254			size_t fdt_size = fdt_totalsize(dtb_early_va);
 255			phys_addr_t new_dtb_early_pa = memblock_phys_alloc(fdt_size, PAGE_SIZE);
 256			void *new_dtb_early_va = early_memremap(new_dtb_early_pa, fdt_size);
 257
 258			memcpy(new_dtb_early_va, dtb_early_va, fdt_size);
 259			early_memunmap(new_dtb_early_va, fdt_size);
 260			_dtb_early_pa = new_dtb_early_pa;
 261		} else
 262			memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
 263	}
 264
 265	dma_contiguous_reserve(dma32_phys_limit);
 266	if (IS_ENABLED(CONFIG_64BIT))
 267		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
 268	memblock_allow_resize();
 269}
 270
 271#ifdef CONFIG_MMU
 272struct pt_alloc_ops pt_ops __initdata;
 273
 274unsigned long riscv_pfn_base __ro_after_init;
 275EXPORT_SYMBOL(riscv_pfn_base);
 276
 277pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 278pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 279static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
 280
 281pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 282static p4d_t __maybe_unused early_dtb_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 283static pud_t __maybe_unused early_dtb_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
 284static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
 285
 286#ifdef CONFIG_XIP_KERNEL
 287#define pt_ops			(*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
 288#define riscv_pfn_base         (*(unsigned long  *)XIP_FIXUP(&riscv_pfn_base))
 289#define trampoline_pg_dir      ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
 290#define fixmap_pte             ((pte_t *)XIP_FIXUP(fixmap_pte))
 291#define early_pg_dir           ((pgd_t *)XIP_FIXUP(early_pg_dir))
 292#endif /* CONFIG_XIP_KERNEL */
 293
 294static const pgprot_t protection_map[16] = {
 295	[VM_NONE]					= PAGE_NONE,
 296	[VM_READ]					= PAGE_READ,
 297	[VM_WRITE]					= PAGE_COPY,
 298	[VM_WRITE | VM_READ]				= PAGE_COPY,
 299	[VM_EXEC]					= PAGE_EXEC,
 300	[VM_EXEC | VM_READ]				= PAGE_READ_EXEC,
 301	[VM_EXEC | VM_WRITE]				= PAGE_COPY_EXEC,
 302	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_READ_EXEC,
 303	[VM_SHARED]					= PAGE_NONE,
 304	[VM_SHARED | VM_READ]				= PAGE_READ,
 305	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
 306	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
 307	[VM_SHARED | VM_EXEC]				= PAGE_EXEC,
 308	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READ_EXEC,
 309	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
 310	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC
 311};
 312DECLARE_VM_GET_PAGE_PROT
 313
 314void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 315{
 316	unsigned long addr = __fix_to_virt(idx);
 317	pte_t *ptep;
 318
 319	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
 320
 321	ptep = &fixmap_pte[pte_index(addr)];
 322
 323	if (pgprot_val(prot))
 324		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
 325	else
 326		pte_clear(&init_mm, addr, ptep);
 327	local_flush_tlb_page(addr);
 328}
 329
 330static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
 331{
 332	return (pte_t *)((uintptr_t)pa);
 333}
 334
 335static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
 336{
 337	clear_fixmap(FIX_PTE);
 338	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
 339}
 340
 341static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
 342{
 343	return (pte_t *) __va(pa);
 344}
 345
 346static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
 347{
 348	/*
 349	 * We only create PMD or PGD early mappings so we
 350	 * should never reach here with MMU disabled.
 351	 */
 352	BUG();
 353}
 354
 355static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
 356{
 357	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 358}
 359
 360static phys_addr_t __init alloc_pte_late(uintptr_t va)
 361{
 362	unsigned long vaddr;
 363
 364	vaddr = __get_free_page(GFP_KERNEL);
 365	BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)));
 366
 367	return __pa(vaddr);
 368}
 369
 370static void __init create_pte_mapping(pte_t *ptep,
 371				      uintptr_t va, phys_addr_t pa,
 372				      phys_addr_t sz, pgprot_t prot)
 373{
 374	uintptr_t pte_idx = pte_index(va);
 375
 376	BUG_ON(sz != PAGE_SIZE);
 377
 378	if (pte_none(ptep[pte_idx]))
 379		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
 380}
 381
 382#ifndef __PAGETABLE_PMD_FOLDED
 383
 384static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
 385static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
 386static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
 387
 388#ifdef CONFIG_XIP_KERNEL
 389#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
 390#define fixmap_pmd     ((pmd_t *)XIP_FIXUP(fixmap_pmd))
 391#define early_pmd      ((pmd_t *)XIP_FIXUP(early_pmd))
 392#endif /* CONFIG_XIP_KERNEL */
 393
 394static p4d_t trampoline_p4d[PTRS_PER_P4D] __page_aligned_bss;
 395static p4d_t fixmap_p4d[PTRS_PER_P4D] __page_aligned_bss;
 396static p4d_t early_p4d[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 397
 398#ifdef CONFIG_XIP_KERNEL
 399#define trampoline_p4d ((p4d_t *)XIP_FIXUP(trampoline_p4d))
 400#define fixmap_p4d     ((p4d_t *)XIP_FIXUP(fixmap_p4d))
 401#define early_p4d      ((p4d_t *)XIP_FIXUP(early_p4d))
 402#endif /* CONFIG_XIP_KERNEL */
 403
 404static pud_t trampoline_pud[PTRS_PER_PUD] __page_aligned_bss;
 405static pud_t fixmap_pud[PTRS_PER_PUD] __page_aligned_bss;
 406static pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
 407
 408#ifdef CONFIG_XIP_KERNEL
 409#define trampoline_pud ((pud_t *)XIP_FIXUP(trampoline_pud))
 410#define fixmap_pud     ((pud_t *)XIP_FIXUP(fixmap_pud))
 411#define early_pud      ((pud_t *)XIP_FIXUP(early_pud))
 412#endif /* CONFIG_XIP_KERNEL */
 413
 414static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
 415{
 416	/* Before MMU is enabled */
 417	return (pmd_t *)((uintptr_t)pa);
 418}
 419
 420static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
 421{
 422	clear_fixmap(FIX_PMD);
 423	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
 424}
 425
 426static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
 427{
 428	return (pmd_t *) __va(pa);
 429}
 430
 431static phys_addr_t __init alloc_pmd_early(uintptr_t va)
 432{
 433	BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT);
 434
 435	return (uintptr_t)early_pmd;
 436}
 437
 438static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
 439{
 440	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 441}
 442
 443static phys_addr_t __init alloc_pmd_late(uintptr_t va)
 444{
 445	unsigned long vaddr;
 446
 447	vaddr = __get_free_page(GFP_KERNEL);
 448	BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page(vaddr)));
 449
 450	return __pa(vaddr);
 451}
 452
 453static void __init create_pmd_mapping(pmd_t *pmdp,
 454				      uintptr_t va, phys_addr_t pa,
 455				      phys_addr_t sz, pgprot_t prot)
 456{
 457	pte_t *ptep;
 458	phys_addr_t pte_phys;
 459	uintptr_t pmd_idx = pmd_index(va);
 460
 461	if (sz == PMD_SIZE) {
 462		if (pmd_none(pmdp[pmd_idx]))
 463			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
 464		return;
 465	}
 466
 467	if (pmd_none(pmdp[pmd_idx])) {
 468		pte_phys = pt_ops.alloc_pte(va);
 469		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
 470		ptep = pt_ops.get_pte_virt(pte_phys);
 471		memset(ptep, 0, PAGE_SIZE);
 472	} else {
 473		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
 474		ptep = pt_ops.get_pte_virt(pte_phys);
 475	}
 476
 477	create_pte_mapping(ptep, va, pa, sz, prot);
 478}
 479
 480static pud_t *__init get_pud_virt_early(phys_addr_t pa)
 481{
 482	return (pud_t *)((uintptr_t)pa);
 483}
 484
 485static pud_t *__init get_pud_virt_fixmap(phys_addr_t pa)
 486{
 487	clear_fixmap(FIX_PUD);
 488	return (pud_t *)set_fixmap_offset(FIX_PUD, pa);
 489}
 490
 491static pud_t *__init get_pud_virt_late(phys_addr_t pa)
 492{
 493	return (pud_t *)__va(pa);
 494}
 495
 496static phys_addr_t __init alloc_pud_early(uintptr_t va)
 497{
 498	/* Only one PUD is available for early mapping */
 499	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
 500
 501	return (uintptr_t)early_pud;
 502}
 503
 504static phys_addr_t __init alloc_pud_fixmap(uintptr_t va)
 505{
 506	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 507}
 508
 509static phys_addr_t alloc_pud_late(uintptr_t va)
 510{
 511	unsigned long vaddr;
 512
 513	vaddr = __get_free_page(GFP_KERNEL);
 514	BUG_ON(!vaddr);
 515	return __pa(vaddr);
 516}
 517
 518static p4d_t *__init get_p4d_virt_early(phys_addr_t pa)
 519{
 520	return (p4d_t *)((uintptr_t)pa);
 521}
 522
 523static p4d_t *__init get_p4d_virt_fixmap(phys_addr_t pa)
 524{
 525	clear_fixmap(FIX_P4D);
 526	return (p4d_t *)set_fixmap_offset(FIX_P4D, pa);
 527}
 528
 529static p4d_t *__init get_p4d_virt_late(phys_addr_t pa)
 530{
 531	return (p4d_t *)__va(pa);
 532}
 533
 534static phys_addr_t __init alloc_p4d_early(uintptr_t va)
 535{
 536	/* Only one P4D is available for early mapping */
 537	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
 538
 539	return (uintptr_t)early_p4d;
 540}
 541
 542static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va)
 543{
 544	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 545}
 546
 547static phys_addr_t alloc_p4d_late(uintptr_t va)
 548{
 549	unsigned long vaddr;
 550
 551	vaddr = __get_free_page(GFP_KERNEL);
 552	BUG_ON(!vaddr);
 553	return __pa(vaddr);
 554}
 555
 556static void __init create_pud_mapping(pud_t *pudp,
 557				      uintptr_t va, phys_addr_t pa,
 558				      phys_addr_t sz, pgprot_t prot)
 559{
 560	pmd_t *nextp;
 561	phys_addr_t next_phys;
 562	uintptr_t pud_index = pud_index(va);
 563
 564	if (sz == PUD_SIZE) {
 565		if (pud_val(pudp[pud_index]) == 0)
 566			pudp[pud_index] = pfn_pud(PFN_DOWN(pa), prot);
 567		return;
 568	}
 569
 570	if (pud_val(pudp[pud_index]) == 0) {
 571		next_phys = pt_ops.alloc_pmd(va);
 572		pudp[pud_index] = pfn_pud(PFN_DOWN(next_phys), PAGE_TABLE);
 573		nextp = pt_ops.get_pmd_virt(next_phys);
 574		memset(nextp, 0, PAGE_SIZE);
 575	} else {
 576		next_phys = PFN_PHYS(_pud_pfn(pudp[pud_index]));
 577		nextp = pt_ops.get_pmd_virt(next_phys);
 578	}
 579
 580	create_pmd_mapping(nextp, va, pa, sz, prot);
 581}
 582
 583static void __init create_p4d_mapping(p4d_t *p4dp,
 584				      uintptr_t va, phys_addr_t pa,
 585				      phys_addr_t sz, pgprot_t prot)
 586{
 587	pud_t *nextp;
 588	phys_addr_t next_phys;
 589	uintptr_t p4d_index = p4d_index(va);
 590
 591	if (sz == P4D_SIZE) {
 592		if (p4d_val(p4dp[p4d_index]) == 0)
 593			p4dp[p4d_index] = pfn_p4d(PFN_DOWN(pa), prot);
 594		return;
 595	}
 596
 597	if (p4d_val(p4dp[p4d_index]) == 0) {
 598		next_phys = pt_ops.alloc_pud(va);
 599		p4dp[p4d_index] = pfn_p4d(PFN_DOWN(next_phys), PAGE_TABLE);
 600		nextp = pt_ops.get_pud_virt(next_phys);
 601		memset(nextp, 0, PAGE_SIZE);
 602	} else {
 603		next_phys = PFN_PHYS(_p4d_pfn(p4dp[p4d_index]));
 604		nextp = pt_ops.get_pud_virt(next_phys);
 605	}
 606
 607	create_pud_mapping(nextp, va, pa, sz, prot);
 608}
 609
 610#define pgd_next_t		p4d_t
 611#define alloc_pgd_next(__va)	(pgtable_l5_enabled ?			\
 612		pt_ops.alloc_p4d(__va) : (pgtable_l4_enabled ?		\
 613		pt_ops.alloc_pud(__va) : pt_ops.alloc_pmd(__va)))
 614#define get_pgd_next_virt(__pa)	(pgtable_l5_enabled ?			\
 615		pt_ops.get_p4d_virt(__pa) : (pgd_next_t *)(pgtable_l4_enabled ?	\
 616		pt_ops.get_pud_virt(__pa) : (pud_t *)pt_ops.get_pmd_virt(__pa)))
 617#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
 618				(pgtable_l5_enabled ?			\
 619		create_p4d_mapping(__nextp, __va, __pa, __sz, __prot) : \
 620				(pgtable_l4_enabled ?			\
 621		create_pud_mapping((pud_t *)__nextp, __va, __pa, __sz, __prot) :	\
 622		create_pmd_mapping((pmd_t *)__nextp, __va, __pa, __sz, __prot)))
 623#define fixmap_pgd_next		(pgtable_l5_enabled ?			\
 624		(uintptr_t)fixmap_p4d : (pgtable_l4_enabled ?		\
 625		(uintptr_t)fixmap_pud : (uintptr_t)fixmap_pmd))
 626#define trampoline_pgd_next	(pgtable_l5_enabled ?			\
 627		(uintptr_t)trampoline_p4d : (pgtable_l4_enabled ?	\
 628		(uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd))
 629#define early_dtb_pgd_next	(pgtable_l5_enabled ?			\
 630		(uintptr_t)early_dtb_p4d : (pgtable_l4_enabled ?	\
 631		(uintptr_t)early_dtb_pud : (uintptr_t)early_dtb_pmd))
 632#else
 633#define pgd_next_t		pte_t
 634#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
 635#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
 636#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
 637	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
 638#define fixmap_pgd_next		((uintptr_t)fixmap_pte)
 639#define early_dtb_pgd_next	((uintptr_t)early_dtb_pmd)
 640#define create_p4d_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
 641#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
 642#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
 643#endif /* __PAGETABLE_PMD_FOLDED */
 644
 645void __init create_pgd_mapping(pgd_t *pgdp,
 646				      uintptr_t va, phys_addr_t pa,
 647				      phys_addr_t sz, pgprot_t prot)
 648{
 649	pgd_next_t *nextp;
 650	phys_addr_t next_phys;
 651	uintptr_t pgd_idx = pgd_index(va);
 652
 653	if (sz == PGDIR_SIZE) {
 654		if (pgd_val(pgdp[pgd_idx]) == 0)
 655			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
 656		return;
 657	}
 658
 659	if (pgd_val(pgdp[pgd_idx]) == 0) {
 660		next_phys = alloc_pgd_next(va);
 661		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
 662		nextp = get_pgd_next_virt(next_phys);
 663		memset(nextp, 0, PAGE_SIZE);
 664	} else {
 665		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
 666		nextp = get_pgd_next_virt(next_phys);
 667	}
 668
 669	create_pgd_next_mapping(nextp, va, pa, sz, prot);
 670}
 671
 672static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
 673{
 674	/* Upgrade to PMD_SIZE mappings whenever possible */
 675	base &= PMD_SIZE - 1;
 676	if (!base && size >= PMD_SIZE)
 677		return PMD_SIZE;
 678
 679	return PAGE_SIZE;
 680}
 681
 682#ifdef CONFIG_XIP_KERNEL
 683#define phys_ram_base  (*(phys_addr_t *)XIP_FIXUP(&phys_ram_base))
 684extern char _xiprom[], _exiprom[], __data_loc;
 685
 686/* called from head.S with MMU off */
 687asmlinkage void __init __copy_data(void)
 688{
 689	void *from = (void *)(&__data_loc);
 690	void *to = (void *)CONFIG_PHYS_RAM_BASE;
 691	size_t sz = (size_t)((uintptr_t)(&_end) - (uintptr_t)(&_sdata));
 692
 693	memcpy(to, from, sz);
 694}
 695#endif
 696
 697#ifdef CONFIG_STRICT_KERNEL_RWX
 698static __init pgprot_t pgprot_from_va(uintptr_t va)
 699{
 700	if (is_va_kernel_text(va))
 701		return PAGE_KERNEL_READ_EXEC;
 702
 703	/*
 704	 * In 64-bit kernel, the kernel mapping is outside the linear mapping so
 705	 * we must protect its linear mapping alias from being executed and
 706	 * written.
 707	 * And rodata section is marked readonly in mark_rodata_ro.
 708	 */
 709	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
 710		return PAGE_KERNEL_READ;
 711
 712	return PAGE_KERNEL;
 713}
 714
 715void mark_rodata_ro(void)
 716{
 717	set_kernel_memory(__start_rodata, _data, set_memory_ro);
 718	if (IS_ENABLED(CONFIG_64BIT))
 719		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
 720				  set_memory_ro);
 721
 722	debug_checkwx();
 723}
 724#else
 725static __init pgprot_t pgprot_from_va(uintptr_t va)
 726{
 727	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
 728		return PAGE_KERNEL;
 729
 730	return PAGE_KERNEL_EXEC;
 731}
 732#endif /* CONFIG_STRICT_KERNEL_RWX */
 733
 734#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
 735static void __init disable_pgtable_l5(void)
 736{
 737	pgtable_l5_enabled = false;
 738	kernel_map.page_offset = PAGE_OFFSET_L4;
 739	satp_mode = SATP_MODE_48;
 740}
 741
 742static void __init disable_pgtable_l4(void)
 743{
 744	pgtable_l4_enabled = false;
 745	kernel_map.page_offset = PAGE_OFFSET_L3;
 746	satp_mode = SATP_MODE_39;
 747}
 748
 749/*
 750 * There is a simple way to determine if 4-level is supported by the
 751 * underlying hardware: establish 1:1 mapping in 4-level page table mode
 752 * then read SATP to see if the configuration was taken into account
 753 * meaning sv48 is supported.
 754 */
 755static __init void set_satp_mode(void)
 756{
 757	u64 identity_satp, hw_satp;
 758	uintptr_t set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
 759	bool check_l4 = false;
 760
 761	create_p4d_mapping(early_p4d,
 762			set_satp_mode_pmd, (uintptr_t)early_pud,
 763			P4D_SIZE, PAGE_TABLE);
 764	create_pud_mapping(early_pud,
 765			   set_satp_mode_pmd, (uintptr_t)early_pmd,
 766			   PUD_SIZE, PAGE_TABLE);
 767	/* Handle the case where set_satp_mode straddles 2 PMDs */
 768	create_pmd_mapping(early_pmd,
 769			   set_satp_mode_pmd, set_satp_mode_pmd,
 770			   PMD_SIZE, PAGE_KERNEL_EXEC);
 771	create_pmd_mapping(early_pmd,
 772			   set_satp_mode_pmd + PMD_SIZE,
 773			   set_satp_mode_pmd + PMD_SIZE,
 774			   PMD_SIZE, PAGE_KERNEL_EXEC);
 775retry:
 776	create_pgd_mapping(early_pg_dir,
 777			   set_satp_mode_pmd,
 778			   check_l4 ? (uintptr_t)early_pud : (uintptr_t)early_p4d,
 779			   PGDIR_SIZE, PAGE_TABLE);
 780
 781	identity_satp = PFN_DOWN((uintptr_t)&early_pg_dir) | satp_mode;
 782
 783	local_flush_tlb_all();
 784	csr_write(CSR_SATP, identity_satp);
 785	hw_satp = csr_swap(CSR_SATP, 0ULL);
 786	local_flush_tlb_all();
 787
 788	if (hw_satp != identity_satp) {
 789		if (!check_l4) {
 790			disable_pgtable_l5();
 791			check_l4 = true;
 792			memset(early_pg_dir, 0, PAGE_SIZE);
 793			goto retry;
 794		}
 795		disable_pgtable_l4();
 796	}
 797
 798	memset(early_pg_dir, 0, PAGE_SIZE);
 799	memset(early_p4d, 0, PAGE_SIZE);
 800	memset(early_pud, 0, PAGE_SIZE);
 801	memset(early_pmd, 0, PAGE_SIZE);
 802}
 803#endif
 804
 805/*
 806 * setup_vm() is called from head.S with MMU-off.
 807 *
 808 * Following requirements should be honoured for setup_vm() to work
 809 * correctly:
 810 * 1) It should use PC-relative addressing for accessing kernel symbols.
 811 *    To achieve this we always use GCC cmodel=medany.
 812 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
 813 *    so disable compiler instrumentation when FTRACE is enabled.
 814 *
 815 * Currently, the above requirements are honoured by using custom CFLAGS
 816 * for init.o in mm/Makefile.
 817 */
 818
 819#ifndef __riscv_cmodel_medany
 820#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 821#endif
 822
 823#ifdef CONFIG_XIP_KERNEL
 824static void __init create_kernel_page_table(pgd_t *pgdir,
 825					    __always_unused bool early)
 826{
 827	uintptr_t va, end_va;
 828
 829	/* Map the flash resident part */
 830	end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
 831	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
 832		create_pgd_mapping(pgdir, va,
 833				   kernel_map.xiprom + (va - kernel_map.virt_addr),
 834				   PMD_SIZE, PAGE_KERNEL_EXEC);
 835
 836	/* Map the data in RAM */
 837	end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
 838	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
 839		create_pgd_mapping(pgdir, va,
 840				   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
 841				   PMD_SIZE, PAGE_KERNEL);
 842}
 843#else
 844static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
 845{
 846	uintptr_t va, end_va;
 847
 848	end_va = kernel_map.virt_addr + kernel_map.size;
 849	for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
 850		create_pgd_mapping(pgdir, va,
 851				   kernel_map.phys_addr + (va - kernel_map.virt_addr),
 852				   PMD_SIZE,
 853				   early ?
 854					PAGE_KERNEL_EXEC : pgprot_from_va(va));
 855}
 856#endif
 857
 858/*
 859 * Setup a 4MB mapping that encompasses the device tree: for 64-bit kernel,
 860 * this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
 861 * entry.
 862 */
 863static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
 864{
 865#ifndef CONFIG_BUILTIN_DTB
 866	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
 867
 868	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
 869			   IS_ENABLED(CONFIG_64BIT) ? early_dtb_pgd_next : pa,
 870			   PGDIR_SIZE,
 871			   IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);
 872
 873	if (pgtable_l5_enabled)
 874		create_p4d_mapping(early_dtb_p4d, DTB_EARLY_BASE_VA,
 875				   (uintptr_t)early_dtb_pud, P4D_SIZE, PAGE_TABLE);
 876
 877	if (pgtable_l4_enabled)
 878		create_pud_mapping(early_dtb_pud, DTB_EARLY_BASE_VA,
 879				   (uintptr_t)early_dtb_pmd, PUD_SIZE, PAGE_TABLE);
 880
 881	if (IS_ENABLED(CONFIG_64BIT)) {
 882		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
 883				   pa, PMD_SIZE, PAGE_KERNEL);
 884		create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
 885				   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
 886	}
 887
 888	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
 889#else
 890	/*
 891	 * For 64-bit kernel, __va can't be used since it would return a linear
 892	 * mapping address whereas dtb_early_va will be used before
 893	 * setup_vm_final installs the linear mapping. For 32-bit kernel, as the
 894	 * kernel is mapped in the linear mapping, that makes no difference.
 895	 */
 896	dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
 897#endif
 898
 899	dtb_early_pa = dtb_pa;
 900}
 901
 902/*
 903 * MMU is not enabled, the page tables are allocated directly using
 904 * early_pmd/pud/p4d and the address returned is the physical one.
 905 */
 906static void __init pt_ops_set_early(void)
 907{
 908	pt_ops.alloc_pte = alloc_pte_early;
 909	pt_ops.get_pte_virt = get_pte_virt_early;
 910#ifndef __PAGETABLE_PMD_FOLDED
 911	pt_ops.alloc_pmd = alloc_pmd_early;
 912	pt_ops.get_pmd_virt = get_pmd_virt_early;
 913	pt_ops.alloc_pud = alloc_pud_early;
 914	pt_ops.get_pud_virt = get_pud_virt_early;
 915	pt_ops.alloc_p4d = alloc_p4d_early;
 916	pt_ops.get_p4d_virt = get_p4d_virt_early;
 917#endif
 918}
 919
 920/*
 921 * MMU is enabled but page table setup is not complete yet.
 922 * fixmap page table alloc functions must be used as a means to temporarily
 923 * map the allocated physical pages since the linear mapping does not exist yet.
 924 *
 925 * Note that this is called with MMU disabled, hence kernel_mapping_pa_to_va,
 926 * but it will be used as described above.
 927 */
 928static void __init pt_ops_set_fixmap(void)
 929{
 930	pt_ops.alloc_pte = kernel_mapping_pa_to_va(alloc_pte_fixmap);
 931	pt_ops.get_pte_virt = kernel_mapping_pa_to_va(get_pte_virt_fixmap);
 932#ifndef __PAGETABLE_PMD_FOLDED
 933	pt_ops.alloc_pmd = kernel_mapping_pa_to_va(alloc_pmd_fixmap);
 934	pt_ops.get_pmd_virt = kernel_mapping_pa_to_va(get_pmd_virt_fixmap);
 935	pt_ops.alloc_pud = kernel_mapping_pa_to_va(alloc_pud_fixmap);
 936	pt_ops.get_pud_virt = kernel_mapping_pa_to_va(get_pud_virt_fixmap);
 937	pt_ops.alloc_p4d = kernel_mapping_pa_to_va(alloc_p4d_fixmap);
 938	pt_ops.get_p4d_virt = kernel_mapping_pa_to_va(get_p4d_virt_fixmap);
 939#endif
 940}
 941
 942/*
 943 * MMU is enabled and page table setup is complete, so from now, we can use
 944 * generic page allocation functions to setup page table.
 945 */
 946static void __init pt_ops_set_late(void)
 947{
 948	pt_ops.alloc_pte = alloc_pte_late;
 949	pt_ops.get_pte_virt = get_pte_virt_late;
 950#ifndef __PAGETABLE_PMD_FOLDED
 951	pt_ops.alloc_pmd = alloc_pmd_late;
 952	pt_ops.get_pmd_virt = get_pmd_virt_late;
 953	pt_ops.alloc_pud = alloc_pud_late;
 954	pt_ops.get_pud_virt = get_pud_virt_late;
 955	pt_ops.alloc_p4d = alloc_p4d_late;
 956	pt_ops.get_p4d_virt = get_p4d_virt_late;
 957#endif
 958}
 959
 960asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 961{
 962	pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
 963
 964	kernel_map.virt_addr = KERNEL_LINK_ADDR;
 965	kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
 966
 967#ifdef CONFIG_XIP_KERNEL
 968	kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
 969	kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
 970
 971	phys_ram_base = CONFIG_PHYS_RAM_BASE;
 972	kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
 973	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
 974
 975	kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
 976#else
 977	kernel_map.phys_addr = (uintptr_t)(&_start);
 978	kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
 979#endif
 980
 981#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
 982	set_satp_mode();
 983#endif
 984
 985	kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
 986	kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
 987
 988	riscv_pfn_base = PFN_DOWN(kernel_map.phys_addr);
 989
 990	/*
 991	 * The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit
 992	 * kernel, whereas for 64-bit kernel, the end of the virtual address
 993	 * space is occupied by the modules/BPF/kernel mappings which reduces
 994	 * the available size of the linear mapping.
 995	 */
 996	memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0);
 997
 998	/* Sanity check alignment and size */
 999	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
1000	BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
1001
1002#ifdef CONFIG_64BIT
1003	/*
1004	 * The last 4K bytes of the addressable memory can not be mapped because
1005	 * of IS_ERR_VALUE macro.
1006	 */
1007	BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
1008#endif
1009
1010	apply_early_boot_alternatives();
1011	pt_ops_set_early();
1012
1013	/* Setup early PGD for fixmap */
1014	create_pgd_mapping(early_pg_dir, FIXADDR_START,
1015			   fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
1016
1017#ifndef __PAGETABLE_PMD_FOLDED
1018	/* Setup fixmap P4D and PUD */
1019	if (pgtable_l5_enabled)
1020		create_p4d_mapping(fixmap_p4d, FIXADDR_START,
1021				   (uintptr_t)fixmap_pud, P4D_SIZE, PAGE_TABLE);
1022	/* Setup fixmap PUD and PMD */
1023	if (pgtable_l4_enabled)
1024		create_pud_mapping(fixmap_pud, FIXADDR_START,
1025				   (uintptr_t)fixmap_pmd, PUD_SIZE, PAGE_TABLE);
1026	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
1027			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
1028	/* Setup trampoline PGD and PMD */
1029	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
1030			   trampoline_pgd_next, PGDIR_SIZE, PAGE_TABLE);
1031	if (pgtable_l5_enabled)
1032		create_p4d_mapping(trampoline_p4d, kernel_map.virt_addr,
1033				   (uintptr_t)trampoline_pud, P4D_SIZE, PAGE_TABLE);
1034	if (pgtable_l4_enabled)
1035		create_pud_mapping(trampoline_pud, kernel_map.virt_addr,
1036				   (uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE);
1037#ifdef CONFIG_XIP_KERNEL
1038	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
1039			   kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
1040#else
1041	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
1042			   kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
1043#endif
1044#else
1045	/* Setup trampoline PGD */
1046	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
1047			   kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC);
1048#endif
1049
1050	/*
1051	 * Setup early PGD covering entire kernel which will allow
1052	 * us to reach paging_init(). We map all memory banks later
1053	 * in setup_vm_final() below.
1054	 */
1055	create_kernel_page_table(early_pg_dir, true);
1056
1057	/* Setup early mapping for FDT early scan */
1058	create_fdt_early_page_table(early_pg_dir, dtb_pa);
1059
1060	/*
1061	 * Boot-time fixmap can only handle PMD_SIZE mapping. Thus, boot-ioremap
1062	 * range can not span multiple pmds.
1063	 */
1064	BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
1065		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
1066
1067#ifndef __PAGETABLE_PMD_FOLDED
1068	/*
1069	 * Early ioremap fixmap is already created as it lies within first 2MB
1070	 * of fixmap region. We always map PMD_SIZE. Thus, both FIX_BTMAP_END and
1071	 * FIX_BTMAP_BEGIN should lie in the same pmd. Verify that and warn
1072	 * the user if not.
1073	 */
1074	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
1075	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
1076	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
1077		WARN_ON(1);
1078		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
1079			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
1080		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
1081			fix_to_virt(FIX_BTMAP_BEGIN));
1082		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
1083			fix_to_virt(FIX_BTMAP_END));
1084
1085		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
1086		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
1087	}
1088#endif
1089
1090	pt_ops_set_fixmap();
1091}
1092
1093static void __init setup_vm_final(void)
1094{
1095	uintptr_t va, map_size;
1096	phys_addr_t pa, start, end;
1097	u64 i;
1098
1099	/* Setup swapper PGD for fixmap */
1100	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
1101			   __pa_symbol(fixmap_pgd_next),
1102			   PGDIR_SIZE, PAGE_TABLE);
1103
1104	/* Map all memory banks in the linear mapping */
1105	for_each_mem_range(i, &start, &end) {
1106		if (start >= end)
1107			break;
1108		if (start <= __pa(PAGE_OFFSET) &&
1109		    __pa(PAGE_OFFSET) < end)
1110			start = __pa(PAGE_OFFSET);
1111		if (end >= __pa(PAGE_OFFSET) + memory_limit)
1112			end = __pa(PAGE_OFFSET) + memory_limit;
1113
1114		for (pa = start; pa < end; pa += map_size) {
1115			va = (uintptr_t)__va(pa);
1116			map_size = best_map_size(pa, end - pa);
1117
1118			create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
1119					   pgprot_from_va(va));
1120		}
1121	}
1122
1123	/* Map the kernel */
1124	if (IS_ENABLED(CONFIG_64BIT))
1125		create_kernel_page_table(swapper_pg_dir, false);
1126
1127#ifdef CONFIG_KASAN
1128	kasan_swapper_init();
1129#endif
1130
1131	/* Clear fixmap PTE and PMD mappings */
1132	clear_fixmap(FIX_PTE);
1133	clear_fixmap(FIX_PMD);
1134	clear_fixmap(FIX_PUD);
1135	clear_fixmap(FIX_P4D);
1136
1137	/* Move to swapper page table */
1138	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | satp_mode);
1139	local_flush_tlb_all();
1140
1141	pt_ops_set_late();
1142}
1143#else
1144asmlinkage void __init setup_vm(uintptr_t dtb_pa)
1145{
1146	dtb_early_va = (void *)dtb_pa;
1147	dtb_early_pa = dtb_pa;
1148}
1149
1150static inline void setup_vm_final(void)
1151{
1152}
1153#endif /* CONFIG_MMU */
1154
1155/*
1156 * reserve_crashkernel() - reserves memory for crash kernel
1157 *
1158 * This function reserves memory area given in "crashkernel=" kernel command
1159 * line parameter. The memory reserved is used by dump capture kernel when
1160 * primary kernel is crashing.
1161 */
1162static void __init reserve_crashkernel(void)
1163{
1164	unsigned long long crash_base = 0;
1165	unsigned long long crash_size = 0;
1166	unsigned long search_start = memblock_start_of_DRAM();
1167	unsigned long search_end = memblock_end_of_DRAM();
1168
1169	int ret = 0;
1170
1171	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
1172		return;
1173	/*
1174	 * Don't reserve a region for a crash kernel on a crash kernel
1175	 * since it doesn't make much sense and we have limited memory
1176	 * resources.
1177	 */
1178	if (is_kdump_kernel()) {
1179		pr_info("crashkernel: ignoring reservation request\n");
1180		return;
1181	}
1182
1183	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
1184				&crash_size, &crash_base);
1185	if (ret || !crash_size)
1186		return;
1187
1188	crash_size = PAGE_ALIGN(crash_size);
1189
1190	if (crash_base) {
1191		search_start = crash_base;
1192		search_end = crash_base + crash_size;
1193	}
1194
1195	/*
1196	 * Current riscv boot protocol requires 2MB alignment for
1197	 * RV64 and 4MB alignment for RV32 (hugepage size)
1198	 *
1199	 * Try to alloc from 32bit addressable physical memory so that
1200	 * swiotlb can work on the crash kernel.
1201	 */
1202	crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
1203					       search_start,
1204					       min(search_end, (unsigned long) SZ_4G));
1205	if (crash_base == 0) {
1206		/* Try again without restricting region to 32bit addressable memory */
1207		crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
1208						search_start, search_end);
1209		if (crash_base == 0) {
1210			pr_warn("crashkernel: couldn't allocate %lldKB\n",
1211				crash_size >> 10);
1212			return;
1213		}
1214	}
1215
1216	pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
1217		crash_base, crash_base + crash_size, crash_size >> 20);
1218
1219	crashk_res.start = crash_base;
1220	crashk_res.end = crash_base + crash_size - 1;
1221}
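For context, the reservation above is driven by the crashkernel= command-line option handled by the generic parse_crashkernel() helper; two hypothetical forms:

    crashkernel=256M              (reserve 256 MiB wherever the allocator finds room)
    crashkernel=256M@0x90000000   (reserve 256 MiB at a fixed physical base)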
1222
1223void __init paging_init(void)
1224{
1225	setup_bootmem();
1226	setup_vm_final();
1227}
1228
1229void __init misc_mem_init(void)
1230{
1231	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
1232	arch_numa_init();
1233	sparse_init();
1234	zone_sizes_init();
1235	reserve_crashkernel();
1236	memblock_dump_all();
1237}
1238
1239#ifdef CONFIG_SPARSEMEM_VMEMMAP
1240int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
1241			       struct vmem_altmap *altmap)
1242{
1243	return vmemmap_populate_basepages(start, end, node, NULL);
1244}
1245#endif