v5.9
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>

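/*
 * kasan_early_init(): alias every page of the KASAN shadow region to
 * the single zeroed kasan_early_shadow_page through shared early page
 * tables (first in early_pg_dir, then in swapper_pg_dir), so shadow
 * lookups work before real shadow memory can be allocated.
 */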
extern pgd_t early_pg_dir[PTRS_PER_PGD];
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;
	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t) kasan_early_shadow_pte)),
				__pgprot(_PAGE_TABLE)));

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN
				(__pa(((uintptr_t) kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	/* init for swapper_pg_dir */
	pgd = pgd_offset_k(KASAN_SHADOW_START);

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN
				(__pa(((uintptr_t) kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	local_flush_tlb_all();
}

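/*
 * populate(): allocate real shadow for [start, end) in one shot: the
 * backing pages plus all the PTE and PMD tables needed to map them,
 * taken from memblock.
 */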
static void __init populate(void *start, void *end)
{
	unsigned long i, offset;
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);
	unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
	unsigned long n_ptes =
	    ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
	unsigned long n_pmds =
	    ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;

	pte_t *pte =
	    memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	pmd_t *pmd =
	    memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	pgd_t *pgd = pgd_offset_k(vaddr);

	for (i = 0; i < n_pages; i++) {
		phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
		set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
	}

	for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
		set_pmd(&pmd[i],
			pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
				__pgprot(_PAGE_TABLE)));

	for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
		set_pgd(&pgd[i],
			pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
				__pgprot(_PAGE_TABLE)));

	local_flush_tlb_all();
	memset(start, 0, end - start);
}

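/*
 * kasan_init(): keep the early shadow page for the shadow of everything
 * up to VMALLOC_END, allocate real shadow for each memblock memory
 * region, then remap the early shadow page read-only.
 */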
void __init kasan_init(void)
{
	struct memblock_region *reg;
	unsigned long i;

	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)kasan_mem_to_shadow((void *)
								VMALLOC_END));

	for_each_memblock(memory, reg) {
		void *start = (void *)__va(reg->base);
		void *end = (void *)__va(reg->base + reg->size);

		if (start >= end)
			break;

		populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	};

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	init_task.kasan_depth = 0;
}
v5.14.15
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

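/*
 * kasan_early_init() is unchanged from the v5.9 listing above: alias the
 * whole shadow region to kasan_early_shadow_page in both early_pg_dir
 * and swapper_pg_dir until real shadow can be allocated.
 */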
extern pgd_t early_pg_dir[PTRS_PER_PGD];
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;
	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t) kasan_early_shadow_pte)),
				__pgprot(_PAGE_TABLE)));

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN
				(__pa(((uintptr_t) kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	/* init for swapper_pg_dir */
	pgd = pgd_offset_k(KASAN_SHADOW_START);

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN
				(__pa(((uintptr_t) kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	local_flush_tlb_all();
}

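/*
 * Populate one PTE table for [vaddr, end): allocate a fresh table when
 * the PMD is empty, otherwise reuse the one it points at; give every
 * still-empty PTE a freshly allocated backing page, then install the
 * table in the PMD.
 */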
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

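/*
 * Populate the PMD level for [vaddr, end). A private PMD table replaces
 * the shared early shadow PMD if needed; PMD_SIZE-aligned chunks get a
 * PMD-level hugepage when memblock can provide one, everything else
 * falls back to page-granular PTEs.
 */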
static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	base_pmd = (pmd_t *)pgd_page_vaddr(*pgd);
	if (base_pmd == lm_alias(kasan_early_shadow_pmd))
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD to be populated before setting the PGD in
	 * the page table, otherwise, if we did set the PGD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

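/*
 * Top of the walk: map PGDIR_SIZE-aligned chunks with a single PGD-level
 * entry when possible, otherwise descend into kasan_populate_pmd().
 */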
static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		/*
		 * pgdp can't be none since kasan_early_init initialized the
		 * whole KASAN shadow region with kasan_early_shadow_pmd: if
		 * this is still the case, we can try to allocate a hugepage
		 * as a replacement.
		 */
		if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
		    IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pmd(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

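/*
 * Entry point for building real shadow over [start, end): page-align
 * the range, populate the page tables, flush the TLB and initialize the
 * new shadow with KASAN_SHADOW_INIT.
 */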
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(vaddr, vend);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}

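/*
 * With CONFIG_KASAN_VMALLOC the vmalloc shadow is populated on demand,
 * so here it is enough to make each PGD entry point at its own empty
 * page table instead of the shared early shadow PMD.
 */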
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);

	do {
		next = pgd_addr_end(vaddr, end);
		if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}
	} while (pgd_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
	local_flush_tlb_all();
}

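/*
 * Main setup: early (read-only zero) shadow for everything that never
 * needs real shadow, shallow tables for vmalloc when CONFIG_KASAN_VMALLOC
 * is set, and fully populated shadow for the linear mapping and the
 * kernel/BPF/modules range.
 */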
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	/*
	 * Populate all kernel virtual address space with kasan_early_shadow_page
	 * except for the linear mapping and the modules/kernel/BPF mapping.
	 */
	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)kasan_mem_to_shadow((void *)
								VMEMMAP_END));
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
	else
		kasan_populate_early_shadow(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
}