// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

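/*
 * Temporary pgd: keeps the early shadow mapped while swapper_pg_dir is
 * being rewritten; see kasan_init_shadow().
 */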
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */

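/*
 * Allocate a zeroed page for a (non-early) shadow page table. There is no
 * way to recover from an allocation failure this early in boot, so panic.
 */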
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					 __pa(MAX_DMA_ADDRESS),
					 MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

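/*
 * Like kasan_alloc_zeroed_page(), but skips memblock's zeroing: the caller
 * initializes the page itself (kasan_pte_populate() fills it with
 * KASAN_SHADOW_INIT, which is 0 for generic KASAN and 0xFF for software
 * tag-based KASAN).
 */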
static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
					     __pa(MAX_DMA_ADDRESS),
					     MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

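/*
 * The kasan_*_offset() helpers return a pointer to the table entry for
 * @addr, populating the next-level table first if it is empty. In the
 * early case the next level is the statically allocated
 * kasan_early_shadow_* table, referenced via __pa_symbol (see the comment
 * above); otherwise a fresh zeroed page is allocated.
 */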
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
				      bool early)
{
	if (p4d_none(READ_ONCE(*p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

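/*
 * The kasan_*_populate() walkers map shadow for [addr, end). In the early
 * case every PTE points at the shared kasan_early_shadow_page; otherwise
 * each PTE gets its own page, filled with KASAN_SHADOW_INIT. The PTE, PMD
 * and PUD walks stop early when they hit an entry that is already
 * populated.
 */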
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = p4d_offset(pgdp, addr);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end);
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
			      unsigned long end)
{
	/*
	 * Remove references to kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it's a no-op on 2- and 3-level
	 * page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

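/*
 * Replace the early (single zero page) shadow with real shadow memory.
 * For generic KASAN, KASAN_SHADOW_SCALE_SHIFT is 3, so each shadow byte
 * covers 8 bytes of address space and kasan_mem_to_shadow() computes
 * roughly:
 *
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * (software tag-based KASAN uses a shift of 4 instead).
 */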
static void __init kasan_init_shadow(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	u64 vmalloc_shadow_end;
	phys_addr_t pa_start, pa_end;
	u64 i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);

	/*
	 * We are about to perform the proper setup of shadow memory. The
	 * first step is to unmap the early shadow (the clear_pgds() call
	 * below). However, instrumented code can't execute without shadow
	 * memory, so tmp_pg_dir is used to keep the early shadow mapped
	 * until the full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
				    (void *)mod_shadow_start);

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		BUILD_BUG_ON(VMALLOC_START != MODULES_END);
		kasan_populate_early_shadow((void *)vmalloc_shadow_end,
					    (void *)KASAN_SHADOW_END);
	} else {
		kasan_populate_early_shadow((void *)kimg_shadow_end,
					    (void *)KASAN_SHADOW_END);
		if (kimg_shadow_start > mod_shadow_end)
			kasan_populate_early_shadow((void *)mod_shadow_end,
						    (void *)kimg_shadow_start);
	}

	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)__phys_to_virt(pa_start);
		void *end = (void *)__phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KASAN may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
}

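/*
 * init_task.kasan_depth starts out nonzero so that KASAN reports are
 * suppressed during early boot; clearing it here turns error reporting on.
 */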
static void __init kasan_init_depth(void)
{
	init_task.kasan_depth = 0;
}

void __init kasan_init(void)
{
	kasan_init_shadow();
	kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
	pr_info("KernelAddressSanitizer initialized\n");
#endif
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */