// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (kimg). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */
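
/*
 * For illustration only (never called), the early path of the helpers below
 * boils down to the following, assuming a pmd entry and the prebuilt
 * kasan_early_shadow_pte table:
 *
 *        phys_addr_t pte_phys = __pa_symbol(kasan_early_shadow_pte);
 *        __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
 *
 * whereas pmd_populate_kernel(&init_mm, pmdp, kasan_early_shadow_pte) would
 * end up doing __pa(kasan_early_shadow_pte) internally, which is only
 * guaranteed correct for linear-map addresses, not kernel-image symbols.
 */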

static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
        void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
                                         __pa(MAX_DMA_ADDRESS),
                                         MEMBLOCK_ALLOC_KASAN, node);
        if (!p)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE, node,
                      __pa(MAX_DMA_ADDRESS));

        return __pa(p);
}

static phys_addr_t __init kasan_alloc_raw_page(int node)
{
        void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
                                             __pa(MAX_DMA_ADDRESS),
                                             MEMBLOCK_ALLOC_KASAN, node);
        if (!p)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE, node,
                      __pa(MAX_DMA_ADDRESS));

        return __pa(p);
}
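
/*
 * kasan_alloc_zeroed_page() hands back memory that memblock has already
 * zeroed, which is what new page tables want. kasan_alloc_raw_page() skips
 * that memset: its pages back the shadow itself and are explicitly filled
 * with KASAN_SHADOW_INIT by the caller, so pre-zeroing would be wasted work.
 */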

static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
                                      bool early)
{
        if (pmd_none(READ_ONCE(*pmdp))) {
                phys_addr_t pte_phys = early ?
                                __pa_symbol(kasan_early_shadow_pte)
                                        : kasan_alloc_zeroed_page(node);
                __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
        }

        return early ? pte_offset_kimg(pmdp, addr)
                     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
                                      bool early)
{
        if (pud_none(READ_ONCE(*pudp))) {
                phys_addr_t pmd_phys = early ?
                                __pa_symbol(kasan_early_shadow_pmd)
                                        : kasan_alloc_zeroed_page(node);
                __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
        }

        return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
                                      bool early)
{
        if (p4d_none(READ_ONCE(*p4dp))) {
                phys_addr_t pud_phys = early ?
                                __pa_symbol(kasan_early_shadow_pud)
                                        : kasan_alloc_zeroed_page(node);
                __p4d_populate(p4dp, pud_phys, PMD_TYPE_TABLE);
        }

        return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

        do {
                phys_addr_t page_phys = early ?
                                __pa_symbol(kasan_early_shadow_page)
                                        : kasan_alloc_raw_page(node);
                if (!early)
                        memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
                next = addr + PAGE_SIZE;
                set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
        } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}
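
/*
 * The loops above and below share one idiom: each level advances until it
 * either reaches 'end' or runs into an entry that is already populated
 * (the p*d_none() check in the loop condition fails). In the early path all
 * entries of a given level point at the same shared shadow table, so hitting
 * a populated entry means the rest of the range is already covered and
 * rewriting it would be redundant.
 */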

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

        do {
                next = pmd_addr_end(addr, end);
                kasan_pte_populate(pmdp, addr, next, node, early);
        } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

        do {
                next = pud_addr_end(addr, end);
                kasan_pmd_populate(pudp, addr, next, node, early);
        } while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        p4d_t *p4dp = p4d_offset(pgdp, addr);

        do {
                next = p4d_addr_end(addr, end);
                kasan_pud_populate(p4dp, addr, next, node, early);
        } while (p4dp++, addr = next, addr != end);
}
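
/*
 * arm64 uses at most four page-table levels, so the p4d level is folded into
 * the pgd: p4d_offset() above simply hands back the pgd entry recast as a
 * p4d, making this function effectively the pgd-level step of the walk.
 */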

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
                                      int node, bool early)
{
        unsigned long next;
        pgd_t *pgdp;

        pgdp = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                kasan_p4d_populate(pgdp, addr, next, node, early);
        } while (pgdp++, addr = next, addr != end);
}

/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
        BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
                     KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
        BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
        kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
                           true);
}
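
/*
 * A worked example of the first check above, assuming the generic KASAN
 * scale shift of 3 (software tag-based KASAN uses 4 instead): one shadow
 * byte covers 8 bytes of memory, so the shadow of the entire 64-bit address
 * space is 1UL << (64 - 3) bytes, placed so that it ends at KASAN_SHADOW_END.
 * The translation is then
 *
 *        shadow(addr) = (addr >> 3) + KASAN_SHADOW_OFFSET
 *
 * which is exactly what kasan_mem_to_shadow() computes; for addr at the top
 * of the address space this lands on KASAN_SHADOW_END, as required.
 */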

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
                                      int node)
{
        kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
        pgd_t *pgdp, *pgdp_new, *pgdp_end;

        pgdp = pgd_offset_k(KASAN_SHADOW_START);
        pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
        pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
        do {
                set_pgd(pgdp_new, READ_ONCE(*pgdp));
        } while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}
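
/*
 * A caller that builds a fresh pgdir and intends to install it (for example
 * when swapping TTBR1 over to a new set of tables) can use kasan_copy_shadow()
 * to carry the live shadow mappings across. Only the top-level entries are
 * copied, so both pgdirs end up sharing the lower-level shadow tables.
 */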

static void __init clear_pgds(unsigned long start,
                              unsigned long end)
{
        /*
         * Remove references to the kasan page tables from swapper_pg_dir.
         * pgd_clear() can't be used here because it is a no-op on 2- and
         * 3-level page table setups.
         */
        for (; start < end; start += PGDIR_SIZE)
                set_pgd(pgd_offset_k(start), __pgd(0));
}

void __init kasan_init(void)
{
        u64 kimg_shadow_start, kimg_shadow_end;
        u64 mod_shadow_start, mod_shadow_end;
        struct memblock_region *reg;
        int i;

        kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
        kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

        mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
        mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

        /*
         * We are going to perform proper setup of shadow memory. First we
         * unmap the early shadow (the clear_pgds() call below). However,
         * instrumented code cannot execute without shadow memory, so
         * tmp_pg_dir is used to keep the early shadow mapped until the full
         * shadow setup is finished.
         */
        memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
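        /*
         * The barrier below ensures the copied tables are visible to the
         * page-table walker before TTBR1 is pointed at tmp_pg_dir.
         */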
        dsb(ishst);
        cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

        kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
                           early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

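        /*
         * Map the shadow of everything with no real backing memory to the
         * shared early shadow page: from the shadow of the end of the linear
         * map up to the module shadow, from the end of the kernel image
         * shadow up to KASAN_SHADOW_END, and (if the two do not abut) the
         * gap between the module shadow and the image shadow.
         */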
        kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
                                    (void *)mod_shadow_start);
        kasan_populate_early_shadow((void *)kimg_shadow_end,
                                    (void *)KASAN_SHADOW_END);

        if (kimg_shadow_start > mod_shadow_end)
                kasan_populate_early_shadow((void *)mod_shadow_end,
                                            (void *)kimg_shadow_start);

        for_each_memblock(memory, reg) {
                void *start = (void *)__phys_to_virt(reg->base);
                void *end = (void *)__phys_to_virt(reg->base + reg->size);

                if (start >= end)
                        break;

                kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
                                   (unsigned long)kasan_mem_to_shadow(end),
                                   early_pfn_to_nid(virt_to_pfn(start)));
        }

        /*
         * KAsan may reuse the contents of kasan_early_shadow_pte directly,
         * so we should make sure that it maps the zero page read-only.
         */
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
                        pfn_pte(sym_to_pfn(kasan_early_shadow_page),
                                PAGE_KERNEL_RO));

        memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
        cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized\n");
}