// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN shadow implementation.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <asm/kmsan.h>
#include <asm/tlbflush.h>
#include <linux/cacheflush.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/stddef.h>

#include "../internal.h"
#include "kmsan.h"

#define shadow_page_for(page) ((page)->kmsan_shadow)

#define origin_page_for(page) ((page)->kmsan_origin)

static void *shadow_ptr_for(struct page *page)
{
	return page_address(shadow_page_for(page));
}

static void *origin_ptr_for(struct page *page)
{
	return page_address(origin_page_for(page));
}

static bool page_has_metadata(struct page *page)
{
	return shadow_page_for(page) && origin_page_for(page);
}

static void set_no_shadow_origin_page(struct page *page)
{
	shadow_page_for(page) = NULL;
	origin_page_for(page) = NULL;
}

/*
 * Dummy load and store pages to be used when the real metadata is unavailable.
 * There are separate pages for loads and stores, so that every load returns a
 * zero, and every store doesn't affect other loads.
 */
static char dummy_load_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static char dummy_store_page[PAGE_SIZE] __aligned(PAGE_SIZE);

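/*
 * Map a vmalloc or module address to the corresponding shadow or origin
 * address, which lies at a fixed offset inside the KMSAN metadata regions.
 * Returns 0 for addresses outside of those regions.
 */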
static unsigned long vmalloc_meta(void *addr, bool is_origin)
{
	unsigned long addr64 = (unsigned long)addr, off;

	KMSAN_WARN_ON(is_origin && !IS_ALIGNED(addr64, KMSAN_ORIGIN_SIZE));
	if (kmsan_internal_is_vmalloc_addr(addr)) {
		off = addr64 - VMALLOC_START;
		return off + (is_origin ? KMSAN_VMALLOC_ORIGIN_START :
					  KMSAN_VMALLOC_SHADOW_START);
	}
	if (kmsan_internal_is_module_addr(addr)) {
		off = addr64 - MODULES_VADDR;
		return off + (is_origin ? KMSAN_MODULES_ORIGIN_START :
					  KMSAN_MODULES_SHADOW_START);
	}
	return 0;
}

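/* Return the struct page backing @vaddr, or NULL if the address is invalid. */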
static struct page *virt_to_page_or_null(void *vaddr)
{
	if (kmsan_virt_addr_valid(vaddr))
		return virt_to_page(vaddr);
	else
		return NULL;
}

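/*
 * Return pointers to the shadow and origin of a @size-byte access at @address.
 * If KMSAN is disabled or no metadata exists for the address, the access is
 * redirected to the dummy pages, so the caller can always dereference the
 * returned pointers.
 */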
struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *address, u64 size,
						     bool store)
{
	struct shadow_origin_ptr ret;
	void *shadow;

	/*
	 * Even if we redirect this memory access to the dummy page, it will
	 * go out of bounds.
	 */
	KMSAN_WARN_ON(size > PAGE_SIZE);

	if (!kmsan_enabled)
		goto return_dummy;

	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(address, size));
	shadow = kmsan_get_metadata(address, KMSAN_META_SHADOW);
	if (!shadow)
		goto return_dummy;

	ret.shadow = shadow;
	ret.origin = kmsan_get_metadata(address, KMSAN_META_ORIGIN);
	return ret;

return_dummy:
	if (store) {
		/* Ignore this store. */
		ret.shadow = dummy_store_page;
		ret.origin = dummy_store_page;
	} else {
		/* This load will return zero. */
		ret.shadow = dummy_load_page;
		ret.origin = dummy_load_page;
	}
	return ret;
}

/*
 * Obtain the shadow or origin pointer for the given address, or NULL if there's
 * none. The caller must check the return value for being non-NULL if needed.
 * The return value of this function should not depend on whether we're in the
 * runtime or not.
 */
void *kmsan_get_metadata(void *address, bool is_origin)
{
	u64 addr = (u64)address, pad, off;
	struct page *page;
	void *ret;

	if (is_origin && !IS_ALIGNED(addr, KMSAN_ORIGIN_SIZE)) {
		pad = addr % KMSAN_ORIGIN_SIZE;
		addr -= pad;
	}
	address = (void *)addr;
	if (kmsan_internal_is_vmalloc_addr(address) ||
	    kmsan_internal_is_module_addr(address))
		return (void *)vmalloc_meta(address, is_origin);

	ret = arch_kmsan_get_meta_or_null(address, is_origin);
	if (ret)
		return ret;

	page = virt_to_page_or_null(address);
	if (!page)
		return NULL;
	if (!page_has_metadata(page))
		return NULL;
	off = addr % PAGE_SIZE;

	return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;
}

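/*
 * Copy the shadow and origin of @src to @dst. If @src has no metadata, @dst is
 * unpoisoned (marked initialized) instead, so that reads from the copied page
 * are not reported.
 */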
void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	if (!dst || !page_has_metadata(dst))
		return;
	if (!src || !page_has_metadata(src)) {
		kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,
					       /*checked*/ false);
		return;
	}

	kmsan_enter_runtime();
	__memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);
	__memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_copy_page_meta);

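/*
 * Fill the metadata of a newly allocated page block: for __GFP_ZERO
 * allocations (or when KMSAN is disabled) the shadow and origin are zeroed,
 * otherwise the shadow is poisoned and every origin slot is set to the
 * allocation stack.
 */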
void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags)
{
	bool initialized = (flags & __GFP_ZERO) || !kmsan_enabled;
	struct page *shadow, *origin;
	depot_stack_handle_t handle;
	int pages = 1 << order;

	if (!page)
		return;

	shadow = shadow_page_for(page);
	origin = origin_page_for(page);

	if (initialized) {
		__memset(page_address(shadow), 0, PAGE_SIZE * pages);
		__memset(page_address(origin), 0, PAGE_SIZE * pages);
		return;
	}

	/* Zero pages allocated by the runtime should also be initialized. */
	if (kmsan_in_runtime())
		return;

	__memset(page_address(shadow), -1, PAGE_SIZE * pages);
	kmsan_enter_runtime();
	handle = kmsan_save_stack_with_flags(flags, /*extra_bits*/ 0);
	kmsan_leave_runtime();
	/*
	 * Addresses are page-aligned, pages are contiguous, so it's ok
	 * to just fill the origin pages with @handle.
	 */
	for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)
		((depot_stack_handle_t *)page_address(origin))[i] = handle;
}

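/* Mark the memory of a page block as uninitialized when it is freed. */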
void kmsan_free_page(struct page *page, unsigned int order)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(page_address(page),
				     PAGE_SIZE << compound_order(page),
				     GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

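/*
 * When pages are mapped into the vmalloc region, also map their shadow and
 * origin pages at the matching offsets in the KMSAN vmalloc metadata ranges.
 */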
void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				    pgprot_t prot, struct page **pages,
				    unsigned int page_shift)
{
	unsigned long shadow_start, origin_start, shadow_end, origin_end;
	struct page **s_pages, **o_pages;
	int nr, mapped;

	if (!kmsan_enabled)
		return;

	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
	if (!shadow_start)
		return;

	nr = (end - start) / PAGE_SIZE;
	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
	if (!s_pages || !o_pages)
		goto ret;
	for (int i = 0; i < nr; i++) {
		s_pages[i] = shadow_page_for(pages[i]);
		o_pages[i] = origin_page_for(pages[i]);
	}
	prot = __pgprot(pgprot_val(prot) | _PAGE_NX);
	prot = PAGE_KERNEL;

	origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
	origin_end = vmalloc_meta((void *)end, KMSAN_META_ORIGIN);
	kmsan_enter_runtime();
	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
					    s_pages, page_shift);
	KMSAN_WARN_ON(mapped);
	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
					    o_pages, page_shift);
	KMSAN_WARN_ON(mapped);
	kmsan_leave_runtime();
	flush_tlb_kernel_range(shadow_start, shadow_end);
	flush_tlb_kernel_range(origin_start, origin_end);
	flush_cache_vmap(shadow_start, shadow_end);
	flush_cache_vmap(origin_start, origin_end);

ret:
	kfree(s_pages);
	kfree(o_pages);
}

/* Allocate metadata for pages allocated at boot time. */
void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
{
	struct page *shadow_p, *origin_p;
	void *shadow, *origin;
	struct page *page;
	u64 size;

	start = (void *)ALIGN_DOWN((u64)start, PAGE_SIZE);
	size = ALIGN((u64)end - (u64)start, PAGE_SIZE);
	shadow = memblock_alloc(size, PAGE_SIZE);
	origin = memblock_alloc(size, PAGE_SIZE);
	for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
		page = virt_to_page_or_null((char *)start + addr);
		shadow_p = virt_to_page_or_null((char *)shadow + addr);
		set_no_shadow_origin_page(shadow_p);
		shadow_page_for(page) = shadow_p;
		origin_p = virt_to_page_or_null((char *)origin + addr);
		set_no_shadow_origin_page(origin_p);
		origin_page_for(page) = origin_p;
	}
}

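/*
 * Attach @shadow and @origin as the metadata pages of the 1 << @order pages
 * starting at @page, clearing the metadata pointers of the shadow and origin
 * pages themselves.
 */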
void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order)
{
	for (int i = 0; i < (1 << order); i++) {
		set_no_shadow_origin_page(&shadow[i]);
		set_no_shadow_origin_page(&origin[i]);
		shadow_page_for(&page[i]) = &shadow[i];
		origin_page_for(&page[i]) = &origin[i];
	}
}