// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN hooks for kernel subsystems.
 *
 * These functions handle creation of KMSAN metadata for memory allocations.
 *
 * Copyright (C) 2018-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <linux/cacheflush.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/usb.h>

#include "../internal.h"
#include "../slab.h"
#include "kmsan.h"

/*
 * Instrumented functions shouldn't be called under
 * kmsan_enter_runtime()/kmsan_leave_runtime(), because this will lead to
 * skipping effects of functions like memset() inside instrumented code.
 */
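
/*
 * For example (illustrative, not part of the upstream file): calling an
 * instrumented memset() between kmsan_enter_runtime() and
 * kmsan_leave_runtime() would leave the shadow of the destination unchanged,
 * so KMSAN would still consider that memory uninitialized:
 *
 *	kmsan_enter_runtime();
 *	memset(object, 0, size);	// effect on KMSAN metadata is skipped
 *	kmsan_leave_runtime();
 */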

void kmsan_task_create(struct task_struct *task)
{
	kmsan_enter_runtime();
	kmsan_internal_task_create(task);
	kmsan_leave_runtime();
}

void kmsan_task_exit(struct task_struct *task)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	kmsan_disable_current();
}

void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
{
	if (unlikely(object == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * There's a ctor or this is an RCU cache - do nothing. The memory
	 * status hasn't changed since last use.
	 */
	if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
		return;

	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory(object, s->object_size,
					       KMSAN_POISON_CHECK);
	else
		kmsan_internal_poison_memory(object, s->object_size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}
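
/*
 * Illustrative example (not part of the upstream file): with the hook above,
 * the difference between kmalloc() and kzalloc() becomes visible to KMSAN:
 *
 *	int *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *	if (p && *p)	// uninit-value report: the object was poisoned
 *		...;
 *	int *q = kzalloc(sizeof(*q), GFP_KERNEL);
 *	if (q && *q)	// no report: __GFP_ZERO unpoisoned the object
 *		...;
 */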

void kmsan_slab_free(struct kmem_cache *s, void *object)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
		return;
	/*
	 * If there's a constructor, freed memory must remain in the same state
	 * until the next allocation. We cannot save its state to detect
	 * use-after-free bugs, instead we just keep it unpoisoned.
	 */
	if (s->ctor)
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	if (unlikely(ptr == NULL))
		return;
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	if (flags & __GFP_ZERO)
		kmsan_internal_unpoison_memory((void *)ptr, size,
					       /*checked*/ true);
	else
		kmsan_internal_poison_memory((void *)ptr, size, flags,
					     KMSAN_POISON_CHECK);
	kmsan_leave_runtime();
}

void kmsan_kfree_large(const void *ptr)
{
	struct page *page;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	page = virt_to_head_page((void *)ptr);
	KMSAN_WARN_ON(ptr != page_address(page));
	kmsan_internal_poison_memory((void *)ptr,
				     page_size(page),
				     GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

static unsigned long vmalloc_shadow(unsigned long addr)
{
	return (unsigned long)kmsan_get_metadata((void *)addr,
						 KMSAN_META_SHADOW);
}

static unsigned long vmalloc_origin(unsigned long addr)
{
	return (unsigned long)kmsan_get_metadata((void *)addr,
						 KMSAN_META_ORIGIN);
}

void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
{
	__vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end));
	__vunmap_range_noflush(vmalloc_origin(start), vmalloc_origin(end));
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
}

/*
 * This function creates new shadow/origin pages for the physical pages mapped
 * into the virtual memory. If those physical pages already had shadow/origin,
 * those are ignored.
 */
int kmsan_ioremap_page_range(unsigned long start, unsigned long end,
			     phys_addr_t phys_addr, pgprot_t prot,
			     unsigned int page_shift)
{
	gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
	struct page *shadow, *origin;
	unsigned long off = 0;
	int nr, err = 0, clean = 0, mapped;

	if (!kmsan_enabled || kmsan_in_runtime())
		return 0;

	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
	for (int i = 0; i < nr; i++, off += PAGE_SIZE, clean = i) {
		shadow = alloc_pages(gfp_mask, 1);
		origin = alloc_pages(gfp_mask, 1);
		if (!shadow || !origin) {
			err = -ENOMEM;
			goto ret;
		}
		mapped = __vmap_pages_range_noflush(
			vmalloc_shadow(start + off),
			vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
			PAGE_SHIFT);
		if (mapped) {
			err = mapped;
			goto ret;
		}
		shadow = NULL;
		mapped = __vmap_pages_range_noflush(
			vmalloc_origin(start + off),
			vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
			PAGE_SHIFT);
		if (mapped) {
			__vunmap_range_noflush(
				vmalloc_shadow(start + off),
				vmalloc_shadow(start + off + PAGE_SIZE));
			err = mapped;
			goto ret;
		}
		origin = NULL;
	}
	/* Page mapping loop finished normally, nothing to clean up. */
	clean = 0;

ret:
	if (clean > 0) {
		/*
		 * Something went wrong. Clean up shadow/origin pages allocated
		 * on the last loop iteration, then delete mappings created
		 * during the previous iterations.
		 */
		if (shadow)
			__free_pages(shadow, 1);
		if (origin)
			__free_pages(origin, 1);
		__vunmap_range_noflush(
			vmalloc_shadow(start),
			vmalloc_shadow(start + clean * PAGE_SIZE));
		__vunmap_range_noflush(
			vmalloc_origin(start),
			vmalloc_origin(start + clean * PAGE_SIZE));
	}
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
	kmsan_leave_runtime();
	return err;
}

void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
{
	unsigned long v_shadow, v_origin;
	struct page *shadow, *origin;
	int nr;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	nr = (end - start) / PAGE_SIZE;
	kmsan_enter_runtime();
	v_shadow = (unsigned long)vmalloc_shadow(start);
	v_origin = (unsigned long)vmalloc_origin(start);
	for (int i = 0; i < nr;
	     i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
		shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);
		origin = kmsan_vmalloc_to_page_or_null((void *)v_origin);
		__vunmap_range_noflush(v_shadow, vmalloc_shadow(end));
		__vunmap_range_noflush(v_origin, vmalloc_origin(end));
		if (shadow)
			__free_pages(shadow, 1);
		if (origin)
			__free_pages(origin, 1);
	}
	flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
	flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
	kmsan_leave_runtime();
}

void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
			size_t left)
{
	unsigned long ua_flags;

	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	/*
	 * At this point we've copied the memory already. It's hard to check
	 * it before copying, as the size of the actually copied buffer is
	 * unknown.
	 */

	/* copy_to_user() may copy zero bytes. No need to check. */
	if (!to_copy)
		return;
	/* Or maybe copy_to_user() failed to copy anything. */
	if (to_copy <= left)
		return;

	ua_flags = user_access_save();
	if (!IS_ENABLED(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE) ||
	    (u64)to < TASK_SIZE) {
		/* This is a user memory access, check it. */
		kmsan_internal_check_memory((void *)from, to_copy - left, to,
					    REASON_COPY_TO_USER);
	} else {
		/*
		 * Otherwise this is a kernel memory access. This happens when
		 * a compat syscall passes an argument allocated on the kernel
		 * stack to a real syscall.
		 * Don't check anything, just copy the shadow of the copied
		 * bytes.
		 */
		kmsan_internal_memmove_metadata((void *)to, (void *)from,
						to_copy - left);
	}
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_copy_to_user);
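
/*
 * Illustrative example (not part of the upstream file): the hook above is
 * what turns copying uninitialized kernel memory to userspace into a
 * kernel-infoleak report:
 *
 *	struct foo f;		// hypothetical type, most fields left unset
 *
 *	f.one_field = 1;	// the remaining bytes of @f stay poisoned
 *	if (copy_to_user(ubuf, &f, sizeof(f)))	// checked via REASON_COPY_TO_USER
 *		return -EFAULT;
 */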

void kmsan_memmove(void *to, const void *from, size_t size)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;

	kmsan_enter_runtime();
	kmsan_internal_memmove_metadata(to, (void *)from, size);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_memmove);

/* Helper function to check an URB. */
void kmsan_handle_urb(const struct urb *urb, bool is_out)
{
	if (!urb)
		return;
	if (is_out)
		kmsan_internal_check_memory(urb->transfer_buffer,
					    urb->transfer_buffer_length,
					    /*user_addr*/ NULL,
					    REASON_SUBMIT_URB);
	else
		kmsan_internal_unpoison_memory(urb->transfer_buffer,
					       urb->transfer_buffer_length,
					       /*checked*/ false);
}
EXPORT_SYMBOL_GPL(kmsan_handle_urb);
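
/*
 * Sketch of the intended use (the exact call site lives in the USB core, not
 * in this file, and is assumed here): before a transfer buffer is handed to
 * the host controller, OUT data is checked and IN data is unpoisoned:
 *
 *	kmsan_handle_urb(urb, usb_urb_dir_out(urb));
 */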
314
315static void kmsan_handle_dma_page(const void *addr, size_t size,
316				  enum dma_data_direction dir)
317{
318	switch (dir) {
319	case DMA_BIDIRECTIONAL:
320		kmsan_internal_check_memory((void *)addr, size,
321					    /*user_addr*/ NULL, REASON_ANY);
322		kmsan_internal_unpoison_memory((void *)addr, size,
323					       /*checked*/ false);
324		break;
325	case DMA_TO_DEVICE:
326		kmsan_internal_check_memory((void *)addr, size,
327					    /*user_addr*/ NULL, REASON_ANY);
328		break;
329	case DMA_FROM_DEVICE:
330		kmsan_internal_unpoison_memory((void *)addr, size,
331					       /*checked*/ false);
332		break;
333	case DMA_NONE:
334		break;
335	}
336}
337
338/* Helper function to handle DMA data transfers. */
339void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
340		      enum dma_data_direction dir)
341{
342	u64 page_offset, to_go, addr;
343
344	if (PageHighMem(page))
345		return;
346	addr = (u64)page_address(page) + offset;
347	/*
348	 * The kernel may occasionally give us adjacent DMA pages not belonging
349	 * to the same allocation. Process them separately to avoid triggering
350	 * internal KMSAN checks.
351	 */
352	while (size > 0) {
353		page_offset = offset_in_page(addr);
354		to_go = min(PAGE_SIZE - page_offset, (u64)size);
355		kmsan_handle_dma_page((void *)addr, to_go, dir);
356		addr += to_go;
357		size -= to_go;
358	}
359}
360EXPORT_SYMBOL_GPL(kmsan_handle_dma);
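
/*
 * Worked example for the loop above (illustrative, assumes PAGE_SIZE == 4096):
 * a 6144-byte region starting at page offset 512 is handled as two chunks,
 * 4096 - 512 = 3584 bytes up to the page boundary and then the remaining
 * 2560 bytes, so each kmsan_handle_dma_page() call stays within one page.
 */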

void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
			 enum dma_data_direction dir)
{
	struct scatterlist *item;
	int i;

	for_each_sg(sg, item, nents, i)
		kmsan_handle_dma(sg_page(item), item->offset, item->length,
				 dir);
}

/* Functions from kmsan-checks.h follow. */

/*
 * To create an origin, kmsan_poison_memory() unwinds the stack and stores it
 * into the stack depot. This may cause deadlocks if done from within the
 * KMSAN runtime, therefore we bail out if kmsan_in_runtime().
 */
void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	/* The users may want to poison/unpoison random memory. */
	kmsan_internal_poison_memory((void *)address, size, flags,
				     KMSAN_POISON_NOCHECK);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_poison_memory);

/*
 * Unlike kmsan_poison_memory(), this function can be used from within KMSAN
 * runtime, because it does not trigger allocations or call instrumented code.
 */
void kmsan_unpoison_memory(const void *address, size_t size)
{
	unsigned long ua_flags;

	if (!kmsan_enabled)
		return;

	ua_flags = user_access_save();
	/* The users may want to poison/unpoison random memory. */
	kmsan_internal_unpoison_memory((void *)address, size,
				       KMSAN_POISON_NOCHECK);
	user_access_restore(ua_flags);
}
EXPORT_SYMBOL(kmsan_unpoison_memory);
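
/*
 * Typical (illustrative) use of the kmsan-checks.h API, with a hypothetical
 * helper: code that initializes memory in a way invisible to the compiler
 * (e.g. inline assembly or a device writing into the buffer) can mark that
 * memory as initialized by hand to avoid false positives:
 *
 *	fill_buffer_from_device(buf, len);	// hypothetical helper
 *	kmsan_unpoison_memory(buf, len);	// tell KMSAN @buf is now valid
 */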

/*
 * Version of kmsan_unpoison_memory() called from IRQ entry functions.
 */
void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
{
	kmsan_unpoison_memory((void *)regs, sizeof(*regs));
}

void kmsan_check_memory(const void *addr, size_t size)
{
	if (!kmsan_enabled)
		return;
	return kmsan_internal_check_memory((void *)addr, size,
					   /*user_addr*/ NULL, REASON_ANY);
}
EXPORT_SYMBOL(kmsan_check_memory);

void kmsan_enable_current(void)
{
	KMSAN_WARN_ON(current->kmsan_ctx.depth == 0);
	current->kmsan_ctx.depth--;
}
EXPORT_SYMBOL(kmsan_enable_current);

void kmsan_disable_current(void)
{
	current->kmsan_ctx.depth++;
	KMSAN_WARN_ON(current->kmsan_ctx.depth == 0);
}
EXPORT_SYMBOL(kmsan_disable_current);
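
/*
 * Illustrative pairing of the two helpers above (the caller is assumed, not
 * part of this file): temporarily suppress KMSAN reports in the current
 * task, e.g. around code that is expected to touch uninitialized data:
 *
 *	kmsan_disable_current();
 *	...				// reports from this window are suppressed
 *	kmsan_enable_current();
 */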