v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * This file contains common generic and tag-based KASAN code.
  4 *
  5 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
  6 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  7 *
  8 * Some code borrowed from https://github.com/xairy/kasan-prototype by
  9 *        Andrey Konovalov <andreyknvl@gmail.com>
 10 *
 11 * This program is free software; you can redistribute it and/or modify
 12 * it under the terms of the GNU General Public License version 2 as
 13 * published by the Free Software Foundation.
 14 *
 15 */
 16
 17#include <linux/export.h>
 18#include <linux/interrupt.h>
 19#include <linux/init.h>
 20#include <linux/kasan.h>
 21#include <linux/kernel.h>
 22#include <linux/kmemleak.h>
 23#include <linux/linkage.h>
 24#include <linux/memblock.h>
 25#include <linux/memory.h>
 26#include <linux/mm.h>
 27#include <linux/module.h>
 28#include <linux/printk.h>
 29#include <linux/sched.h>
 30#include <linux/sched/task_stack.h>
 31#include <linux/slab.h>
 32#include <linux/stacktrace.h>
 33#include <linux/string.h>
 34#include <linux/types.h>
 35#include <linux/vmalloc.h>
 36#include <linux/bug.h>
 37#include <linux/uaccess.h>
 38
 39#include "kasan.h"
 40#include "../slab.h"
 41
 42static inline int in_irqentry_text(unsigned long ptr)
 43{
 44	return (ptr >= (unsigned long)&__irqentry_text_start &&
 45		ptr < (unsigned long)&__irqentry_text_end) ||
 46		(ptr >= (unsigned long)&__softirqentry_text_start &&
 47		 ptr < (unsigned long)&__softirqentry_text_end);
 48}
 49
 50static inline unsigned int filter_irq_stacks(unsigned long *entries,
 51					     unsigned int nr_entries)
 52{
 53	unsigned int i;
 54
 55	for (i = 0; i < nr_entries; i++) {
 56		if (in_irqentry_text(entries[i])) {
 57			/* Include the irqentry function into the stack. */
 58			return i + 1;
 59		}
 60	}
 61	return nr_entries;
 62}
 63
 64static inline depot_stack_handle_t save_stack(gfp_t flags)
 65{
 66	unsigned long entries[KASAN_STACK_DEPTH];
 67	unsigned int nr_entries;
 68
 69	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 70	nr_entries = filter_irq_stacks(entries, nr_entries);
 71	return stack_depot_save(entries, nr_entries, flags);
 72}
 73
 74static inline void set_track(struct kasan_track *track, gfp_t flags)
 75{
 76	track->pid = current->pid;
 77	track->stack = save_stack(flags);
 78}
 79
 80void kasan_enable_current(void)
 81{
 82	current->kasan_depth++;
 83}
 84
 85void kasan_disable_current(void)
 86{
 87	current->kasan_depth--;
 88}
 89
 90bool __kasan_check_read(const volatile void *p, unsigned int size)
 91{
 92	return check_memory_region((unsigned long)p, size, false, _RET_IP_);
 93}
 94EXPORT_SYMBOL(__kasan_check_read);
 95
 96bool __kasan_check_write(const volatile void *p, unsigned int size)
 97{
 98	return check_memory_region((unsigned long)p, size, true, _RET_IP_);
 99}
100EXPORT_SYMBOL(__kasan_check_write);
101
102#undef memset
103void *memset(void *addr, int c, size_t len)
104{
105	check_memory_region((unsigned long)addr, len, true, _RET_IP_);
106
107	return __memset(addr, c, len);
108}
109
110#undef memmove
111void *memmove(void *dest, const void *src, size_t len)
112{
113	check_memory_region((unsigned long)src, len, false, _RET_IP_);
114	check_memory_region((unsigned long)dest, len, true, _RET_IP_);
115
116	return __memmove(dest, src, len);
117}
118
119#undef memcpy
120void *memcpy(void *dest, const void *src, size_t len)
121{
122	check_memory_region((unsigned long)src, len, false, _RET_IP_);
123	check_memory_region((unsigned long)dest, len, true, _RET_IP_);
124
125	return __memcpy(dest, src, len);
126}
127
128/*
129 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
130 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
131 */
132void kasan_poison_shadow(const void *address, size_t size, u8 value)
133{
134	void *shadow_start, *shadow_end;
135
136	/*
137	 * Perform shadow offset calculation based on untagged address, as
138	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
139	 * addresses to this function.
140	 */
141	address = reset_tag(address);
142
143	shadow_start = kasan_mem_to_shadow(address);
144	shadow_end = kasan_mem_to_shadow(address + size);
145
146	__memset(shadow_start, value, shadow_end - shadow_start);
147}
148
149void kasan_unpoison_shadow(const void *address, size_t size)
150{
151	u8 tag = get_tag(address);
152
153	/*
154	 * Perform shadow offset calculation based on untagged address, as
155	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
156	 * addresses to this function.
157	 */
158	address = reset_tag(address);
159
160	kasan_poison_shadow(address, size, tag);
161
162	if (size & KASAN_SHADOW_MASK) {
163		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
164
165		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
166			*shadow = tag;
167		else
168			*shadow = size & KASAN_SHADOW_MASK;
169	}
170}
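/*
 * Worked example, assuming the generic mode's 8-byte granule
 * (KASAN_SHADOW_SCALE_SIZE == 8): unpoisoning 13 bytes from an aligned
 * address clears one whole shadow byte (first granule fully accessible)
 * and, since 13 & KASAN_SHADOW_MASK == 5, writes 5 into the next shadow
 * byte, so only the first 5 bytes of the second granule are marked
 * accessible.
 */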
171
172static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
173{
174	void *base = task_stack_page(task);
175	size_t size = sp - base;
176
177	kasan_unpoison_shadow(base, size);
178}
179
180/* Unpoison the entire stack for a task. */
181void kasan_unpoison_task_stack(struct task_struct *task)
182{
183	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
184}
185
186/* Unpoison the stack for the current task beyond a watermark sp value. */
187asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
188{
189	/*
190	 * Calculate the task stack base address.  Avoid using 'current'
191	 * because this function is called by early resume code which hasn't
192	 * yet set up the percpu register (%gs).
193	 */
194	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
195
196	kasan_unpoison_shadow(base, watermark - base);
197}
198
199/*
200 * Clear all poison for the region between the current SP and a provided
201 * watermark value, as is sometimes required prior to hand-crafted asm function
202 * returns in the middle of functions.
203 */
204void kasan_unpoison_stack_above_sp_to(const void *watermark)
205{
206	const void *sp = __builtin_frame_address(0);
207	size_t size = watermark - sp;
208
209	if (WARN_ON(sp > watermark))
210		return;
211	kasan_unpoison_shadow(sp, size);
212}
213
214void kasan_alloc_pages(struct page *page, unsigned int order)
215{
216	u8 tag;
217	unsigned long i;
218
219	if (unlikely(PageHighMem(page)))
220		return;
221
222	tag = random_tag();
223	for (i = 0; i < (1 << order); i++)
224		page_kasan_tag_set(page + i, tag);
225	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
226}
227
228void kasan_free_pages(struct page *page, unsigned int order)
229{
230	if (likely(!PageHighMem(page)))
231		kasan_poison_shadow(page_address(page),
232				PAGE_SIZE << order,
233				KASAN_FREE_PAGE);
234}
235
236/*
237 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
238 * For larger allocations larger redzones are used.
239 */
240static inline unsigned int optimal_redzone(unsigned int object_size)
241{
242	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
243		return 0;
244
245	return
246		object_size <= 64        - 16   ? 16 :
247		object_size <= 128       - 32   ? 32 :
248		object_size <= 512       - 64   ? 64 :
249		object_size <= 4096      - 128  ? 128 :
250		object_size <= (1 << 14) - 256  ? 256 :
251		object_size <= (1 << 15) - 512  ? 512 :
252		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
253}
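/*
 * Example: an object_size of 48 gets a 16-byte redzone (48 <= 64 - 16),
 * an object_size of 100 gets a 64-byte one (100 > 128 - 32 but
 * 100 <= 512 - 64), and anything larger than (1 << 16) - 1024 gets the
 * 2048-byte maximum.
 */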
254
255void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
256			slab_flags_t *flags)
257{
258	unsigned int orig_size = *size;
259	unsigned int redzone_size;
260	int redzone_adjust;
261
262	/* Add alloc meta. */
263	cache->kasan_info.alloc_meta_offset = *size;
264	*size += sizeof(struct kasan_alloc_meta);
265
266	/* Add free meta. */
267	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
268	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
269	     cache->object_size < sizeof(struct kasan_free_meta))) {
270		cache->kasan_info.free_meta_offset = *size;
271		*size += sizeof(struct kasan_free_meta);
272	}
273
274	redzone_size = optimal_redzone(cache->object_size);
275	redzone_adjust = redzone_size -	(*size - cache->object_size);
276	if (redzone_adjust > 0)
277		*size += redzone_adjust;
278
279	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
280			max(*size, cache->object_size + redzone_size));
281
282	/*
283	 * If the metadata doesn't fit, don't enable KASAN at all.
284	 */
285	if (*size <= cache->kasan_info.alloc_meta_offset ||
286			*size <= cache->kasan_info.free_meta_offset) {
287		cache->kasan_info.alloc_meta_offset = 0;
288		cache->kasan_info.free_meta_offset = 0;
289		*size = orig_size;
290		return;
291	}
292
293	*flags |= SLAB_KASAN;
294}
295
296size_t kasan_metadata_size(struct kmem_cache *cache)
297{
298	return (cache->kasan_info.alloc_meta_offset ?
299		sizeof(struct kasan_alloc_meta) : 0) +
300		(cache->kasan_info.free_meta_offset ?
301		sizeof(struct kasan_free_meta) : 0);
302}
303
304struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
305					const void *object)
306{
307	return (void *)object + cache->kasan_info.alloc_meta_offset;
308}
309
310struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
311				      const void *object)
312{
313	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
314	return (void *)object + cache->kasan_info.free_meta_offset;
315}
316
317
318static void kasan_set_free_info(struct kmem_cache *cache,
319		void *object, u8 tag)
320{
321	struct kasan_alloc_meta *alloc_meta;
322	u8 idx = 0;
323
324	alloc_meta = get_alloc_info(cache, object);
325
326#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
327	idx = alloc_meta->free_track_idx;
328	alloc_meta->free_pointer_tag[idx] = tag;
329	alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
330#endif
331
332	set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
333}
334
335void kasan_poison_slab(struct page *page)
336{
337	unsigned long i;
338
339	for (i = 0; i < compound_nr(page); i++)
340		page_kasan_tag_reset(page + i);
341	kasan_poison_shadow(page_address(page), page_size(page),
342			KASAN_KMALLOC_REDZONE);
343}
344
345void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
346{
347	kasan_unpoison_shadow(object, cache->object_size);
348}
349
350void kasan_poison_object_data(struct kmem_cache *cache, void *object)
351{
352	kasan_poison_shadow(object,
353			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
354			KASAN_KMALLOC_REDZONE);
355}
356
357/*
358 * This function assigns a tag to an object considering the following:
359 * 1. A cache might have a constructor, which might save a pointer to a slab
360 *    object somewhere (e.g. in the object itself). We preassign a tag for
361 *    each object in caches with constructors during slab creation and reuse
362 *    the same tag each time a particular object is allocated.
363 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
364 *    accessed after being freed. We preassign tags for objects in these
365 *    caches as well.
 366 * 3. For the SLAB allocator we can't preassign tags randomly since the freelist
 367 *    is stored as an array of indexes instead of a linked list. Assign tags
 368 *    based on object indexes, so that objects that are next to each other
369 *    get different tags.
370 */
371static u8 assign_tag(struct kmem_cache *cache, const void *object,
372			bool init, bool keep_tag)
373{
374	/*
375	 * 1. When an object is kmalloc()'ed, two hooks are called:
376	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
377	 *    tag only in the first one.
378	 * 2. We reuse the same tag for krealloc'ed objects.
379	 */
380	if (keep_tag)
381		return get_tag(object);
382
383	/*
384	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
385	 * set, assign a tag when the object is being allocated (init == false).
386	 */
387	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
388		return init ? KASAN_TAG_KERNEL : random_tag();
389
390	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
391#ifdef CONFIG_SLAB
392	/* For SLAB assign tags based on the object index in the freelist. */
393	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
394#else
395	/*
396	 * For SLUB assign a random tag during slab creation, otherwise reuse
397	 * the already assigned tag.
398	 */
399	return init ? random_tag() : get_tag(object);
400#endif
401}
402
403void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
404						const void *object)
405{
406	struct kasan_alloc_meta *alloc_info;
407
408	if (!(cache->flags & SLAB_KASAN))
409		return (void *)object;
410
411	alloc_info = get_alloc_info(cache, object);
412	__memset(alloc_info, 0, sizeof(*alloc_info));
413
414	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
415		object = set_tag(object,
416				assign_tag(cache, object, true, false));
417
418	return (void *)object;
419}
420
421static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
422{
423	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
424		return shadow_byte < 0 ||
425			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
426
427	/* else CONFIG_KASAN_SW_TAGS: */
428	if ((u8)shadow_byte == KASAN_TAG_INVALID)
429		return true;
430	if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
431		return true;
432
433	return false;
434}
435
436static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
437			      unsigned long ip, bool quarantine)
438{
439	s8 shadow_byte;
440	u8 tag;
441	void *tagged_object;
442	unsigned long rounded_up_size;
443
444	tag = get_tag(object);
445	tagged_object = object;
446	object = reset_tag(object);
447
448	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
449	    object)) {
450		kasan_report_invalid_free(tagged_object, ip);
451		return true;
452	}
453
454	/* RCU slabs could be legally used after free within the RCU period */
455	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
456		return false;
457
458	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
459	if (shadow_invalid(tag, shadow_byte)) {
460		kasan_report_invalid_free(tagged_object, ip);
461		return true;
462	}
463
464	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
465	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
466
467	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
468			unlikely(!(cache->flags & SLAB_KASAN)))
469		return false;
470
471	kasan_set_free_info(cache, object, tag);
472
473	quarantine_put(get_free_info(cache, object), cache);
474
475	return IS_ENABLED(CONFIG_KASAN_GENERIC);
476}
477
478bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
479{
480	return __kasan_slab_free(cache, object, ip, true);
481}
482
483static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
484				size_t size, gfp_t flags, bool keep_tag)
485{
486	unsigned long redzone_start;
487	unsigned long redzone_end;
488	u8 tag = 0xff;
489
490	if (gfpflags_allow_blocking(flags))
491		quarantine_reduce();
492
493	if (unlikely(object == NULL))
494		return NULL;
495
496	redzone_start = round_up((unsigned long)(object + size),
497				KASAN_SHADOW_SCALE_SIZE);
498	redzone_end = round_up((unsigned long)object + cache->object_size,
499				KASAN_SHADOW_SCALE_SIZE);
500
501	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
502		tag = assign_tag(cache, object, false, keep_tag);
503
504	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
505	kasan_unpoison_shadow(set_tag(object, tag), size);
506	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
507		KASAN_KMALLOC_REDZONE);
508
509	if (cache->flags & SLAB_KASAN)
510		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
511
512	return set_tag(object, tag);
513}
514
515void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
516					gfp_t flags)
517{
518	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
519}
520
521void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
522				size_t size, gfp_t flags)
523{
524	return __kasan_kmalloc(cache, object, size, flags, true);
525}
526EXPORT_SYMBOL(kasan_kmalloc);
527
528void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
529						gfp_t flags)
530{
531	struct page *page;
532	unsigned long redzone_start;
533	unsigned long redzone_end;
534
535	if (gfpflags_allow_blocking(flags))
536		quarantine_reduce();
537
538	if (unlikely(ptr == NULL))
539		return NULL;
540
541	page = virt_to_page(ptr);
542	redzone_start = round_up((unsigned long)(ptr + size),
543				KASAN_SHADOW_SCALE_SIZE);
544	redzone_end = (unsigned long)ptr + page_size(page);
545
546	kasan_unpoison_shadow(ptr, size);
547	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
548		KASAN_PAGE_REDZONE);
549
550	return (void *)ptr;
551}
552
553void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
554{
555	struct page *page;
556
557	if (unlikely(object == ZERO_SIZE_PTR))
558		return (void *)object;
559
560	page = virt_to_head_page(object);
561
562	if (unlikely(!PageSlab(page)))
563		return kasan_kmalloc_large(object, size, flags);
564	else
565		return __kasan_kmalloc(page->slab_cache, object, size,
566						flags, true);
567}
568
569void kasan_poison_kfree(void *ptr, unsigned long ip)
570{
571	struct page *page;
572
573	page = virt_to_head_page(ptr);
574
575	if (unlikely(!PageSlab(page))) {
576		if (ptr != page_address(page)) {
577			kasan_report_invalid_free(ptr, ip);
578			return;
579		}
580		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
581	} else {
582		__kasan_slab_free(page->slab_cache, ptr, ip, false);
583	}
584}
585
586void kasan_kfree_large(void *ptr, unsigned long ip)
587{
588	if (ptr != page_address(virt_to_head_page(ptr)))
589		kasan_report_invalid_free(ptr, ip);
590	/* The object will be poisoned by page_alloc. */
591}
592
593int kasan_module_alloc(void *addr, size_t size)
594{
595	void *ret;
596	size_t scaled_size;
597	size_t shadow_size;
598	unsigned long shadow_start;
599
600	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
601	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
602	shadow_size = round_up(scaled_size, PAGE_SIZE);
603
604	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
605		return -EINVAL;
606
607	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
608			shadow_start + shadow_size,
609			GFP_KERNEL,
610			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
611			__builtin_return_address(0));
612
613	if (ret) {
614		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
615		find_vm_area(addr)->flags |= VM_KASAN;
616		kmemleak_ignore(ret);
617		return 0;
618	}
619
620	return -ENOMEM;
621}
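/*
 * Example, assuming the generic mode's 1/8 shadow ratio
 * (KASAN_SHADOW_SCALE_SHIFT == 3) and 4 KiB pages: a 1 MiB module region
 * needs (1 MiB + KASAN_SHADOW_MASK) >> 3 == 128 KiB of shadow, which is
 * already PAGE_SIZE aligned, so 32 shadow pages are mapped by the
 * __vmalloc_node_range() call above.
 */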
622
623void kasan_free_shadow(const struct vm_struct *vm)
624{
625	if (vm->flags & VM_KASAN)
626		vfree(kasan_mem_to_shadow(vm->addr));
627}
628
629extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
630
631void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
632{
633	unsigned long flags = user_access_save();
634	__kasan_report(addr, size, is_write, ip);
635	user_access_restore(flags);
636}
637
638#ifdef CONFIG_MEMORY_HOTPLUG
639static bool shadow_mapped(unsigned long addr)
640{
641	pgd_t *pgd = pgd_offset_k(addr);
642	p4d_t *p4d;
643	pud_t *pud;
644	pmd_t *pmd;
645	pte_t *pte;
646
647	if (pgd_none(*pgd))
648		return false;
649	p4d = p4d_offset(pgd, addr);
650	if (p4d_none(*p4d))
651		return false;
652	pud = pud_offset(p4d, addr);
653	if (pud_none(*pud))
654		return false;
655
656	/*
657	 * We can't use pud_large() or pud_huge(), the first one is
658	 * arch-specific, the last one depends on HUGETLB_PAGE.  So let's abuse
659	 * pud_bad(), if pud is bad then it's bad because it's huge.
660	 */
661	if (pud_bad(*pud))
662		return true;
663	pmd = pmd_offset(pud, addr);
664	if (pmd_none(*pmd))
665		return false;
666
667	if (pmd_bad(*pmd))
668		return true;
669	pte = pte_offset_kernel(pmd, addr);
670	return !pte_none(*pte);
671}
672
673static int __meminit kasan_mem_notifier(struct notifier_block *nb,
674			unsigned long action, void *data)
675{
676	struct memory_notify *mem_data = data;
677	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
678	unsigned long shadow_end, shadow_size;
679
680	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
681	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
682	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
683	shadow_size = nr_shadow_pages << PAGE_SHIFT;
684	shadow_end = shadow_start + shadow_size;
685
686	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
687		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
688		return NOTIFY_BAD;
689
690	switch (action) {
691	case MEM_GOING_ONLINE: {
692		void *ret;
693
694		/*
 695		 * If the shadow is mapped already then it must have been mapped
 696		 * during boot. This could happen if we are onlining previously
 697		 * offlined memory.
698		 */
699		if (shadow_mapped(shadow_start))
700			return NOTIFY_OK;
701
702		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
703					shadow_end, GFP_KERNEL,
704					PAGE_KERNEL, VM_NO_GUARD,
705					pfn_to_nid(mem_data->start_pfn),
706					__builtin_return_address(0));
707		if (!ret)
708			return NOTIFY_BAD;
709
710		kmemleak_ignore(ret);
711		return NOTIFY_OK;
712	}
713	case MEM_CANCEL_ONLINE:
714	case MEM_OFFLINE: {
715		struct vm_struct *vm;
716
717		/*
718		 * shadow_start was either mapped during boot by kasan_init()
719		 * or during memory online by __vmalloc_node_range().
720		 * In the latter case we can use vfree() to free shadow.
 721		 * A non-NULL result from find_vm_area() tells us that it
 722		 * was the latter case.
723		 *
724		 * Currently it's not possible to free shadow mapped
725		 * during boot by kasan_init(). It's because the code
726		 * to do that hasn't been written yet. So we'll just
727		 * leak the memory.
728		 */
729		vm = find_vm_area((void *)shadow_start);
730		if (vm)
731			vfree((void *)shadow_start);
732	}
733	}
734
735	return NOTIFY_OK;
736}
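/*
 * Example, assuming the generic mode's 1/8 shadow ratio and 4 KiB pages:
 * onlining a 128 MiB memory block (32768 pages) maps
 * 32768 >> KASAN_SHADOW_SCALE_SHIFT == 4096 shadow pages, i.e. 16 MiB of
 * shadow, via the __vmalloc_node_range() call in MEM_GOING_ONLINE above.
 */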
737
738static int __init kasan_memhotplug_init(void)
739{
740	hotplug_memory_notifier(kasan_mem_notifier, 0);
741
742	return 0;
743}
744
745core_initcall(kasan_memhotplug_init);
746#endif
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * This file contains common KASAN code.
  4 *
  5 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
  6 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  7 *
  8 * Some code borrowed from https://github.com/xairy/kasan-prototype by
  9 *        Andrey Konovalov <andreyknvl@gmail.com>
 10 */
 11
 12#include <linux/export.h>
 13#include <linux/init.h>
 14#include <linux/kasan.h>
 15#include <linux/kernel.h>
 16#include <linux/linkage.h>
 17#include <linux/memblock.h>
 18#include <linux/memory.h>
 19#include <linux/mm.h>
 20#include <linux/module.h>
 21#include <linux/printk.h>
 22#include <linux/sched.h>
 23#include <linux/sched/task_stack.h>
 24#include <linux/slab.h>
 25#include <linux/stacktrace.h>
 26#include <linux/string.h>
 27#include <linux/types.h>
 28#include <linux/bug.h>
 29
 30#include "kasan.h"
 31#include "../slab.h"
 32
 33struct slab *kasan_addr_to_slab(const void *addr)
 34{
 35	if (virt_addr_valid(addr))
 36		return virt_to_slab(addr);
 37	return NULL;
 38}
 39
 40depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
 41{
 42	unsigned long entries[KASAN_STACK_DEPTH];
 43	unsigned int nr_entries;
 44
 45	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 46	return __stack_depot_save(entries, nr_entries, 0, flags, can_alloc);
 47}
 48
 49void kasan_set_track(struct kasan_track *track, gfp_t flags)
 50{
 51	track->pid = current->pid;
 52	track->stack = kasan_save_stack(flags, true);
 53}
 54
 55#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 56void kasan_enable_current(void)
 57{
 58	current->kasan_depth++;
 59}
 60EXPORT_SYMBOL(kasan_enable_current);
 61
 62void kasan_disable_current(void)
 63{
 64	current->kasan_depth--;
 65}
 66EXPORT_SYMBOL(kasan_disable_current);
 67
 68#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 69
 70void __kasan_unpoison_range(const void *address, size_t size)
 71{
 72	kasan_unpoison(address, size, false);
 73}
 74
 75#ifdef CONFIG_KASAN_STACK
 76/* Unpoison the entire stack for a task. */
 77void kasan_unpoison_task_stack(struct task_struct *task)
 78{
 79	void *base = task_stack_page(task);
 80
 81	kasan_unpoison(base, THREAD_SIZE, false);
 82}
 83
 84/* Unpoison the stack for the current task beyond a watermark sp value. */
 85asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
 86{
 87	/*
 88	 * Calculate the task stack base address.  Avoid using 'current'
 89	 * because this function is called by early resume code which hasn't
 90	 * yet set up the percpu register (%gs).
 91	 */
 92	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
 93
 94	kasan_unpoison(base, watermark - base, false);
 95}
 96#endif /* CONFIG_KASAN_STACK */
 97
 98void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
 99{
100	u8 tag;
101	unsigned long i;
102
103	if (unlikely(PageHighMem(page)))
104		return;
105
106	tag = kasan_random_tag();
107	kasan_unpoison(set_tag(page_address(page), tag),
108		       PAGE_SIZE << order, init);
109	for (i = 0; i < (1 << order); i++)
110		page_kasan_tag_set(page + i, tag);
111}
112
113void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
114{
115	if (likely(!PageHighMem(page)))
116		kasan_poison(page_address(page), PAGE_SIZE << order,
117			     KASAN_PAGE_FREE, init);
118}
119
120void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
121{
122	cache->kasan_info.is_kmalloc = true;
123}
124
125void __kasan_poison_slab(struct slab *slab)
126{
127	struct page *page = slab_page(slab);
128	unsigned long i;
129
130	for (i = 0; i < compound_nr(page); i++)
131		page_kasan_tag_reset(page + i);
132	kasan_poison(page_address(page), page_size(page),
133		     KASAN_SLAB_REDZONE, false);
134}
135
136void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
137{
138	kasan_unpoison(object, cache->object_size, false);
139}
140
141void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
142{
143	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
144			KASAN_SLAB_REDZONE, false);
145}
146
147/*
148 * This function assigns a tag to an object considering the following:
149 * 1. A cache might have a constructor, which might save a pointer to a slab
150 *    object somewhere (e.g. in the object itself). We preassign a tag for
151 *    each object in caches with constructors during slab creation and reuse
152 *    the same tag each time a particular object is allocated.
153 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
154 *    accessed after being freed. We preassign tags for objects in these
155 *    caches as well.
156 * 3. For the SLAB allocator we can't preassign tags randomly since the freelist
157 *    is stored as an array of indexes instead of a linked list. Assign tags
158 *    based on object indexes, so that objects that are next to each other
159 *    get different tags.
160 */
161static inline u8 assign_tag(struct kmem_cache *cache,
162					const void *object, bool init)
163{
164	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
165		return 0xff;
166
167	/*
168	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
169	 * set, assign a tag when the object is being allocated (init == false).
170	 */
171	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
172		return init ? KASAN_TAG_KERNEL : kasan_random_tag();
173
174	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
175#ifdef CONFIG_SLAB
176	/* For SLAB assign tags based on the object index in the freelist. */
177	return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
178#else
179	/*
180	 * For SLUB assign a random tag during slab creation, otherwise reuse
181	 * the already assigned tag.
182	 */
183	return init ? kasan_random_tag() : get_tag(object);
184#endif
185}
186
187void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
188						const void *object)
189{
190	/* Initialize per-object metadata if it is present. */
191	if (kasan_requires_meta())
192		kasan_init_object_meta(cache, object);
193
194	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
195	object = set_tag(object, assign_tag(cache, object, true));
196
197	return (void *)object;
198}
199
200static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
201				unsigned long ip, bool quarantine, bool init)
202{
203	void *tagged_object;
204
205	if (!kasan_arch_is_ready())
206		return false;
207
208	tagged_object = object;
209	object = kasan_reset_tag(object);
210
211	if (is_kfence_address(object))
212		return false;
213
214	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
215	    object)) {
216		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
217		return true;
218	}
219
220	/* RCU slabs could be legally used after free within the RCU period */
221	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
222		return false;
223
224	if (!kasan_byte_accessible(tagged_object)) {
225		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
226		return true;
227	}
228
229	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
230			KASAN_SLAB_FREE, init);
231
232	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
233		return false;
234
235	if (kasan_stack_collection_enabled())
236		kasan_save_free_info(cache, tagged_object);
237
238	return kasan_quarantine_put(cache, object);
239}
240
241bool __kasan_slab_free(struct kmem_cache *cache, void *object,
242				unsigned long ip, bool init)
243{
244	return ____kasan_slab_free(cache, object, ip, true, init);
245}
246
247static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
248{
249	if (!kasan_arch_is_ready())
250		return false;
251
252	if (ptr != page_address(virt_to_head_page(ptr))) {
253		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
254		return true;
255	}
256
257	if (!kasan_byte_accessible(ptr)) {
258		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
259		return true;
260	}
261
262	/*
263	 * The object will be poisoned by kasan_poison_pages() or
264	 * kasan_slab_free_mempool().
265	 */
266
267	return false;
268}
269
270void __kasan_kfree_large(void *ptr, unsigned long ip)
271{
272	____kasan_kfree_large(ptr, ip);
273}
274
275void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
276{
277	struct folio *folio;
278
279	folio = virt_to_folio(ptr);
280
281	/*
282	 * Even though this function is only called for kmem_cache_alloc and
283	 * kmalloc backed mempool allocations, those allocations can still be
284	 * !PageSlab() when the size provided to kmalloc is larger than
285	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
286	 */
287	if (unlikely(!folio_test_slab(folio))) {
288		if (____kasan_kfree_large(ptr, ip))
289			return;
290		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
291	} else {
292		struct slab *slab = folio_slab(folio);
293
294		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
295	}
296}
297
298void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
299					void *object, gfp_t flags, bool init)
300{
301	u8 tag;
302	void *tagged_object;
303
304	if (gfpflags_allow_blocking(flags))
305		kasan_quarantine_reduce();
306
307	if (unlikely(object == NULL))
308		return NULL;
309
310	if (is_kfence_address(object))
311		return (void *)object;
312
313	/*
314	 * Generate and assign random tag for tag-based modes.
315	 * Tag is ignored in set_tag() for the generic mode.
316	 */
317	tag = assign_tag(cache, object, false);
318	tagged_object = set_tag(object, tag);
319
320	/*
321	 * Unpoison the whole object.
322	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
323	 */
324	kasan_unpoison(tagged_object, cache->object_size, init);
325
326	/* Save alloc info (if possible) for non-kmalloc() allocations. */
327	if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
328		kasan_save_alloc_info(cache, tagged_object, flags);
329
330	return tagged_object;
331}
332
333static inline void *____kasan_kmalloc(struct kmem_cache *cache,
334				const void *object, size_t size, gfp_t flags)
335{
336	unsigned long redzone_start;
337	unsigned long redzone_end;
338
339	if (gfpflags_allow_blocking(flags))
340		kasan_quarantine_reduce();
341
342	if (unlikely(object == NULL))
343		return NULL;
344
345	if (is_kfence_address(kasan_reset_tag(object)))
346		return (void *)object;
347
348	/*
349	 * The object has already been unpoisoned by kasan_slab_alloc() for
350	 * kmalloc() or by kasan_krealloc() for krealloc().
351	 */
352
353	/*
354	 * The redzone has byte-level precision for the generic mode.
355	 * Partially poison the last object granule to cover the unaligned
356	 * part of the redzone.
357	 */
358	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
359		kasan_poison_last_granule((void *)object, size);
360
361	/* Poison the aligned part of the redzone. */
362	redzone_start = round_up((unsigned long)(object + size),
363				KASAN_GRANULE_SIZE);
364	redzone_end = round_up((unsigned long)(object + cache->object_size),
365				KASAN_GRANULE_SIZE);
366	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
367			   KASAN_SLAB_REDZONE, false);
368
369	/*
370	 * Save alloc info (if possible) for kmalloc() allocations.
371	 * This also rewrites the alloc info when called from kasan_krealloc().
372	 */
373	if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
374		kasan_save_alloc_info(cache, (void *)object, flags);
375
376	/* Keep the tag that was set by kasan_slab_alloc(). */
377	return (void *)object;
378}
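/*
 * Example, assuming the generic mode's 8-byte granule (KASAN_GRANULE_SIZE
 * == 8) and kmalloc(100) served from a cache with object_size == 128:
 * kasan_poison_last_granule() marks bytes 100-103 of the granule starting
 * at offset 96 inaccessible (its shadow byte becomes 100 % 8 == 4), and
 * the aligned range [104, 128) is poisoned as KASAN_SLAB_REDZONE.
 */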
379
380void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
381					size_t size, gfp_t flags)
382{
383	return ____kasan_kmalloc(cache, object, size, flags);
384}
385EXPORT_SYMBOL(__kasan_kmalloc);
386
387void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
388						gfp_t flags)
389{
390	unsigned long redzone_start;
391	unsigned long redzone_end;
392
393	if (gfpflags_allow_blocking(flags))
394		kasan_quarantine_reduce();
395
396	if (unlikely(ptr == NULL))
397		return NULL;
398
399	/*
400	 * The object has already been unpoisoned by kasan_unpoison_pages() for
401	 * alloc_pages() or by kasan_krealloc() for krealloc().
402	 */
403
404	/*
405	 * The redzone has byte-level precision for the generic mode.
406	 * Partially poison the last object granule to cover the unaligned
407	 * part of the redzone.
408	 */
409	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
410		kasan_poison_last_granule(ptr, size);
411
412	/* Poison the aligned part of the redzone. */
413	redzone_start = round_up((unsigned long)(ptr + size),
414				KASAN_GRANULE_SIZE);
415	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
416	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
417		     KASAN_PAGE_REDZONE, false);
418
419	return (void *)ptr;
420}
421
422void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
423{
424	struct slab *slab;
425
426	if (unlikely(object == ZERO_SIZE_PTR))
427		return (void *)object;
428
429	/*
430	 * Unpoison the object's data.
431	 * Part of it might already have been unpoisoned, but it's unknown
432	 * how big that part is.
433	 */
434	kasan_unpoison(object, size, false);
435
436	slab = virt_to_slab(object);
437
438	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
439	if (unlikely(!slab))
440		return __kasan_kmalloc_large(object, size, flags);
441	else
442		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
443}
444
445bool __kasan_check_byte(const void *address, unsigned long ip)
446{
447	if (!kasan_byte_accessible(address)) {
448		kasan_report((unsigned long)address, 1, false, ip);
449		return false;
450	}
451	return true;
452}