/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

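/*
 * A worked example of the mapping above (a sketch, assuming the usual
 * KASAN_SHADOW_SCALE_SHIFT of 3, i.e. one shadow byte per 8 bytes of
 * memory): poisoning a 32-byte, 8-byte-aligned region writes exactly
 * 4 shadow bytes, since
 * kasan_mem_to_shadow(p + 32) - kasan_mem_to_shadow(p) == 4:
 *
 *	kasan_poison_shadow(p, 32, KASAN_KMALLOC_REDZONE);
 */
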
void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}

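/*
 * Example of the partial-granule encoding above (again assuming 8-byte
 * granules): unpoisoning a 13-byte object leaves shadow [0, 5]. The
 * first granule is fully accessible (shadow value 0), and only the
 * first 13 & KASAN_SHADOW_MASK == 5 bytes of the second granule are
 * accessible.
 */
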
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so that the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

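/*
 * Example of the check above: a shadow value of 5 means that only
 * bytes 0..4 of the granule are accessible. A 1-byte access at offset
 * (addr & KASAN_SHADOW_MASK) == 5 satisfies 5 >= 5 and is reported as
 * poisoned, while an access at offset 4 is not.
 */
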
static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 1))
			return true;

		/*
		 * If single shadow byte covers 2-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

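/*
 * The 4- and 8-byte variants below follow the same pattern as the
 * 2-byte one: check the last byte of the access with
 * memory_is_poisoned_1(), and only when the access straddles a granule
 * boundary also test the shadow byte of the first granule. E.g. a
 * 2-byte access at addr == 8n + 7 spans two granules, so both shadow
 * bytes have to be inspected.
 */
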
static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 3))
			return true;

		/*
		 * If single shadow byte covers 4-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		if (memory_is_poisoned_1(addr + 7))
			return true;

		/*
		 * If single shadow byte covers 8-byte access, we don't
		 * need to do anything more. Otherwise, test the first
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return unlikely(*(u8 *)shadow_addr);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(*shadow_addr)) {
		u16 shadow_first_bytes = *(u16 *)shadow_addr;

		if (unlikely(shadow_first_bytes))
			return true;

		/*
		 * If two shadow bytes cover the 16-byte access, we don't
		 * need to do anything more. Otherwise, test the last
		 * shadow byte.
		 */
		if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
			return false;

		return memory_is_poisoned_1(addr + 15);
	}

	return false;
}

static __always_inline unsigned long bytes_is_zero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_zero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_zero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_zero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_zero(start, (end - start) % 8);
}

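/*
 * Example of the scan above for a 26-byte shadow range starting at an
 * address with start % 8 == 6: the 2-byte unaligned prefix is checked
 * byte by byte, the next 24 bytes are checked as three u64 words, and
 * the empty tail falls through to the final bytes_is_zero() call.
 */
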
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

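/*
 * Example: a 70-byte access spans 9 or 10 shadow bytes, depending on
 * alignment. If memory_is_zero() stops on a non-zero shadow byte, the
 * access is still fine only when that byte is the last one and encodes
 * enough accessible bytes to cover the tail of the access; any other
 * non-zero shadow byte means the access is poisoned.
 */
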
static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
			return memory_is_poisoned_2(addr);
		case 4:
			return memory_is_poisoned_4(addr);
		case 8:
			return memory_is_poisoned_8(addr);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
	int rz =
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
	return rz;
}

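/*
 * Example: a 100-byte object falls into the "object_size <= 512 - 64"
 * bucket and gets a 64-byte redzone; a 3000-byte object falls into the
 * "object_size <= 4096 - 128" bucket and gets 128 bytes.
 */
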
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
			unsigned long *flags)
{
	int redzone_adjust;
	int orig_size = *size;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min(KMALLOC_MAX_SIZE, max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

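/*
 * The resulting per-object layout (a sketch; exact offsets depend on
 * the cache) is:
 *
 *	| object | alloc meta | free meta (optional) | redzone padding |
 *
 * with *size grown to cover the metadata plus enough redzone, capped
 * at KMALLOC_MAX_SIZE. The free meta is placed out of line only when
 * the freed object itself cannot be reused to hold it: RCU caches,
 * caches with constructors, or objects smaller than the free meta.
 */
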
void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_destroy(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
	unsigned long size = cache->object_size;
	unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return;

	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
	s8 shadow_byte;

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_double_free(cache, object, shadow_byte);
		return true;
	}

	kasan_poison_slab_free(cache, object);

	if (unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}

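/*
 * On the double-free check above: for a valid, live object the first
 * shadow byte is either 0 or a small positive count of accessible
 * bytes, so a negative value (one of the KASAN_* poison markers, e.g.
 * KASAN_KMALLOC_FREE read as s8) or a value >= KASAN_SHADOW_SCALE_SIZE
 * at the object start indicates a double-free or an invalid free.
 */
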
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);

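/*
 * Worked example for the redzone placement above (8-byte granules):
 * kmalloc(13) served from a 32-byte cache unpoisons 13 bytes (shadow
 * [0, 5]), rounds redzone_start up to object + 16 and redzone_end up
 * to object + 32, and poisons the remaining two shadow bytes with
 * KASAN_KMALLOC_REDZONE.
 */
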
void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page)))
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	else
		kasan_poison_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
	struct page *page = virt_to_page(ptr);

	kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
			KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

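/*
 * Example: for a 1 MB module mapping with 8-byte granules, the shadow
 * allocation above is 1 MB >> KASAN_SHADOW_SCALE_SHIFT == 128 KB
 * (already page-aligned), vmalloc'ed exactly at the address range that
 * kasan_mem_to_shadow() assigns to the module region.
 */
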
void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

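/*
 * Example: a 10-byte global with size_with_redzone == 32 gets its 10
 * bytes unpoisoned (shadow [0, 2]) and the remaining
 * 32 - round_up(10, 8) == 16 redzone bytes poisoned with
 * KASAN_GLOBAL_REDZONE.
 */
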
void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

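/*
 * For reference, each DEFINE_ASAN_LOAD_STORE(N) above expands to a
 * pair of instrumentation entry points; e.g. for N == 4:
 *
 *	void __asan_load4(unsigned long addr)
 *	{
 *		check_memory_region_inline(addr, 4, false, _RET_IP_);
 *	}
 *
 * plus a __asan_load4_noabort alias for compilers that emit calls to
 * the recoverable (noabort) variants.
 */
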
void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	/*
	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
	 * by redzones, so we simply round up size to simplify logic.
	 */
	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
			    KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	pr_info("WARNING: KASAN doesn't support memory hot-add\n");
	pr_info("Memory hot-add will be disabled\n");

	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

module_init(kasan_memhotplug_init);
#endif