/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	kasan_poison_shadow(address, size, 0);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
		*shadow = size & KASAN_SHADOW_MASK;
	}
}
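
/*
 * Shadow encoding, with a worked example (the granule size here is
 * KASAN_SHADOW_SCALE_SIZE == 8, as the checks below assume): a shadow
 * byte of 0 means the whole 8-byte granule is accessible, a value 1..7
 * means only the first N bytes are accessible, and a negative value
 * marks the granule as poisoned with that poison type.  Unpoisoning
 * 13 bytes at an aligned address therefore writes 0 to the first shadow
 * byte and 13 & KASAN_SHADOW_MASK == 5 to the second.
 */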

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

/*
 * All functions below are always inlined so that the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
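
/*
 * Worked example for the check above (assuming an 8-byte granule): if
 * the shadow byte is 5, only bytes at offsets 0..4 of the granule are
 * accessible.  An access whose offset (addr & KASAN_SHADOW_MASK) is 5
 * or more is reported as poisoned; a negative shadow byte poisons every
 * offset of the granule.
 */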

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * If the access crosses an 8-byte (shadow granule) boundary, it maps
	 * into 2 shadow bytes, so we need to check them both.
	 */
	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}
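
/*
 * The scan above checks the shadow byte-by-byte for short or unaligned
 * ranges and in u64-sized chunks once the pointer is 8-byte aligned;
 * it returns the address of the first nonzero shadow byte (or 0) so
 * the caller can decide whether that byte still permits the access.
 */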

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

static __always_inline void check_memory_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return;

	kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
				size_t size, bool write,
				unsigned long ret_ip)
{
	check_memory_region_inline(addr, size, write, ret_ip);
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
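
/*
 * Worked example for the policy above: a 100-byte object does not fit
 * into 64 - 16 or 128 - 32 bytes but does fit into 512 - 64, so it gets
 * a 64-byte redzone; a 5000-byte object falls through to the
 * (1 << 14) - 256 bucket and gets a 256-byte redzone.
 */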

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}
	redzone_adjust = optimal_redzone(cache->object_size) -
		(*size - cache->object_size);

	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size +
					optimal_redzone(cache->object_size)));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}
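
/*
 * The resulting in-slab layout is the object itself followed by a
 * redzone area that holds struct kasan_alloc_meta (and, for caches
 * where storing it inside the freed object would be unsafe, struct
 * kasan_free_meta), padded so the total redzone is at least
 * optimal_redzone(object_size) while the total size stays within
 * KMALLOC_MAX_SIZE.
 */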

void kasan_cache_shrink(struct kmem_cache *cache)
{
	quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		quarantine_remove_cache(cache);
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}
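
/*
 * Saved traces are deduplicated by the stack depot: depot_save_stack()
 * returns a compact handle rather than the full trace, and that handle
 * is what gets stored in the per-object kasan_track below.
 */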

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));
}

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	kasan_kmalloc(cache, object, cache->object_size, flags);
}

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	unsigned long rounded_up_size;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_byte < 0 || shadow_byte >= KASAN_SHADOW_SCALE_SIZE) {
		kasan_report_invalid_free(object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if (!quarantine || unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);
	return true;
}
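
/*
 * To summarize the free path above: a pointer that is not the start of
 * an object, or whose first shadow byte does not mark live accessible
 * memory, is reported as an invalid free; otherwise the object is
 * poisoned with KASAN_KMALLOC_FREE and, when quarantining is requested,
 * a free stack trace is recorded and the object is parked in the
 * quarantine instead of being returned to the allocator (the 'true'
 * return tells the caller to skip the actual free).
 */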

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		   gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(object, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);
}
EXPORT_SYMBOL(kasan_kmalloc);
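
/*
 * Worked example (assuming an 8-byte-aligned object): for a kmalloc(100)
 * served from a 128-byte cache, bytes 0..99 are unpoisoned, the partial
 * granule covering bytes 96..103 gets a shadow value of 4 (100 & 7), and
 * the granules covering bytes 104..127 are poisoned as
 * KASAN_KMALLOC_REDZONE, so an access to object[100] is caught.
 */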

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		kasan_kmalloc_large(object, size, flags);
	else
		kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}

int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
			PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL | __GFP_ZERO,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}
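
/*
 * Modules live in vmalloc space, which has no shadow populated by
 * default, so the shadow for a freshly allocated module region is
 * vmalloc'ed here on demand: one shadow byte per
 * (1 << KASAN_SHADOW_SCALE_SHIFT) bytes of module memory, rounded up to
 * whole pages and placed at the fixed kasan_mem_to_shadow() translation
 * of the module's address.
 */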

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

	kasan_unpoison_shadow(global->beg, global->size);

	kasan_poison_shadow(global->beg + aligned_size,
		global->size_with_redzone - aligned_size,
		KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, false, _RET_IP_);\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_memory_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison large objects when they go out of scope. */
void __asan_poison_stack_memory(const void *addr, size_t size)
{
	/*
	 * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
	 * by redzones, so we simply round up size to simplify logic.
	 */
	kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
			    KASAN_USE_AFTER_SCOPE);
}
EXPORT_SYMBOL(__asan_poison_stack_memory);

/* Emitted by compiler to unpoison large objects when they go into scope. */
void __asan_unpoison_stack_memory(const void *addr, size_t size)
{
	kasan_unpoison_shadow(addr, size);
}
EXPORT_SYMBOL(__asan_unpoison_stack_memory);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
			      size - rounded_down_size);
	kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_LEFT);
	kasan_poison_shadow(right_redzone,
			padding_size + KASAN_ALLOCA_REDZONE_SIZE,
			KASAN_ALLOCA_RIGHT);
}
EXPORT_SYMBOL(__asan_alloca_poison);
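
/*
 * The resulting alloca layout is a KASAN_ALLOCA_REDZONE_SIZE-byte left
 * redzone ending at 'addr', then the object itself (only its trailing
 * partial granule has its shadow updated here), then a right redzone
 * covering the padding up to the next KASAN_ALLOCA_REDZONE_SIZE
 * boundary plus one more full redzone-sized block.
 */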

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte) \
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the first one is
	 * arch-specific and the latter depends on HUGETLB_PAGE.  So let's
	 * abuse pud_bad(); if the pud is bad, it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it must have been mapped
		 * during boot.  This can happen when we are onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow;
		 * a non-NULL result from find_vm_area() tells us that this
		 * was indeed the latter case.
		 *
		 * Currently it's not possible to free shadow mapped during
		 * boot by kasan_init(), because the code to do that hasn't
		 * been written yet, so we just leak that memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif