v4.6
/*
 * This file contains error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kasan.h>
#include <linux/module.h>

#include <asm/sections.h>

#include "kasan.h"
#include "../slab.h"

/* Shadow layout customization. */
#define SHADOW_BYTES_PER_BLOCK 1
#define SHADOW_BLOCKS_PER_ROW 16
#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK)
#define SHADOW_ROWS_AROUND_ADDR 2

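/*
 * A note on the shadow encoding (generic KASAN): kasan_mem_to_shadow()
 * maps each KASAN_SHADOW_SCALE_SIZE-byte (8-byte) granule of kernel
 * memory to one shadow byte, roughly shadow = (addr >> 3) + offset.
 * A shadow value of 0 means the whole granule is accessible, 1..7 mean
 * only the first N bytes are, and the special marker values defined in
 * kasan.h denote redzones and freed memory. find_first_bad_addr() below
 * walks the shadow one granule at a time until it hits the first
 * non-zero (poisoned) byte.
 */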
static const void *find_first_bad_addr(const void *addr, size_t size)
{
	u8 shadow_val = *(u8 *)kasan_mem_to_shadow(addr);
	const void *first_bad_addr = addr;

	while (!shadow_val && first_bad_addr < addr + size) {
		first_bad_addr += KASAN_SHADOW_SCALE_SIZE;
		shadow_val = *(u8 *)kasan_mem_to_shadow(first_bad_addr);
	}
	return first_bad_addr;
}

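/*
 * Translate the poison value found in the first bad shadow byte into a
 * human-readable bug type: in-granule values indicate an out-of-bounds
 * access next to valid memory, while the dedicated markers identify
 * slab/global/stack redzones and freed objects.
 */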
static void print_error_description(struct kasan_access_info *info)
{
	const char *bug_type = "unknown-crash";
	u8 *shadow_addr;

	info->first_bad_addr = find_first_bad_addr(info->access_addr,
						info->access_size);

	shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);

	/*
	 * If the shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can
	 * look at the next shadow byte to determine the type of the bad access.
	 */
	if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1)
		shadow_addr++;

	switch (*shadow_addr) {
	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
		/*
		 * In theory it's still possible to see these shadow values
		 * due to a data race in the kernel code.
		 */
		bug_type = "out-of-bounds";
		break;
	case KASAN_PAGE_REDZONE:
	case KASAN_KMALLOC_REDZONE:
		bug_type = "slab-out-of-bounds";
		break;
	case KASAN_GLOBAL_REDZONE:
		bug_type = "global-out-of-bounds";
		break;
	case KASAN_STACK_LEFT:
	case KASAN_STACK_MID:
	case KASAN_STACK_RIGHT:
	case KASAN_STACK_PARTIAL:
		bug_type = "stack-out-of-bounds";
		break;
	case KASAN_FREE_PAGE:
	case KASAN_KMALLOC_FREE:
		bug_type = "use-after-free";
		break;
	}

	pr_err("BUG: KASAN: %s in %pS at addr %p\n",
		bug_type, (void *)info->ip,
		info->access_addr);
	pr_err("%s of size %zu by task %s/%d\n",
		info->is_write ? "Write" : "Read",
		info->access_size, current->comm, task_pid_nr(current));
}

static inline bool kernel_or_module_addr(const void *addr)
{
	if (addr >= (void *)_stext && addr < (void *)_end)
		return true;
	if (is_module_address((unsigned long)addr))
		return true;
	return false;
}

static inline bool init_task_stack_addr(const void *addr)
{
	return addr >= (void *)&init_thread_union.stack &&
		(addr <= (void *)&init_thread_union.stack +
			sizeof(init_thread_union.stack));
}

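/*
 * track->stack is a stack depot handle (see <linux/stackdepot.h>): a
 * compact reference to a deduplicated stack trace saved at allocation
 * or free time, with 0 meaning no trace was recorded.
 */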
#ifdef CONFIG_SLAB
static void print_track(struct kasan_track *track)
{
	pr_err("PID = %u\n", track->pid);
	if (track->stack) {
		struct stack_trace trace;

		depot_fetch_stack(track->stack, &trace);
		print_stack_trace(&trace, 0);
	} else {
		pr_err("(stack is not available)\n");
	}
}

static void object_err(struct kmem_cache *cache, struct page *page,
			void *object, char *unused_reason)
{
	struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
	struct kasan_free_meta *free_info;

	dump_stack();
	pr_err("Object at %p, in cache %s\n", object, cache->name);
	if (!(cache->flags & SLAB_KASAN))
		return;
	switch (alloc_info->state) {
	case KASAN_STATE_INIT:
		pr_err("Object not allocated yet\n");
		break;
	case KASAN_STATE_ALLOC:
		pr_err("Object allocated with size %u bytes.\n",
		       alloc_info->alloc_size);
		pr_err("Allocation:\n");
		print_track(&alloc_info->track);
		break;
	case KASAN_STATE_FREE:
		pr_err("Object freed, allocated with size %u bytes\n",
		       alloc_info->alloc_size);
		free_info = get_free_info(cache, object);
		pr_err("Allocation:\n");
		print_track(&alloc_info->track);
		pr_err("Deallocation:\n");
		print_track(&free_info->track);
		break;
	}
}
#endif

static void print_address_description(struct kasan_access_info *info)
{
	const void *addr = info->access_addr;

	if ((addr >= (void *)PAGE_OFFSET) &&
		(addr < high_memory)) {
		struct page *page = virt_to_head_page(addr);

		if (PageSlab(page)) {
			void *object;
			struct kmem_cache *cache = page->slab_cache;
			object = nearest_obj(cache, page,
						(void *)info->access_addr);
			object_err(cache, page, object,
					"kasan: bad access detected");
			return;
		}
		dump_page(page, "kasan: bad access detected");
	}

	if (kernel_or_module_addr(addr)) {
		if (!init_task_stack_addr(addr))
			pr_err("Address belongs to variable %pS\n", addr);
	}
	dump_stack();
}

static bool row_is_guilty(const void *row, const void *guilty)
{
	return (row <= guilty) && (guilty < row + SHADOW_BYTES_PER_ROW);
}

static int shadow_pointer_offset(const void *row, const void *shadow)
{
	/*
	 * The length of ">ff00ff00ff00ff00: " is
	 *    3 + (BITS_PER_LONG/8)*2 chars.
	 */
	return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
		(shadow - row) / SHADOW_BYTES_PER_BLOCK + 1;
}
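/*
 * Worked example (illustrative, BITS_PER_LONG == 64): the row prefix
 * ">%p: " takes 3 + 16 = 19 characters, and every shadow byte is
 * printed as two hex digits plus one space. A shadow byte five
 * positions into the guilty row therefore gets its caret at column
 * 19 + 5*2 + 5 + 1 = 35.
 */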

static void print_shadow_for_address(const void *addr)
{
	int i;
	const void *shadow = kasan_mem_to_shadow(addr);
	const void *shadow_row;

	shadow_row = (void *)round_down((unsigned long)shadow,
					SHADOW_BYTES_PER_ROW)
		- SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
		const void *kaddr = kasan_shadow_to_mem(shadow_row);
		char buffer[4 + (BITS_PER_LONG/8)*2];
		char shadow_buf[SHADOW_BYTES_PER_ROW];

		snprintf(buffer, sizeof(buffer),
			(i == 0) ? ">%p: " : " %p: ", kaddr);
		/*
		 * We should not pass a shadow pointer to a generic function,
		 * because generic functions may try to access the kasan
		 * mapping for the passed address.
		 */
		memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
			shadow_buf, SHADOW_BYTES_PER_ROW, 0);

		if (row_is_guilty(shadow_row, shadow))
			pr_err("%*c\n",
				shadow_pointer_offset(shadow_row, shadow),
				'^');

		shadow_row += SHADOW_BYTES_PER_ROW;
	}
}

static DEFINE_SPINLOCK(report_lock);

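/*
 * Printing a report runs kernel code (stack dumping, page dumping)
 * that may itself touch poisoned memory, so KASAN checks are switched
 * off via kasan_disable_current() while the report is produced;
 * report_lock serializes reports coming from multiple CPUs.
 */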
static void kasan_report_error(struct kasan_access_info *info)
{
	unsigned long flags;
	const char *bug_type;

	/*
	 * Make sure we don't end up in a loop.
	 */
	kasan_disable_current();
	spin_lock_irqsave(&report_lock, flags);
	pr_err("==================================================================\n");
	if (info->access_addr <
			kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) {
		if ((unsigned long)info->access_addr < PAGE_SIZE)
			bug_type = "null-ptr-deref";
		else if ((unsigned long)info->access_addr < TASK_SIZE)
			bug_type = "user-memory-access";
		else
			bug_type = "wild-memory-access";
		pr_err("BUG: KASAN: %s on address %p\n",
			bug_type, info->access_addr);
		pr_err("%s of size %zu by task %s/%d\n",
			info->is_write ? "Write" : "Read",
			info->access_size, current->comm,
			task_pid_nr(current));
		dump_stack();
	} else {
		print_error_description(info);
		print_address_description(info);
		print_shadow_for_address(info->first_bad_addr);
	}
	pr_err("==================================================================\n");
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irqrestore(&report_lock, flags);
	kasan_enable_current();
}

void kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip)
{
	struct kasan_access_info info;

	if (likely(!kasan_report_enabled()))
		return;

	info.access_addr = (void *)addr;
	info.access_size = size;
	info.is_write = is_write;
	info.ip = ip;

	kasan_report_error(&info);
}
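
/*
 * Entry points called by compiler instrumentation: with inline
 * instrumentation (CONFIG_KASAN_INLINE), GCC/Clang emit a shadow check
 * before every 1/2/4/8/16-byte access and call
 * __asan_report_{load,store}N_noabort() when the check fails; _RET_IP_
 * then identifies the instrumented call site in the report.
 */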

#define DEFINE_ASAN_REPORT_LOAD(size)                     \
void __asan_report_load##size##_noabort(unsigned long addr) \
{                                                         \
	kasan_report(addr, size, false, _RET_IP_);        \
}                                                         \
EXPORT_SYMBOL(__asan_report_load##size##_noabort)

#define DEFINE_ASAN_REPORT_STORE(size)                     \
void __asan_report_store##size##_noabort(unsigned long addr) \
{                                                          \
	kasan_report(addr, size, true, _RET_IP_);          \
}                                                          \
EXPORT_SYMBOL(__asan_report_store##size##_noabort)

DEFINE_ASAN_REPORT_LOAD(1);
DEFINE_ASAN_REPORT_LOAD(2);
DEFINE_ASAN_REPORT_LOAD(4);
DEFINE_ASAN_REPORT_LOAD(8);
DEFINE_ASAN_REPORT_LOAD(16);
DEFINE_ASAN_REPORT_STORE(1);
DEFINE_ASAN_REPORT_STORE(2);
DEFINE_ASAN_REPORT_STORE(4);
DEFINE_ASAN_REPORT_STORE(8);
DEFINE_ASAN_REPORT_STORE(16);

void __asan_report_load_n_noabort(unsigned long addr, size_t size)
{
	kasan_report(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_load_n_noabort);

void __asan_report_store_n_noabort(unsigned long addr, size_t size)
{
	kasan_report(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_report_store_n_noabort);
v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN error reporting code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <kunit/test.h>
#include <kunit/visibility.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <trace/events/error_report.h>

#include <asm/sections.h>

#include "kasan.h"
#include "../slab.h"

static unsigned long kasan_flags;

#define KASAN_BIT_REPORTED	0
#define KASAN_BIT_MULTI_SHOT	1

enum kasan_arg_fault {
	KASAN_ARG_FAULT_DEFAULT,
	KASAN_ARG_FAULT_REPORT,
	KASAN_ARG_FAULT_PANIC,
	KASAN_ARG_FAULT_PANIC_ON_WRITE,
};

static enum kasan_arg_fault kasan_arg_fault __ro_after_init = KASAN_ARG_FAULT_DEFAULT;

/* kasan.fault=report/panic/panic_on_write */
static int __init early_kasan_fault(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strcmp(arg, "report"))
		kasan_arg_fault = KASAN_ARG_FAULT_REPORT;
	else if (!strcmp(arg, "panic"))
		kasan_arg_fault = KASAN_ARG_FAULT_PANIC;
	else if (!strcmp(arg, "panic_on_write"))
		kasan_arg_fault = KASAN_ARG_FAULT_PANIC_ON_WRITE;
	else
		return -EINVAL;

	return 0;
}
early_param("kasan.fault", early_kasan_fault);

static int __init kasan_set_multi_shot(char *str)
{
	set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
	return 1;
}
__setup("kasan_multi_shot", kasan_set_multi_shot);

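/*
 * Example (kernel command line, illustrative): booting with
 * "kasan_multi_shot kasan.fault=report" prints every report instead of
 * only the first and never panics, while "kasan.fault=panic_on_write"
 * panics only when the bad access is a write.
 */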
/*
 * This function is used to check whether KASAN reports are suppressed for
 * software KASAN modes via kasan_disable/enable_current() critical sections.
 *
 * This is done to avoid:
 * 1. False-positive reports when accessing slab metadata,
 * 2. Deadlocking when poisoned memory is accessed by the reporting code.
 *
 * Hardware Tag-Based KASAN instead relies on:
 * For #1: Resetting tags via kasan_reset_tag().
 * For #2: Suppression of tag checks via CPU, see report_suppress_start/end().
 */
static bool report_suppressed_sw(void)
{
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
	if (current->kasan_depth)
		return true;
#endif
	return false;
}

static void report_suppress_start(void)
{
#ifdef CONFIG_KASAN_HW_TAGS
	/*
	 * Disable preemption for the duration of printing a KASAN report, as
	 * hw_suppress_tag_checks_start() disables checks on the current CPU.
	 */
	preempt_disable();
	hw_suppress_tag_checks_start();
#else
	kasan_disable_current();
#endif
}

static void report_suppress_stop(void)
{
#ifdef CONFIG_KASAN_HW_TAGS
	hw_suppress_tag_checks_stop();
	preempt_enable();
#else
	kasan_enable_current();
#endif
}

/*
 * Used to avoid reporting more than one KASAN bug unless kasan_multi_shot
 * is enabled. Note that KASAN tests effectively enable kasan_multi_shot
 * for their duration.
 */
static bool report_enabled(void)
{
	if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		return true;
	return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
}
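/*
 * test_and_set_bit() makes the one-report policy atomic: the first
 * caller finds KASAN_BIT_REPORTED clear and may print, while later
 * callers see it already set and stay silent unless kasan_multi_shot
 * is enabled.
 */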

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

VISIBLE_IF_KUNIT bool kasan_save_enable_multi_shot(void)
{
	return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_IF_KUNIT(kasan_save_enable_multi_shot);

VISIBLE_IF_KUNIT void kasan_restore_multi_shot(bool enabled)
{
	if (!enabled)
		clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
}
EXPORT_SYMBOL_IF_KUNIT(kasan_restore_multi_shot);

#endif

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

/*
 * Whether the KASAN KUnit test suite is currently being executed.
 * Updated in kasan_test.c.
 */
static bool kasan_kunit_executing;

VISIBLE_IF_KUNIT void kasan_kunit_test_suite_start(void)
{
	WRITE_ONCE(kasan_kunit_executing, true);
}
EXPORT_SYMBOL_IF_KUNIT(kasan_kunit_test_suite_start);

VISIBLE_IF_KUNIT void kasan_kunit_test_suite_end(void)
{
	WRITE_ONCE(kasan_kunit_executing, false);
}
EXPORT_SYMBOL_IF_KUNIT(kasan_kunit_test_suite_end);

static bool kasan_kunit_test_suite_executing(void)
{
	return READ_ONCE(kasan_kunit_executing);
}

#else /* CONFIG_KASAN_KUNIT_TEST */

static inline bool kasan_kunit_test_suite_executing(void) { return false; }

#endif /* CONFIG_KASAN_KUNIT_TEST */

#if IS_ENABLED(CONFIG_KUNIT)

static void fail_non_kasan_kunit_test(void)
{
	struct kunit *test;

	if (kasan_kunit_test_suite_executing())
		return;

	test = current->kunit_test;
	if (test)
		kunit_set_failure(test);
}

#else /* CONFIG_KUNIT */

static inline void fail_non_kasan_kunit_test(void) { }

#endif /* CONFIG_KUNIT */

static DEFINE_RAW_SPINLOCK(report_lock);

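/*
 * Every report is bracketed by start_report()/end_report(): the pair
 * takes report_lock with interrupts disabled, prints the banner lines,
 * and on the way out taints the kernel and applies the configured
 * kasan.fault= policy.
 */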
static void start_report(unsigned long *flags, bool sync)
{
	fail_non_kasan_kunit_test();
	/* Respect the /proc/sys/kernel/traceoff_on_warning interface. */
	disable_trace_on_warning();
	/* Do not allow LOCKDEP to mangle KASAN reports. */
	lockdep_off();
	/* Make sure we don't end up in a loop. */
	report_suppress_start();
	raw_spin_lock_irqsave(&report_lock, *flags);
	pr_err("==================================================================\n");
}

static void end_report(unsigned long *flags, const void *addr, bool is_write)
{
	if (addr)
		trace_error_report_end(ERROR_DETECTOR_KASAN,
				       (unsigned long)addr);
	pr_err("==================================================================\n");
	raw_spin_unlock_irqrestore(&report_lock, *flags);
	if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
		check_panic_on_warn("KASAN");
	switch (kasan_arg_fault) {
	case KASAN_ARG_FAULT_DEFAULT:
	case KASAN_ARG_FAULT_REPORT:
		break;
	case KASAN_ARG_FAULT_PANIC:
		panic("kasan.fault=panic set ...\n");
		break;
	case KASAN_ARG_FAULT_PANIC_ON_WRITE:
		if (is_write)
			panic("kasan.fault=panic_on_write set ...\n");
		break;
	}
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	lockdep_on();
	report_suppress_stop();
}

static void print_error_description(struct kasan_report_info *info)
{
	pr_err("BUG: KASAN: %s in %pS\n", info->bug_type, (void *)info->ip);

	if (info->type != KASAN_REPORT_ACCESS) {
		pr_err("Free of addr %px by task %s/%d\n",
			info->access_addr, current->comm, task_pid_nr(current));
		return;
	}

	if (info->access_size)
		pr_err("%s of size %zu at addr %px by task %s/%d\n",
			info->is_write ? "Write" : "Read", info->access_size,
			info->access_addr, current->comm, task_pid_nr(current));
	else
		pr_err("%s at addr %px by task %s/%d\n",
			info->is_write ? "Write" : "Read",
			info->access_addr, current->comm, task_pid_nr(current));
}

static void print_track(struct kasan_track *track, const char *prefix)
{
#ifdef CONFIG_KASAN_EXTRA_INFO
	u64 ts_nsec = track->timestamp;
	unsigned long rem_usec;

	ts_nsec <<= 9;
	rem_usec = do_div(ts_nsec, NSEC_PER_SEC) / 1000;

	pr_err("%s by task %u on cpu %d at %lu.%06lus:\n",
			prefix, track->pid, track->cpu,
			(unsigned long)ts_nsec, rem_usec);
#else
	pr_err("%s by task %u:\n", prefix, track->pid);
#endif /* CONFIG_KASAN_EXTRA_INFO */
	if (track->stack)
		stack_depot_print(track->stack);
	else
		pr_err("(stack is not available)\n");
}
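/*
 * The "ts_nsec <<= 9" above undoes the compression applied when the
 * track was saved: with CONFIG_KASAN_EXTRA_INFO the timestamp is
 * stored right-shifted by 9 bits (roughly half-microsecond resolution)
 * so that it fits the metadata bitfield.
 */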

static inline struct page *addr_to_page(const void *addr)
{
	if (virt_addr_valid(addr))
		return virt_to_head_page(addr);
	return NULL;
}

static void describe_object_addr(const void *addr, struct kasan_report_info *info)
{
	unsigned long access_addr = (unsigned long)addr;
	unsigned long object_addr = (unsigned long)info->object;
	const char *rel_type, *region_state = "";
	int rel_bytes;

	pr_err("The buggy address belongs to the object at %px\n"
	       " which belongs to the cache %s of size %d\n",
		info->object, info->cache->name, info->cache->object_size);

	if (access_addr < object_addr) {
		rel_type = "to the left";
		rel_bytes = object_addr - access_addr;
	} else if (access_addr >= object_addr + info->alloc_size) {
		rel_type = "to the right";
		rel_bytes = access_addr - (object_addr + info->alloc_size);
	} else {
		rel_type = "inside";
		rel_bytes = access_addr - object_addr;
	}

	/*
	 * Tag-Based modes use the stack ring to infer the bug type, but the
	 * memory region state description is generated based on the metadata.
	 * Thus, defining the region state as below can contradict the metadata.
	 * Fixing this requires further improvements, so only infer the state
	 * for the Generic mode.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		if (strcmp(info->bug_type, "slab-out-of-bounds") == 0)
			region_state = "allocated ";
		else if (strcmp(info->bug_type, "slab-use-after-free") == 0)
			region_state = "freed ";
	}

	pr_err("The buggy address is located %d bytes %s of\n"
	       " %s%zu-byte region [%px, %px)\n",
	       rel_bytes, rel_type, region_state, info->alloc_size,
	       (void *)object_addr, (void *)(object_addr + info->alloc_size));
}
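/*
 * Example of the resulting output (illustrative addresses):
 *   The buggy address is located 4 bytes to the right of
 *    allocated 128-byte region [ffff000012345600, ffff000012345680)
 */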

static void describe_object_stacks(struct kasan_report_info *info)
{
	if (info->alloc_track.stack) {
		print_track(&info->alloc_track, "Allocated");
		pr_err("\n");
	}

	if (info->free_track.stack) {
		print_track(&info->free_track, "Freed");
		pr_err("\n");
	}

	kasan_print_aux_stacks(info->cache, info->object);
}

static void describe_object(const void *addr, struct kasan_report_info *info)
{
	if (kasan_stack_collection_enabled())
		describe_object_stacks(info);
	describe_object_addr(addr, info);
}

static inline bool kernel_or_module_addr(const void *addr)
{
	if (is_kernel((unsigned long)addr))
		return true;
	if (is_module_address((unsigned long)addr))
		return true;
	return false;
}

static inline bool init_task_stack_addr(const void *addr)
{
	return addr >= (void *)&init_thread_union.stack &&
		(addr <= (void *)&init_thread_union.stack +
			sizeof(init_thread_union.stack));
}

static void print_address_description(void *addr, u8 tag,
				      struct kasan_report_info *info)
{
	struct page *page = addr_to_page(addr);

	dump_stack_lvl(KERN_ERR);
	pr_err("\n");

	if (info->cache && info->object) {
		describe_object(addr, info);
		pr_err("\n");
	}

	if (kernel_or_module_addr(addr) && !init_task_stack_addr(addr)) {
		pr_err("The buggy address belongs to the variable:\n");
		pr_err(" %pS\n", addr);
		pr_err("\n");
	}

	if (object_is_on_stack(addr)) {
		/*
		 * Currently, KASAN supports printing frame information only
		 * for accesses to the task's own stack.
		 */
		kasan_print_address_stack_frame(addr);
		pr_err("\n");
	}

	if (is_vmalloc_addr(addr)) {
		struct vm_struct *va = find_vm_area(addr);

		if (va) {
			pr_err("The buggy address belongs to the virtual mapping at\n"
			       " [%px, %px) created by:\n"
			       " %pS\n",
			       va->addr, va->addr + va->size, va->caller);
			pr_err("\n");

			page = vmalloc_to_page(addr);
		}
	}

	if (page) {
		pr_err("The buggy address belongs to the physical page:\n");
		dump_page(page, "kasan: bad access detected");
		pr_err("\n");
	}
}

static bool meta_row_is_guilty(const void *row, const void *addr)
{
	return (row <= addr) && (addr < row + META_MEM_BYTES_PER_ROW);
}

static int meta_pointer_offset(const void *row, const void *addr)
{
	/*
	 * Memory state around the buggy address:
	 *  ff00ff00ff00ff00: 00 00 00 05 fe fe fe fe fe fe fe fe fe fe fe fe
	 *  ...
	 *
	 * The length of ">ff00ff00ff00ff00: " is
	 *    3 + (BITS_PER_LONG / 8) * 2 chars.
	 * The length of each granule metadata is 2 bytes
	 *    plus 1 byte for space.
	 */
	return 3 + (BITS_PER_LONG / 8) * 2 +
		(addr - row) / KASAN_GRANULE_SIZE * 3 + 1;
}
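/*
 * Worked example (illustrative, BITS_PER_LONG == 64): the row prefix
 * ">%px: " takes 3 + 16 = 19 characters and each granule's metadata
 * takes 3 ("xx "). An address five granules into the guilty row
 * therefore gets its caret at column 19 + 5 * 3 + 1 = 35.
 */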

static void print_memory_metadata(const void *addr)
{
	int i;
	void *row;

	row = (void *)round_down((unsigned long)addr, META_MEM_BYTES_PER_ROW)
			- META_ROWS_AROUND_ADDR * META_MEM_BYTES_PER_ROW;

	pr_err("Memory state around the buggy address:\n");

	for (i = -META_ROWS_AROUND_ADDR; i <= META_ROWS_AROUND_ADDR; i++) {
		char buffer[4 + (BITS_PER_LONG / 8) * 2];
		char metadata[META_BYTES_PER_ROW];

		snprintf(buffer, sizeof(buffer),
				(i == 0) ? ">%px: " : " %px: ", row);

		/*
		 * We should not pass a shadow pointer to a generic function,
		 * because generic functions may try to access the kasan
		 * mapping for the passed address.
		 */
		kasan_metadata_fetch_row(&metadata[0], row);

		print_hex_dump(KERN_ERR, buffer,
			DUMP_PREFIX_NONE, META_BYTES_PER_ROW, 1,
			metadata, META_BYTES_PER_ROW, 0);

		if (meta_row_is_guilty(row, addr))
			pr_err("%*c\n", meta_pointer_offset(row, addr), '^');

		row += META_MEM_BYTES_PER_ROW;
	}
}

static void print_report(struct kasan_report_info *info)
{
	void *addr = kasan_reset_tag((void *)info->access_addr);
	u8 tag = get_tag((void *)info->access_addr);

	print_error_description(info);
	if (addr_has_metadata(addr))
		kasan_print_tags(tag, info->first_bad_addr);
	pr_err("\n");

	if (addr_has_metadata(addr)) {
		print_address_description(addr, tag, info);
		print_memory_metadata(info->first_bad_addr);
	} else {
		dump_stack_lvl(KERN_ERR);
	}
}

static void complete_report_info(struct kasan_report_info *info)
{
	void *addr = kasan_reset_tag((void *)info->access_addr);
	struct slab *slab;

	if (info->type == KASAN_REPORT_ACCESS)
		info->first_bad_addr = kasan_find_first_bad_addr(
					(void *)info->access_addr, info->access_size);
	else
		info->first_bad_addr = addr;

	slab = kasan_addr_to_slab(addr);
	if (slab) {
		info->cache = slab->slab_cache;
		info->object = nearest_obj(info->cache, slab, addr);

		/* Try to determine allocation size based on the metadata. */
		info->alloc_size = kasan_get_alloc_size(info->object, info->cache);
		/* Fallback to the object size if failed. */
		if (!info->alloc_size)
			info->alloc_size = info->cache->object_size;
	} else
		info->cache = info->object = NULL;

	switch (info->type) {
	case KASAN_REPORT_INVALID_FREE:
		info->bug_type = "invalid-free";
		break;
	case KASAN_REPORT_DOUBLE_FREE:
		info->bug_type = "double-free";
		break;
	default:
		/* bug_type filled in by kasan_complete_mode_report_info. */
		break;
	}

	/* Fill in mode-specific report info fields. */
	kasan_complete_mode_report_info(info);
}

void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_type type)
{
	unsigned long flags;
	struct kasan_report_info info;

	/*
	 * Do not check report_suppressed_sw(), as an invalid-free cannot be
	 * caused by accessing poisoned memory and thus should not be suppressed
	 * by kasan_disable/enable_current() critical sections.
	 *
	 * Note that for Hardware Tag-Based KASAN, kasan_report_invalid_free()
	 * is triggered by explicit tag checks and not by the ones performed by
	 * the CPU. Thus, reporting invalid-free is not suppressed as well.
	 */
	if (unlikely(!report_enabled()))
		return;

	start_report(&flags, true);

	__memset(&info, 0, sizeof(info));
	info.type = type;
	info.access_addr = ptr;
	info.access_size = 0;
	info.is_write = false;
	info.ip = ip;

	complete_report_info(&info);

	print_report(&info);

	/*
	 * An invalid free is considered a "write", since the allocator's
	 * metadata updates involve writes.
	 */
	end_report(&flags, ptr, true);
}

/*
 * kasan_report() is the only reporting function that uses
 * user_access_save/restore(): kasan_report_invalid_free() cannot be called
 * from a UACCESS region, and kasan_report_async() is not used on x86.
 */
bool kasan_report(const void *addr, size_t size, bool is_write,
			unsigned long ip)
{
	bool ret = true;
	unsigned long ua_flags = user_access_save();
	unsigned long irq_flags;
	struct kasan_report_info info;

	if (unlikely(report_suppressed_sw()) || unlikely(!report_enabled())) {
		ret = false;
		goto out;
	}

	start_report(&irq_flags, true);

	__memset(&info, 0, sizeof(info));
	info.type = KASAN_REPORT_ACCESS;
	info.access_addr = addr;
	info.access_size = size;
	info.is_write = is_write;
	info.ip = ip;

	complete_report_info(&info);

	print_report(&info);

	end_report(&irq_flags, (void *)addr, is_write);

out:
	user_access_restore(ua_flags);

	return ret;
}

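/*
 * kasan_report()'s return value lets the mode-specific check functions
 * distinguish a printed report (true) from a suppressed one (false),
 * so instrumentation can treat a suppressed access as if it were valid.
 */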
#ifdef CONFIG_KASAN_HW_TAGS
void kasan_report_async(void)
{
	unsigned long flags;

	/*
	 * Do not check report_suppressed_sw(), as
	 * kasan_disable/enable_current() critical sections do not affect
	 * Hardware Tag-Based KASAN.
	 */
	if (unlikely(!report_enabled()))
		return;

	start_report(&flags, false);
	pr_err("BUG: KASAN: invalid-access\n");
	pr_err("Asynchronous fault: no details available\n");
	pr_err("\n");
	dump_stack_lvl(KERN_ERR);
	/*
	 * Conservatively set is_write=true, because no details are available.
	 * In this mode, kasan.fault=panic_on_write is like kasan.fault=panic.
	 */
	end_report(&flags, NULL, true);
}
#endif /* CONFIG_KASAN_HW_TAGS */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * With compiler-based KASAN modes, accesses to bogus pointers (outside of the
 * mapped kernel address space regions) cause faults when KASAN tries to check
 * the shadow memory before the actual memory access. This results in cryptic
 * GPF reports, which are hard for users to interpret. This hook helps users to
 * figure out what the original bogus pointer was.
 */
void kasan_non_canonical_hook(unsigned long addr)
{
	unsigned long orig_addr;
	const char *bug_type;

	/*
	 * All addresses that came as a result of the memory-to-shadow mapping
	 * (even for bogus pointers) must be >= KASAN_SHADOW_OFFSET.
	 */
	if (addr < KASAN_SHADOW_OFFSET)
		return;

	orig_addr = (unsigned long)kasan_shadow_to_mem((void *)addr);

	/*
	 * For faults near the shadow address for NULL, we can be fairly certain
	 * that this is a KASAN shadow memory access.
	 * For faults that correspond to the shadow for low or high canonical
	 * addresses, we can still be pretty sure: these shadow regions are a
	 * fairly narrow chunk of the address space.
	 * But the shadow for non-canonical addresses is a really large chunk
	 * of the address space. For this case, we still print the decoded
	 * address, but make it clear that this is not necessarily what's
	 * actually going on.
	 */
	if (orig_addr < PAGE_SIZE)
		bug_type = "null-ptr-deref";
	else if (orig_addr < TASK_SIZE)
		bug_type = "probably user-memory-access";
	else if (addr_in_shadow((void *)addr))
		bug_type = "probably wild-memory-access";
	else
		bug_type = "maybe wild-memory-access";
	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
		 orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
}
#endif