// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so that the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending on
 * the memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(const void *addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}
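
/*
 * For reference, the shadow encoding that the checks in this file rely on,
 * in outline: each shadow byte covers one 8-byte granule. A shadow value of
 * 0 means the whole granule is accessible, a value N in 1..7 means only the
 * first N bytes are accessible, and a negative value marks the granule as
 * fully poisoned (the specific value encodes the reason, e.g.
 * KASAN_GLOBAL_REDZONE or KASAN_SLAB_FREE). For example, with a shadow
 * value of 6, a 1-byte access at offset 5 within the granule passes
 * (5 >= 6 is false), while an access at offset 6 is reported (6 >= 6).
 */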

static __always_inline bool memory_is_poisoned_2_4_8(const void *addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr);

	/*
	 * The access crosses an 8-byte (shadow granule) boundary. Such an
	 * access maps onto two shadow bytes, so both of them need to be
	 * checked.
	 */
	if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}
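
/*
 * A worked example for the boundary check above: a 4-byte access starting
 * at offset 6 of a granule touches bytes 6..7 of that granule and bytes
 * 0..1 of the next one; ((addr + 3) & KASAN_GRANULE_MASK) == 1, which is
 * less than size - 1 == 3, so both shadow bytes get checked. An access that
 * stays within one granule only needs the check of its last byte.
 */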

static __always_inline bool memory_is_poisoned_16(const void *addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr);

	/* An unaligned 16-byte access maps onto 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow(addr),
			kasan_mem_to_shadow(addr + size - 1) + 1);

	if (unlikely(ret)) {
		const void *last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
		s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK;

		if (unlikely(ret != (unsigned long)last_shadow ||
			     last_accessible_byte >= *last_shadow))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(const void *addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

static __always_inline bool check_region_inline(const void *addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (!kasan_arch_is_ready())
		return true;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely(!addr_has_metadata(addr)))
		return !kasan_report(addr, size, write, ret_ip);

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(const void *addr, size_t size, bool write,
		       unsigned long ret_ip)
{
	return check_region_inline(addr, size, write, ret_ip);
}
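
/*
 * kasan_check_range() is the entry point used by outline-instrumented code
 * and by the KASAN string-op wrappers; for example, the instrumented
 * memcpy() is expected to check the source range as a read and the
 * destination range as a write before delegating to the underlying
 * __memcpy().
 */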

bool kasan_byte_accessible(const void *addr)
{
	s8 shadow_byte;

	if (!kasan_arch_is_ready())
		return true;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

void kasan_cache_shrink(struct kmem_cache *cache)
{
	kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		kasan_quarantine_remove_cache(cache);
}

static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

	kasan_unpoison(global->beg, global->size, false);

	kasan_poison(global->beg + aligned_size,
		     global->size_with_redzone - aligned_size,
		     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(void *ptr, ssize_t size)
{
	int i;
	struct kasan_global *globals = ptr;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(void *ptr, ssize_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);
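
/*
 * For each instrumented global the compiler reserves size_with_redzone
 * bytes, of which the first `size` bytes are the variable itself; the rest,
 * starting at the granule-aligned end of the variable, is poisoned above
 * with KASAN_GLOBAL_REDZONE so that out-of-bounds accesses next to the
 * global are caught.
 */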

#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(void *addr)				\
	{								\
		check_region_inline(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(void *addr)				\
	{								\
		check_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
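
/*
 * As an illustration, DEFINE_ASAN_LOAD_STORE(1) above expands to roughly:
 *
 *	void __asan_load1(void *addr)
 *	{
 *		check_region_inline(addr, 1, false, _RET_IP_);
 *	}
 *	EXPORT_SYMBOL(__asan_load1);
 *	__alias(__asan_load1)
 *	void __asan_load1_noabort(void *);
 *	EXPORT_SYMBOL(__asan_load1_noabort);
 *
 * plus the matching __asan_store1()/__asan_store1_noabort() pair. The
 * compiler emits calls to these for 1, 2, 4, 8 and 16 byte accesses when
 * outline instrumentation is used; the _noabort aliases match the symbol
 * names the kernel-address sanitizer emits, since KASAN reports do not
 * abort execution.
 */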

void __asan_loadN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* Emitted by the compiler before noreturn calls; nothing to do here. */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(void *addr, ssize_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison((const void *)(addr + rounded_down_size),
		       size - rounded_down_size, false);
	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_LEFT, false);
	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);
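
/*
 * The intended stack layout for an instrumented alloca() allocation is
 * roughly:
 *
 *	addr - KASAN_ALLOCA_REDZONE_SIZE     addr      addr + rounded_up_size
 *	|<------ left redzone ------>|<- object (size) ->|<- padding + right redzone ->|
 *
 * __asan_alloca_poison() unpoisons the object itself, including its partial
 * last granule, and poisons the two redzones with KASAN_ALLOCA_LEFT and
 * KASAN_ALLOCA_RIGHT so that out-of-bounds accesses next to the allocation
 * are reported.
 */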

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom)
{
	if (unlikely(!stack_top || stack_top > (void *)stack_bottom))
		return;

	kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)					\
	void __asan_set_shadow_##byte(const void *addr, ssize_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);
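
/*
 * The specific byte values mirror the compiler-side AddressSanitizer
 * conventions for stack frames: 00 marks addressable memory, f1, f2 and f3
 * are used for the left, middle and right stack frame redzones, and f8 for
 * out-of-scope local variables. The compiler calls the helpers above
 * directly to poison and unpoison stack frame shadow on function entry and
 * exit.
 */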

/* Only allow cache merging when no per-object metadata is present. */
slab_flags_t kasan_never_merge(void)
{
	if (!kasan_requires_meta())
		return 0;
	return SLAB_KASAN;
}

/*
 * Adaptive redzone policy, taken from the userspace AddressSanitizer
 * runtime: larger allocations get larger redzones.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
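
/*
 * A couple of examples of the policy above: a 48-byte object fits in the
 * first bucket and gets a 16-byte redzone, a 100-byte object gets 64 bytes,
 * and a 1000-byte object gets 128 bytes of redzone.
 */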

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;
	unsigned int rem_free_meta_size;
	unsigned int orig_alloc_meta_offset;

	if (!kasan_requires_meta())
		return;

	/*
	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN
	 * and that thus have per-object metadata.
	 * Currently this flag is used in two places:
	 * 1. In slab_ksize() to account for per-object metadata when
	 *    calculating the size of the accessible memory within the object.
	 * 2. In slab_common.c via kasan_never_merge() to prevent merging of
	 *    caches with per-object metadata.
	 */
	*flags |= SLAB_KASAN;

	ok_size = *size;

	/* Add alloc meta into the redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* If alloc meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	ok_size = *size;
	orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset;

	/*
	 * Store free meta in the redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
		goto free_meta_added;
	}

	/*
	 * Otherwise, if the object is large enough to contain free meta,
	 * store it within the object.
	 */
	if (sizeof(struct kasan_free_meta) <= cache->object_size) {
		/* cache->kasan_info.free_meta_offset = 0 is implied. */
		goto free_meta_added;
	}

	/*
	 * For smaller objects, store the beginning of free meta within the
	 * object and the end in the redzone. And thus shift the location of
	 * alloc meta to free up space for free meta.
	 * This is only possible when slub_debug is disabled, as otherwise
	 * the end of free meta will overlap with slub_debug metadata.
	 */
	if (!__slub_debug_enabled()) {
		rem_free_meta_size = sizeof(struct kasan_free_meta) -
					cache->object_size;
		*size += rem_free_meta_size;
		if (cache->kasan_info.alloc_meta_offset != 0)
			cache->kasan_info.alloc_meta_offset += rem_free_meta_size;
		goto free_meta_added;
	}

	/*
	 * If the object is small and slub_debug is enabled, store free meta
	 * in the redzone after alloc meta.
	 */
	cache->kasan_info.free_meta_offset = *size;
	*size += sizeof(struct kasan_free_meta);

free_meta_added:
	/* If free meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		cache->kasan_info.alloc_meta_offset = orig_alloc_meta_offset;
		*size = ok_size;
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE. */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}
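
/*
 * The resulting slab object layout, for a cache where both metas end up in
 * the redzone (e.g. a cache with a constructor), is roughly:
 *
 *	| object | kasan_alloc_meta | kasan_free_meta | optimal redzone ... |
 *
 * with alloc_meta_offset and free_meta_offset recording where each struct
 * starts relative to the object. For caches without a constructor and not
 * marked SLAB_TYPESAFE_BY_RCU, free meta instead overlaps the freed object
 * itself (free_meta_offset == 0) whenever it fits.
 */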

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta) {
		/* Zero out alloc meta to mark it as invalid. */
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/*
	 * Explicitly marking free meta as invalid is not required: the shadow
	 * value for the first 8 bytes of a newly allocated object is not
	 * KASAN_SLAB_FREE_META.
	 */
}

static void release_alloc_meta(struct kasan_alloc_meta *meta)
{
	/* Zero out alloc meta to mark it as invalid. */
	__memset(meta, 0, sizeof(*meta));
}

static void release_free_meta(const void *object, struct kasan_free_meta *meta)
{
	if (!kasan_arch_is_ready())
		return;

	/* Check if free meta is valid. */
	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
		return;

	/* Mark free meta as invalid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
}

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
	struct kasan_cache *info = &cache->kasan_info;

	if (!kasan_requires_meta())
		return 0;

	if (in_object)
		return (info->free_meta_offset ?
			0 : sizeof(struct kasan_free_meta));
	else
		return (info->alloc_meta_offset ?
			sizeof(struct kasan_alloc_meta) : 0) +
			((info->free_meta_offset &&
			  info->free_meta_offset != KASAN_NO_FREE_META) ?
			 sizeof(struct kasan_free_meta) : 0);
}

static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
{
	struct slab *slab = kasan_addr_to_slab(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_meta;
	void *object;

	if (is_kfence_address(addr) || !slab)
		return;

	cache = slab->slab_cache;
	object = nearest_obj(cache, slab, addr);
	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
}
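
/*
 * The aux stacks keep the last two "indirect" call sites for the object,
 * e.g. the call_rcu() or queue_work() invocation that will later touch it;
 * the older of the two entries is dropped each time a new one is recorded.
 * They are printed in KASAN reports in addition to the alloc and free
 * stacks.
 */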

void kasan_record_aux_stack(void *addr)
{
	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
}

void kasan_record_aux_stack_noalloc(void *addr)
{
	return __kasan_record_aux_stack(addr, 0);
}

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	/* Invalidate previous stack traces (might exist for krealloc or mempool). */
	release_alloc_meta(alloc_meta);

	kasan_save_track(&alloc_meta->alloc_track, flags);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	struct kasan_free_meta *free_meta;

	free_meta = kasan_get_free_meta(cache, object);
	if (!free_meta)
		return;

	/* Invalidate previous stack trace (might exist for mempool). */
	release_free_meta(object, free_meta);

	kasan_save_track(&free_meta->free_track, 0);

	/* Mark free meta as valid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE_META;
}