// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so that the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX depending on
 * the memory access size X.
 */
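/*
 * Each shadow byte covers one KASAN_GRANULE_SIZE-byte granule: 0 means the
 * whole granule is accessible, a value from 1 to KASAN_GRANULE_SIZE - 1
 * means only the first N bytes are accessible, and a negative value marks
 * the granule as poisoned.
 */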
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);

	/*
	 * If the access crosses an 8-byte (granule-size) boundary, it maps
	 * into two shadow bytes, so both need to be checked.
	 */
	if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}
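/*
 * Returns the address of the first nonzero byte in [start, start + size),
 * or 0 if all bytes are zero.
 */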
static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}
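/*
 * Scans [start, end) for a nonzero byte, one word at a time after an initial
 * byte-wise pass that aligns the pointer to 8 bytes. Returns the address of
 * the first nonzero byte, or 0 if the whole range is zero.
 */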
static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}
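/*
 * Checks the shadow covering an arbitrarily sized access. A nonzero shadow
 * byte is only acceptable when it is the last one and the access ends within
 * the partially accessible granule it describes.
 */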
static __always_inline bool memory_is_poisoned_n(unsigned long addr,
						size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
			kasan_mem_to_shadow((void *)addr + size - 1) + 1);

	if (unlikely(ret)) {
		unsigned long last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

		if (unlikely(ret != (unsigned long)last_shadow ||
			((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}
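/*
 * Returns true when the access is valid; otherwise reports the bad access
 * and returns false. Zero-size accesses and accesses made before the arch
 * has set up the shadow are always considered valid.
 */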
static __always_inline bool check_region_inline(unsigned long addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (!kasan_arch_is_ready())
		return true;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely((void *)addr <
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		return !kasan_report(addr, size, write, ret_ip);
	}

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(unsigned long addr, size_t size, bool write,
				unsigned long ret_ip)
{
	return check_region_inline(addr, size, write, ret_ip);
}
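/*
 * Returns true if the byte at addr is accessible according to its shadow,
 * i.e. the covering granule is not poisoned at that offset.
 */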
bool kasan_byte_accessible(const void *addr)
{
	s8 shadow_byte;

	if (!kasan_arch_is_ready())
		return true;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

/* Flush this cache's objects out of the quarantine so they can be freed. */
void kasan_cache_shrink(struct kmem_cache *cache)
{
	kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		kasan_quarantine_remove_cache(cache);
}
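/*
 * Unpoisons the global itself and poisons the compiler-added redzone that
 * follows it, so that out-of-bounds accesses past the global are detected.
 */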
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

	kasan_unpoison(global->beg, global->size, false);

	kasan_poison(global->beg + aligned_size,
		     global->size_with_redzone - aligned_size,
		     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
	/* Intentionally a no-op. */
}
EXPORT_SYMBOL(__asan_unregister_globals);
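/*
 * The compiler instruments each memory access of a known, fixed size with a
 * call to __asan_loadX/__asan_storeX. The _noabort variants are aliases:
 * the kernel never aborts on a KASAN report.
 */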
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(unsigned long addr)			\
	{								\
		check_region_inline(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(unsigned long addr)			\
	{								\
		check_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(unsigned long);		\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* Emitted by the compiler before noreturn calls; nothing to do here. */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by the compiler to poison alloca()ed objects. */
void __asan_alloca_poison(unsigned long addr, size_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));

	kasan_unpoison((const void *)(addr + rounded_down_size),
			size - rounded_down_size, false);
	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_LEFT, false);
	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by the compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
{
	if (unlikely(!stack_top || stack_top > stack_bottom))
		return;

	kasan_unpoison(stack_top, stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte)					\
	void __asan_set_shadow_##byte(const void *addr, size_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

/* Only allow cache merging when no per-object metadata is present. */
slab_flags_t kasan_never_merge(void)
{
	if (!kasan_requires_meta())
		return 0;
	return SLAB_KASAN;
}

/*
 * Adaptive redzone policy, taken from the userspace AddressSanitizer runtime:
 * larger allocations get larger redzones.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
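/*
 * Reserves space for per-object alloc/free metadata in the cache's redzone
 * and grows the object size to the optimal redzone size where needed.
 */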
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;

	if (!kasan_requires_meta())
		return;

	/*
	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN
	 * and that thus have per-object metadata.
	 * Currently this flag is used in two places:
	 * 1. In slab_ksize() to account for per-object metadata when
	 *    calculating the size of the accessible memory within the object.
	 * 2. In slab_common.c via kasan_never_merge() to prevent merging of
	 *    caches with per-object metadata.
	 */
	*flags |= SLAB_KASAN;

	ok_size = *size;

	/* Add the alloc meta to the redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * If the alloc meta doesn't fit, don't add it.
	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
	 * larger sizes.
	 */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since the free meta might still fit. */
	}

	/*
	 * Add the free meta to the redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. The object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. The object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. The object is too small.
	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If the free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate the size with the optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Cap it at KMALLOC_MAX_SIZE (relevant for SLAB only). */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use the optimal size if the size with the added metadata is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}
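/*
 * The accessors below return the per-object alloc/free metadata for an
 * object, or NULL when the cache carries no such metadata.
 */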
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
}

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
	struct kasan_cache *info = &cache->kasan_info;

	if (!kasan_requires_meta())
		return 0;

	if (in_object)
		return (info->free_meta_offset ?
			0 : sizeof(struct kasan_free_meta));
	else
		return (info->alloc_meta_offset ?
			sizeof(struct kasan_alloc_meta) : 0) +
			((info->free_meta_offset &&
			  info->free_meta_offset != KASAN_NO_FREE_META) ?
			sizeof(struct kasan_free_meta) : 0);
}
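/*
 * Records an auxiliary call stack (e.g. the call_rcu() or queue_work() call
 * site) in the object's alloc metadata, keeping the two most recent stacks.
 */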
static void __kasan_record_aux_stack(void *addr, bool can_alloc)
{
	struct slab *slab = kasan_addr_to_slab(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_meta;
	void *object;

	if (is_kfence_address(addr) || !slab)
		return;

	cache = slab->slab_cache;
	object = nearest_obj(cache, slab, addr);
	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	/* Shift the older aux stack down and save the new one on top. */
	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT, can_alloc);
}

void kasan_record_aux_stack(void *addr)
{
	return __kasan_record_aux_stack(addr, true);
}

/* Same as above, but does not allocate memory when saving the stack trace. */
void kasan_record_aux_stack_noalloc(void *addr)
{
	return __kasan_record_aux_stack(addr, false);
}

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	struct kasan_free_meta *free_meta;

	free_meta = kasan_get_free_meta(cache, object);
	if (!free_meta)
		return;

	kasan_set_track(&free_meta->free_track, GFP_NOWAIT);
	/* The object was freed and has its free track set. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK;
}