mm/kasan/common.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * This file contains common KASAN code.
  4 *
  5 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
  6 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  7 *
  8 * Some code borrowed from https://github.com/xairy/kasan-prototype by
  9 *        Andrey Konovalov <andreyknvl@gmail.com>
 10 */
 11
 12#include <linux/export.h>
 13#include <linux/init.h>
 14#include <linux/kasan.h>
 15#include <linux/kernel.h>
 16#include <linux/linkage.h>
 17#include <linux/memblock.h>
 18#include <linux/memory.h>
 19#include <linux/mm.h>
 20#include <linux/module.h>
 21#include <linux/printk.h>
 22#include <linux/sched.h>
 23#include <linux/sched/task_stack.h>
 24#include <linux/slab.h>
 25#include <linux/stacktrace.h>
 26#include <linux/string.h>
 27#include <linux/types.h>
 28#include <linux/bug.h>
 29
 30#include "kasan.h"
 31#include "../slab.h"
 32
 33struct slab *kasan_addr_to_slab(const void *addr)
 34{
 35	if (virt_addr_valid(addr))
 36		return virt_to_slab(addr);
 37	return NULL;
 38}
 39
 40depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
 41{
 42	unsigned long entries[KASAN_STACK_DEPTH];
 43	unsigned int nr_entries;
 44
 45	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 46	return __stack_depot_save(entries, nr_entries, 0, flags, can_alloc);
 47}
 48
 49void kasan_set_track(struct kasan_track *track, gfp_t flags)
 50{
 51	track->pid = current->pid;
 52	track->stack = kasan_save_stack(flags, true);
 53}
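/*
 * Descriptive note: kasan_save_stack() captures the current call chain
 * with stack_trace_save() and deduplicates it in the stack depot;
 * kasan_set_track() then pairs the returned handle with current->pid,
 * so a later report can print the allocating/freeing task and stack
 * without keeping a full trace per object.
 */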
 54
 55#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 56void kasan_enable_current(void)
 57{
 58	current->kasan_depth++;
 59}
 60EXPORT_SYMBOL(kasan_enable_current);
 61
 62void kasan_disable_current(void)
 63{
 64	current->kasan_depth--;
 65}
 66EXPORT_SYMBOL(kasan_disable_current);
 67
 68#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 69
 70void __kasan_unpoison_range(const void *address, size_t size)
 71{
 72	kasan_unpoison(address, size, false);
 73}
 74
 75#ifdef CONFIG_KASAN_STACK
 76/* Unpoison the entire stack for a task. */
 77void kasan_unpoison_task_stack(struct task_struct *task)
 78{
 79	void *base = task_stack_page(task);
 80
 81	kasan_unpoison(base, THREAD_SIZE, false);
 82}
 83
 84/* Unpoison the stack for the current task beyond a watermark sp value. */
 85asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
 86{
 87	/*
 88	 * Calculate the task stack base address.  Avoid using 'current'
 89	 * because this function is called by early resume code which hasn't
 90	 * yet set up the percpu register (%gs).
 91	 */
 92	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
 93
 94	kasan_unpoison(base, watermark - base, false);
 95}
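/*
 * Illustrative example (values assumed, not from the source): with
 * THREAD_SIZE == 16 KiB and a watermark whose low bits are 0x3a58,
 * the mask ~(THREAD_SIZE - 1) == ~0x3fff rounds the watermark down to
 * the 16 KiB stack base, and only [base, watermark) is unpoisoned.
 */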
 96#endif /* CONFIG_KASAN_STACK */
 97
 98void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
 99{
100	u8 tag;
101	unsigned long i;
102
103	if (unlikely(PageHighMem(page)))
104		return;
105
106	tag = kasan_random_tag();
107	kasan_unpoison(set_tag(page_address(page), tag),
108		       PAGE_SIZE << order, init);
109	for (i = 0; i < (1 << order); i++)
110		page_kasan_tag_set(page + i, tag);
111}
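/*
 * Descriptive note: one random tag is chosen per page-alloc allocation.
 * It is recorded in every struct page of the range via
 * page_kasan_tag_set() and embedded in the pointer passed to
 * kasan_unpoison(), so in the tag-based modes later accesses through
 * page_address() see matching pointer and memory tags. For an order-2
 * allocation (assumed as an example) all four pages share the tag.
 */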
112
113void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
114{
115	if (likely(!PageHighMem(page)))
116		kasan_poison(page_address(page), PAGE_SIZE << order,
117			     KASAN_PAGE_FREE, init);
118}
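/*
 * Descriptive note: on the free path the whole (PAGE_SIZE << order)
 * range is poisoned with KASAN_PAGE_FREE, so any later access through
 * a stale pointer is reported as a use-after-free. HighMem pages are
 * skipped here, matching __kasan_unpoison_pages() above.
 */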
119
120void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
121{
122	cache->kasan_info.is_kmalloc = true;
123}
124
125void __kasan_poison_slab(struct slab *slab)
126{
127	struct page *page = slab_page(slab);
128	unsigned long i;
129
130	for (i = 0; i < compound_nr(page); i++)
131		page_kasan_tag_reset(page + i);
132	kasan_poison(page_address(page), page_size(page),
133		     KASAN_SLAB_REDZONE, false);
134}
135
136void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
137{
138	kasan_unpoison(object, cache->object_size, false);
139}
140
141void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
142{
143	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
144			KASAN_SLAB_REDZONE, false);
145}
146
147/*
148 * This function assigns a tag to an object considering the following:
149 * 1. A cache might have a constructor, which might save a pointer to a slab
150 *    object somewhere (e.g. in the object itself). We preassign a tag for
151 *    each object in caches with constructors during slab creation and reuse
152 *    the same tag each time a particular object is allocated.
153 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
154 *    accessed after being freed. We preassign tags for objects in these
155 *    caches as well.
156 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
157 *    is stored as an array of indexes instead of a linked list. Assign tags
158 *    based on objects indexes, so that objects that are next to each other
159 *    get different tags.
160 */
161static inline u8 assign_tag(struct kmem_cache *cache,
162					const void *object, bool init)
163{
164	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
165		return 0xff;
166
167	/*
168	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
169	 * set, assign a tag when the object is being allocated (init == false).
170	 */
171	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
172		return init ? KASAN_TAG_KERNEL : kasan_random_tag();
173
174	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
175#ifdef CONFIG_SLAB
176	/* For SLAB assign tags based on the object index in the freelist. */
177	return (u8)obj_to_index(cache, virt_to_slab(object), (void *)object);
178#else
179	/*
180	 * For SLUB assign a random tag during slab creation, otherwise reuse
181	 * the already assigned tag.
182	 */
183	return init ? kasan_random_tag() : get_tag(object);
184#endif
185}
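/*
 * Illustrative example (tag-based mode assumed): on SLUB, a cache with
 * a constructor or SLAB_TYPESAFE_BY_RCU gets a random tag once, when
 * the object is initialized (init == true), and get_tag() returns that
 * same tag on every later allocation; a plain cache instead gets a
 * fresh kasan_random_tag() per allocation. For the generic mode the
 * returned 0xff is ignored by set_tag().
 */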
186
187void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
188						const void *object)
189{
190	/* Initialize per-object metadata if it is present. */
191	if (kasan_requires_meta())
192		kasan_init_object_meta(cache, object);
193
194	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
195	object = set_tag(object, assign_tag(cache, object, true));
196
197	return (void *)object;
198}
199
200static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
201				unsigned long ip, bool quarantine, bool init)
202{
203	void *tagged_object;
204
205	if (!kasan_arch_is_ready())
206		return false;
207
208	tagged_object = object;
209	object = kasan_reset_tag(object);
210
211	if (is_kfence_address(object))
212		return false;
213
214	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
215	    object)) {
216		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
217		return true;
218	}
219
220	/* RCU slabs could be legally used after free within the RCU period */
221	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
222		return false;
223
224	if (!kasan_byte_accessible(tagged_object)) {
225		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
226		return true;
227	}
228
229	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
230			KASAN_SLAB_FREE, init);
231
232	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
233		return false;
234
235	if (kasan_stack_collection_enabled())
236		kasan_save_free_info(cache, tagged_object);
237
238	return kasan_quarantine_put(cache, object);
239}
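/*
 * Descriptive note: the helper above returns true when the allocator
 * must not immediately reuse the object: either an invalid or double
 * free was just reported, or (generic mode with quarantine) the object
 * was handed to the quarantine and will be released later by
 * kasan_quarantine_reduce(). kfence-managed objects and
 * SLAB_TYPESAFE_BY_RCU caches are left untouched and return false.
 */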
240
241bool __kasan_slab_free(struct kmem_cache *cache, void *object,
242				unsigned long ip, bool init)
243{
244	return ____kasan_slab_free(cache, object, ip, true, init);
245}
246
247static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
248{
249	if (!kasan_arch_is_ready())
250		return false;
251
252	if (ptr != page_address(virt_to_head_page(ptr))) {
253		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
254		return true;
255	}
256
257	if (!kasan_byte_accessible(ptr)) {
258		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_DOUBLE_FREE);
259		return true;
260	}
261
262	/*
263	 * The object will be poisoned by kasan_poison_pages() or
264	 * kasan_slab_free_mempool().
265	 */
266
267	return false;
268}
269
270void __kasan_kfree_large(void *ptr, unsigned long ip)
271{
272	____kasan_kfree_large(ptr, ip);
273}
274
275void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
276{
277	struct folio *folio;
278
279	folio = virt_to_folio(ptr);
280
281	/*
282	 * Even though this function is only called for kmem_cache_alloc and
283	 * kmalloc backed mempool allocations, those allocations can still be
284	 * !PageSlab() when the size provided to kmalloc is larger than
285	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
286	 */
287	if (unlikely(!folio_test_slab(folio))) {
288		if (____kasan_kfree_large(ptr, ip))
289			return;
290		kasan_poison(ptr, folio_size(folio), KASAN_PAGE_FREE, false);
291	} else {
292		struct slab *slab = folio_slab(folio);
293
294		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
295	}
296}
297
298void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
299					void *object, gfp_t flags, bool init)
300{
301	u8 tag;
302	void *tagged_object;
303
304	if (gfpflags_allow_blocking(flags))
305		kasan_quarantine_reduce();
306
307	if (unlikely(object == NULL))
308		return NULL;
309
310	if (is_kfence_address(object))
311		return (void *)object;
312
313	/*
314	 * Generate and assign random tag for tag-based modes.
315	 * Tag is ignored in set_tag() for the generic mode.
316	 */
317	tag = assign_tag(cache, object, false);
318	tagged_object = set_tag(object, tag);
319
320	/*
321	 * Unpoison the whole object.
322	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
323	 */
324	kasan_unpoison(tagged_object, cache->object_size, init);
325
326	/* Save alloc info (if possible) for non-kmalloc() allocations. */
327	if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
328		kasan_save_alloc_info(cache, tagged_object, flags);
329
330	return tagged_object;
331}
332
333static inline void *____kasan_kmalloc(struct kmem_cache *cache,
334				const void *object, size_t size, gfp_t flags)
335{
336	unsigned long redzone_start;
337	unsigned long redzone_end;
338
339	if (gfpflags_allow_blocking(flags))
340		kasan_quarantine_reduce();
341
342	if (unlikely(object == NULL))
343		return NULL;
344
345	if (is_kfence_address(kasan_reset_tag(object)))
346		return (void *)object;
347
348	/*
349	 * The object has already been unpoisoned by kasan_slab_alloc() for
350	 * kmalloc() or by kasan_krealloc() for krealloc().
351	 */
352
353	/*
354	 * The redzone has byte-level precision for the generic mode.
355	 * Partially poison the last object granule to cover the unaligned
356	 * part of the redzone.
357	 */
358	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
359		kasan_poison_last_granule((void *)object, size);
360
361	/* Poison the aligned part of the redzone. */
362	redzone_start = round_up((unsigned long)(object + size),
363				KASAN_GRANULE_SIZE);
364	redzone_end = round_up((unsigned long)(object + cache->object_size),
365				KASAN_GRANULE_SIZE);
366	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
367			   KASAN_SLAB_REDZONE, false);
368
369	/*
370	 * Save alloc info (if possible) for kmalloc() allocations.
371	 * This also rewrites the alloc info when called from kasan_krealloc().
372	 */
373	if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
374		kasan_save_alloc_info(cache, (void *)object, flags);
375
376	/* Keep the tag that was set by kasan_slab_alloc(). */
377	return (void *)object;
378}
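/*
 * Illustrative example (generic mode, values assumed): with
 * KASAN_GRANULE_SIZE == 8, cache->object_size == 128 and a kmalloc()
 * request of size == 100, kasan_poison_last_granule() covers the
 * unaligned bytes 100..103, redzone_start == object + 104,
 * redzone_end == object + 128, and bytes 104..127 are poisoned as
 * KASAN_SLAB_REDZONE while bytes 0..99 stay accessible.
 */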
379
380void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
381					size_t size, gfp_t flags)
382{
383	return ____kasan_kmalloc(cache, object, size, flags);
384}
385EXPORT_SYMBOL(__kasan_kmalloc);
386
387void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
388						gfp_t flags)
389{
390	unsigned long redzone_start;
391	unsigned long redzone_end;
392
393	if (gfpflags_allow_blocking(flags))
394		kasan_quarantine_reduce();
395
396	if (unlikely(ptr == NULL))
397		return NULL;
398
399	/*
400	 * The object has already been unpoisoned by kasan_unpoison_pages() for
401	 * alloc_pages() or by kasan_krealloc() for krealloc().
402	 */
403
404	/*
405	 * The redzone has byte-level precision for the generic mode.
406	 * Partially poison the last object granule to cover the unaligned
407	 * part of the redzone.
408	 */
409	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
410		kasan_poison_last_granule(ptr, size);
411
412	/* Poison the aligned part of the redzone. */
413	redzone_start = round_up((unsigned long)(ptr + size),
414				KASAN_GRANULE_SIZE);
415	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
416	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
417		     KASAN_PAGE_REDZONE, false);
418
419	return (void *)ptr;
420}
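/*
 * Illustrative example (values assumed): for a kmalloc(10000) that was
 * backed by an order-2 page allocation (16 KiB), redzone_start ==
 * ptr + 10000 and redzone_end == ptr + 16384, so the tail of the
 * compound page becomes a KASAN_PAGE_REDZONE instead of being left
 * accessible.
 */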
421
422void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
423{
424	struct slab *slab;
425
426	if (unlikely(object == ZERO_SIZE_PTR))
427		return (void *)object;
428
429	/*
430	 * Unpoison the object's data.
431	 * Part of it might already have been unpoisoned, but it's unknown
432	 * how big that part is.
433	 */
434	kasan_unpoison(object, size, false);
435
436	slab = virt_to_slab(object);
437
438	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
439	if (unlikely(!slab))
440		return __kasan_kmalloc_large(object, size, flags);
441	else
442		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
443}
444
445bool __kasan_check_byte(const void *address, unsigned long ip)
446{
447	if (!kasan_byte_accessible(address)) {
448		kasan_report((unsigned long)address, 1, false, ip);
449		return false;
450	}
451	return true;
452}
mm/kasan/common.c (v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * This file contains common KASAN code.
  4 *
  5 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
  6 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  7 *
  8 * Some code borrowed from https://github.com/xairy/kasan-prototype by
  9 *        Andrey Konovalov <andreyknvl@gmail.com>
 10 */
 11
 12#include <linux/export.h>
 13#include <linux/init.h>
 14#include <linux/kasan.h>
 15#include <linux/kernel.h>
 16#include <linux/linkage.h>
 17#include <linux/memblock.h>
 18#include <linux/memory.h>
 19#include <linux/mm.h>
 20#include <linux/module.h>
 21#include <linux/printk.h>
 22#include <linux/sched.h>
 23#include <linux/sched/task_stack.h>
 24#include <linux/slab.h>
 25#include <linux/stacktrace.h>
 26#include <linux/string.h>
 27#include <linux/types.h>
 28#include <linux/bug.h>
 29
 30#include "kasan.h"
 31#include "../slab.h"
 32
 33depot_stack_handle_t kasan_save_stack(gfp_t flags)
 34{
 35	unsigned long entries[KASAN_STACK_DEPTH];
 36	unsigned int nr_entries;
 37
 38	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 39	nr_entries = filter_irq_stacks(entries, nr_entries);
 40	return stack_depot_save(entries, nr_entries, flags);
 41}
 42
 43void kasan_set_track(struct kasan_track *track, gfp_t flags)
 44{
 45	track->pid = current->pid;
 46	track->stack = kasan_save_stack(flags);
 47}
 48
 49#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 50void kasan_enable_current(void)
 51{
 52	current->kasan_depth++;
 53}
 54EXPORT_SYMBOL(kasan_enable_current);
 55
 56void kasan_disable_current(void)
 57{
 58	current->kasan_depth--;
 59}
 60EXPORT_SYMBOL(kasan_disable_current);
 61
 62#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 63
 64void __kasan_unpoison_range(const void *address, size_t size)
 65{
 66	kasan_unpoison(address, size, false);
 67}
 68
 69#ifdef CONFIG_KASAN_STACK
 70/* Unpoison the entire stack for a task. */
 71void kasan_unpoison_task_stack(struct task_struct *task)
 72{
 73	void *base = task_stack_page(task);
 74
 75	kasan_unpoison(base, THREAD_SIZE, false);
 76}
 77
 78/* Unpoison the stack for the current task beyond a watermark sp value. */
 79asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
 80{
 81	/*
 82	 * Calculate the task stack base address.  Avoid using 'current'
 83	 * because this function is called by early resume code which hasn't
 84	 * yet set up the percpu register (%gs).
 85	 */
 86	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));
 87
 88	kasan_unpoison(base, watermark - base, false);
 89}
 90#endif /* CONFIG_KASAN_STACK */
 91
 92/*
 93 * Only allow cache merging when stack collection is disabled and no metadata
 94 * is present.
 95 */
 96slab_flags_t __kasan_never_merge(void)
 97{
 98	if (kasan_stack_collection_enabled())
 99		return SLAB_KASAN;
100	return 0;
101}
102
103void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
104{
105	u8 tag;
106	unsigned long i;
107
108	if (unlikely(PageHighMem(page)))
109		return;
110
111	tag = kasan_random_tag();
112	for (i = 0; i < (1 << order); i++)
113		page_kasan_tag_set(page + i, tag);
114	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
115}
116
117void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
118{
119	if (likely(!PageHighMem(page)))
120		kasan_poison(page_address(page), PAGE_SIZE << order,
121			     KASAN_FREE_PAGE, init);
122}
123
124/*
125 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
126 * For larger allocations larger redzones are used.
127 */
128static inline unsigned int optimal_redzone(unsigned int object_size)
129{
130	return
131		object_size <= 64        - 16   ? 16 :
132		object_size <= 128       - 32   ? 32 :
133		object_size <= 512       - 64   ? 64 :
134		object_size <= 4096      - 128  ? 128 :
135		object_size <= (1 << 14) - 256  ? 256 :
136		object_size <= (1 << 15) - 512  ? 512 :
137		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
138}
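/*
 * Illustrative example: with this table an object_size of 200 falls in
 * the "<= 512 - 64" bucket and gets a 64-byte redzone, while a
 * 3000-byte object gets 128 bytes; the redzone grows roughly with the
 * allocation size, mirroring the userspace ASan policy mentioned above.
 */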
139
140void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
141			  slab_flags_t *flags)
142{
143	unsigned int ok_size;
144	unsigned int optimal_size;
145
146	/*
147	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
148	 * KASAN. Currently this flag is used in two places:
149	 * 1. In slab_ksize() when calculating the size of the accessible
150	 *    memory within the object.
151	 * 2. In slab_common.c to prevent merging of sanitized caches.
152	 */
153	*flags |= SLAB_KASAN;
154
155	if (!kasan_stack_collection_enabled())
156		return;
157
158	ok_size = *size;
159
160	/* Add alloc meta into redzone. */
161	cache->kasan_info.alloc_meta_offset = *size;
162	*size += sizeof(struct kasan_alloc_meta);
163
164	/*
165	 * If alloc meta doesn't fit, don't add it.
166	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
167	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
168	 * larger sizes.
169	 */
170	if (*size > KMALLOC_MAX_SIZE) {
171		cache->kasan_info.alloc_meta_offset = 0;
172		*size = ok_size;
173		/* Continue, since free meta might still fit. */
174	}
175
176	/* Only the generic mode uses free meta or flexible redzones. */
177	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
178		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
179		return;
180	}
181
182	/*
183	 * Add free meta into redzone when it's not possible to store
184	 * it in the object. This is the case when:
185	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
186	 *    be touched after it was freed, or
187	 * 2. Object has a constructor, which means it's expected to
188	 *    retain its content until the next allocation, or
189	 * 3. Object is too small.
190	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
191	 */
192	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
193	    cache->object_size < sizeof(struct kasan_free_meta)) {
194		ok_size = *size;
195
196		cache->kasan_info.free_meta_offset = *size;
197		*size += sizeof(struct kasan_free_meta);
198
199		/* If free meta doesn't fit, don't add it. */
200		if (*size > KMALLOC_MAX_SIZE) {
201			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
202			*size = ok_size;
203		}
204	}
205
206	/* Calculate size with optimal redzone. */
207	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
208	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
209	if (optimal_size > KMALLOC_MAX_SIZE)
210		optimal_size = KMALLOC_MAX_SIZE;
211	/* Use optimal size if the size with added metas is not large enough. */
212	if (*size < optimal_size)
213		*size = optimal_size;
214}
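/*
 * Illustrative sketch (generic mode, values assumed): for a cache with
 * a constructor and object_size == 128, the resulting layout is roughly
 *
 *   [ object (128) | kasan_alloc_meta | kasan_free_meta | pad ]
 *
 * with *size bumped to at least object_size + optimal_redzone(128) ==
 * 192. For a large-enough cache without a constructor or
 * SLAB_TYPESAFE_BY_RCU, the free meta is instead stored inside the
 * freed object and free_meta_offset stays 0.
 */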
215
216void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
217{
218	cache->kasan_info.is_kmalloc = true;
219}
220
221size_t __kasan_metadata_size(struct kmem_cache *cache)
222{
223	if (!kasan_stack_collection_enabled())
224		return 0;
225	return (cache->kasan_info.alloc_meta_offset ?
226		sizeof(struct kasan_alloc_meta) : 0) +
227		(cache->kasan_info.free_meta_offset ?
228		sizeof(struct kasan_free_meta) : 0);
229}
230
231struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
232					      const void *object)
233{
234	if (!cache->kasan_info.alloc_meta_offset)
235		return NULL;
236	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
237}
238
239#ifdef CONFIG_KASAN_GENERIC
240struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
241					    const void *object)
242{
243	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
244	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
245		return NULL;
246	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
247}
248#endif
249
250void __kasan_poison_slab(struct page *page)
251{
252	unsigned long i;
253
254	for (i = 0; i < compound_nr(page); i++)
255		page_kasan_tag_reset(page + i);
256	kasan_poison(page_address(page), page_size(page),
257		     KASAN_KMALLOC_REDZONE, false);
258}
259
260void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
261{
262	kasan_unpoison(object, cache->object_size, false);
263}
264
265void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
266{
267	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
268			KASAN_KMALLOC_REDZONE, false);
269}
270
271/*
272 * This function assigns a tag to an object considering the following:
273 * 1. A cache might have a constructor, which might save a pointer to a slab
274 *    object somewhere (e.g. in the object itself). We preassign a tag for
275 *    each object in caches with constructors during slab creation and reuse
276 *    the same tag each time a particular object is allocated.
277 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
278 *    accessed after being freed. We preassign tags for objects in these
279 *    caches as well.
280 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
281 *    is stored as an array of indexes instead of a linked list. Assign tags
282 *    based on object indexes, so that objects that are next to each other
283 *    get different tags.
284 */
285static inline u8 assign_tag(struct kmem_cache *cache,
286					const void *object, bool init)
287{
288	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
289		return 0xff;
290
291	/*
292	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
293	 * set, assign a tag when the object is being allocated (init == false).
294	 */
295	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
296		return init ? KASAN_TAG_KERNEL : kasan_random_tag();
297
298	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
299#ifdef CONFIG_SLAB
300	/* For SLAB assign tags based on the object index in the freelist. */
301	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
302#else
303	/*
304	 * For SLUB assign a random tag during slab creation, otherwise reuse
305	 * the already assigned tag.
306	 */
307	return init ? kasan_random_tag() : get_tag(object);
308#endif
309}
310
311void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
312						const void *object)
313{
314	struct kasan_alloc_meta *alloc_meta;
315
316	if (kasan_stack_collection_enabled()) {
317		alloc_meta = kasan_get_alloc_meta(cache, object);
318		if (alloc_meta)
319			__memset(alloc_meta, 0, sizeof(*alloc_meta));
320	}
321
322	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
323	object = set_tag(object, assign_tag(cache, object, true));
324
325	return (void *)object;
326}
327
328static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
329				unsigned long ip, bool quarantine, bool init)
330{
331	u8 tag;
332	void *tagged_object;
333
334	if (!kasan_arch_is_ready())
335		return false;
336
337	tag = get_tag(object);
338	tagged_object = object;
339	object = kasan_reset_tag(object);
340
341	if (is_kfence_address(object))
342		return false;
343
344	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
345	    object)) {
346		kasan_report_invalid_free(tagged_object, ip);
347		return true;
348	}
349
350	/* RCU slabs could be legally used after free within the RCU period */
351	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
352		return false;
353
354	if (!kasan_byte_accessible(tagged_object)) {
355		kasan_report_invalid_free(tagged_object, ip);
356		return true;
357	}
358
359	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
360			KASAN_KMALLOC_FREE, init);
361
362	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
363		return false;
364
365	if (kasan_stack_collection_enabled())
366		kasan_set_free_info(cache, object, tag);
367
368	return kasan_quarantine_put(cache, object);
369}
370
371bool __kasan_slab_free(struct kmem_cache *cache, void *object,
372				unsigned long ip, bool init)
373{
374	return ____kasan_slab_free(cache, object, ip, true, init);
375}
376
377static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
378{
379	if (ptr != page_address(virt_to_head_page(ptr))) {
380		kasan_report_invalid_free(ptr, ip);
381		return true;
382	}
383
384	if (!kasan_byte_accessible(ptr)) {
385		kasan_report_invalid_free(ptr, ip);
386		return true;
387	}
388
389	/*
390	 * The object will be poisoned by kasan_free_pages() or
391	 * kasan_slab_free_mempool().
392	 */
393
394	return false;
395}
396
397void __kasan_kfree_large(void *ptr, unsigned long ip)
398{
399	____kasan_kfree_large(ptr, ip);
400}
401
402void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
403{
404	struct page *page;
405
406	page = virt_to_head_page(ptr);
407
408	/*
409	 * Even though this function is only called for kmem_cache_alloc and
410	 * kmalloc backed mempool allocations, those allocations can still be
411	 * !PageSlab() when the size provided to kmalloc is larger than
412	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
413	 */
414	if (unlikely(!PageSlab(page))) {
415		if (____kasan_kfree_large(ptr, ip))
416			return;
417		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
418	} else {
419		____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
420	}
421}
422
423static void set_alloc_info(struct kmem_cache *cache, void *object,
424				gfp_t flags, bool is_kmalloc)
425{
426	struct kasan_alloc_meta *alloc_meta;
427
428	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
429	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
430		return;
431
432	alloc_meta = kasan_get_alloc_meta(cache, object);
433	if (alloc_meta)
434		kasan_set_track(&alloc_meta->alloc_track, flags);
435}
436
437void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
438					void *object, gfp_t flags, bool init)
439{
440	u8 tag;
441	void *tagged_object;
442
443	if (gfpflags_allow_blocking(flags))
444		kasan_quarantine_reduce();
445
446	if (unlikely(object == NULL))
447		return NULL;
448
449	if (is_kfence_address(object))
450		return (void *)object;
451
452	/*
453	 * Generate and assign random tag for tag-based modes.
454	 * Tag is ignored in set_tag() for the generic mode.
455	 */
456	tag = assign_tag(cache, object, false);
457	tagged_object = set_tag(object, tag);
458
459	/*
460	 * Unpoison the whole object.
461	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
462	 */
463	kasan_unpoison(tagged_object, cache->object_size, init);
464
465	/* Save alloc info (if possible) for non-kmalloc() allocations. */
466	if (kasan_stack_collection_enabled())
467		set_alloc_info(cache, (void *)object, flags, false);
468
469	return tagged_object;
470}
471
472static inline void *____kasan_kmalloc(struct kmem_cache *cache,
473				const void *object, size_t size, gfp_t flags)
474{
475	unsigned long redzone_start;
476	unsigned long redzone_end;
477
478	if (gfpflags_allow_blocking(flags))
479		kasan_quarantine_reduce();
480
481	if (unlikely(object == NULL))
482		return NULL;
483
484	if (is_kfence_address(kasan_reset_tag(object)))
485		return (void *)object;
486
487	/*
488	 * The object has already been unpoisoned by kasan_slab_alloc() for
489	 * kmalloc() or by kasan_krealloc() for krealloc().
490	 */
491
492	/*
493	 * The redzone has byte-level precision for the generic mode.
494	 * Partially poison the last object granule to cover the unaligned
495	 * part of the redzone.
496	 */
497	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
498		kasan_poison_last_granule((void *)object, size);
499
500	/* Poison the aligned part of the redzone. */
501	redzone_start = round_up((unsigned long)(object + size),
502				KASAN_GRANULE_SIZE);
503	redzone_end = round_up((unsigned long)(object + cache->object_size),
504				KASAN_GRANULE_SIZE);
505	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
506			   KASAN_KMALLOC_REDZONE, false);
507
508	/*
509	 * Save alloc info (if possible) for kmalloc() allocations.
510	 * This also rewrites the alloc info when called from kasan_krealloc().
511	 */
512	if (kasan_stack_collection_enabled())
513		set_alloc_info(cache, (void *)object, flags, true);
514
515	/* Keep the tag that was set by kasan_slab_alloc(). */
516	return (void *)object;
517}
518
519void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
520					size_t size, gfp_t flags)
521{
522	return ____kasan_kmalloc(cache, object, size, flags);
523}
524EXPORT_SYMBOL(__kasan_kmalloc);
525
526void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
527						gfp_t flags)
528{
529	unsigned long redzone_start;
530	unsigned long redzone_end;
531
532	if (gfpflags_allow_blocking(flags))
533		kasan_quarantine_reduce();
534
535	if (unlikely(ptr == NULL))
536		return NULL;
537
538	/*
539	 * The object has already been unpoisoned by kasan_alloc_pages() for
540	 * alloc_pages() or by kasan_krealloc() for krealloc().
541	 */
542
543	/*
544	 * The redzone has byte-level precision for the generic mode.
545	 * Partially poison the last object granule to cover the unaligned
546	 * part of the redzone.
547	 */
548	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
549		kasan_poison_last_granule(ptr, size);
550
551	/* Poison the aligned part of the redzone. */
552	redzone_start = round_up((unsigned long)(ptr + size),
553				KASAN_GRANULE_SIZE);
554	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
555	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
556		     KASAN_PAGE_REDZONE, false);
557
558	return (void *)ptr;
559}
560
561void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
562{
563	struct page *page;
564
565	if (unlikely(object == ZERO_SIZE_PTR))
566		return (void *)object;
567
568	/*
569	 * Unpoison the object's data.
570	 * Part of it might already have been unpoisoned, but it's unknown
571	 * how big that part is.
572	 */
573	kasan_unpoison(object, size, false);
574
575	page = virt_to_head_page(object);
576
577	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
578	if (unlikely(!PageSlab(page)))
579		return __kasan_kmalloc_large(object, size, flags);
580	else
581		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
582}
583
584bool __kasan_check_byte(const void *address, unsigned long ip)
585{
586	if (!kasan_byte_accessible(address)) {
587		kasan_report((unsigned long)address, 1, false, ip);
588		return false;
589	}
590	return true;
591}