mm/kfence/kfence.h (v6.8)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Kernel Electric-Fence (KFENCE). For more info please see
 * Documentation/dev-tools/kfence.rst.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef MM_KFENCE_KFENCE_H
#define MM_KFENCE_KFENCE_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "../slab.h" /* for struct kmem_cache */

/*
 * Get the canary byte pattern for @addr. Use a pattern that varies based on the
 * lower 3 bits of the address, to detect memory corruptions with higher
 * probability, where similar constants are used.
 */
#define KFENCE_CANARY_PATTERN_U8(addr) ((u8)0xaa ^ (u8)((unsigned long)(addr) & 0x7))

/*
 * Define a continuous 8-byte canary starting from a multiple of 8. The canary
 * of each byte is only related to the lowest three bits of its address, so the
 * canary of every 8 bytes is the same. 64-bit memory can be filled and checked
 * at a time instead of byte by byte to improve performance.
 */
#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(le64_to_cpu(0x0706050403020100)))
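
/*
 * Illustrative sketch (not part of the upstream header): one way the two
 * canary macros above can cooperate, filling a byte range with per-address
 * canaries while using the u64 pattern for the 8-byte-aligned middle. The
 * function name is hypothetical; KFENCE's real fill/check loops live in
 * mm/kfence/core.c. Correctness of the middle loop rests on the property
 * stated above: the per-byte pattern repeats every 8 bytes.
 */
static inline void example_fill_canary(unsigned long start, unsigned long end)
{
	unsigned long addr = start;

	/* Unaligned head: one byte at a time. */
	for (; addr < ALIGN(start, sizeof(u64)) && addr < end; addr++)
		*(u8 *)addr = KFENCE_CANARY_PATTERN_U8(addr);
	/* Aligned middle: one 8-byte store per word, same value each time. */
	for (; addr + sizeof(u64) <= end; addr += sizeof(u64))
		*(u64 *)addr = KFENCE_CANARY_PATTERN_U64;
	/* Tail: remaining bytes individually. */
	for (; addr < end; addr++)
		*(u8 *)addr = KFENCE_CANARY_PATTERN_U8(addr);
}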

/* Maximum stack depth for reports. */
#define KFENCE_STACK_DEPTH 64

/* KFENCE object states. */
enum kfence_object_state {
	KFENCE_OBJECT_UNUSED,		/* Object is unused. */
	KFENCE_OBJECT_ALLOCATED,	/* Object is currently allocated. */
	KFENCE_OBJECT_FREED,		/* Object was allocated, and then freed. */
};

/* Alloc/free tracking information. */
struct kfence_track {
	pid_t pid;
	int cpu;
	u64 ts_nsec;
	int num_stack_entries;
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
};

/* KFENCE metadata per guarded allocation. */
struct kfence_metadata {
	struct list_head list;		/* Freelist node; access under kfence_freelist_lock. */
	struct rcu_head rcu_head;	/* For delayed freeing. */

	/*
	 * Lock protecting below data; to ensure consistency of the below data,
	 * since the following may execute concurrently: __kfence_alloc(),
	 * __kfence_free(), kfence_handle_page_fault(). However, note that we
	 * cannot grab the same metadata off the freelist twice, and multiple
	 * __kfence_alloc() cannot run concurrently on the same metadata.
	 */
	raw_spinlock_t lock;

	/* The current state of the object; see above. */
	enum kfence_object_state state;

	/*
	 * Allocated object address; cannot be calculated from size, because of
	 * alignment requirements.
	 *
	 * Invariant: ALIGN_DOWN(addr, PAGE_SIZE) is constant.
	 */
	unsigned long addr;

	/*
	 * The size of the original allocation.
	 */
	size_t size;

	/*
	 * The kmem_cache cache of the last allocation; NULL if never allocated
	 * or the cache has already been destroyed.
	 */
	struct kmem_cache *cache;

	/*
	 * In case of an invalid access, the page that was unprotected; we
	 * optimistically only store one address.
	 */
	unsigned long unprotected_page;

	/* Allocation and free stack information. */
	struct kfence_track alloc_track;
	struct kfence_track free_track;
	/* For updating alloc_covered on frees. */
	u32 alloc_stack_hash;
#ifdef CONFIG_MEMCG
	struct obj_cgroup *objcg;
#endif
};

#define KFENCE_METADATA_SIZE PAGE_ALIGN(sizeof(struct kfence_metadata) * \
					CONFIG_KFENCE_NUM_OBJECTS)

extern struct kfence_metadata *kfence_metadata;

static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
	long index;

	/* The checks do not affect performance; only called from slow-paths. */

	if (!is_kfence_address((void *)addr))
		return NULL;

	/*
	 * May be an invalid index if called with an address at the edge of
	 * __kfence_pool, in which case we would report an "invalid access"
	 * error.
	 */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}
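
/*
 * Worked example (inferred from the arithmetic above, not stated in the
 * header): the "/ (PAGE_SIZE * 2) - 1" mapping implies a pool laid out as two
 * leading guard pages followed by alternating object and guard pages:
 *
 *   __kfence_pool + 0 * PAGE_SIZE: guard page   -> index -1, returns NULL
 *   __kfence_pool + 1 * PAGE_SIZE: guard page   -> index -1, returns NULL
 *   __kfence_pool + 2 * PAGE_SIZE: object page  -> index 0
 *   __kfence_pool + 3 * PAGE_SIZE: guard page   -> index 0
 *   __kfence_pool + 4 * PAGE_SIZE: object page  -> index 1
 *   ...
 *
 * Note that a guard page maps to the same index as the object page before it,
 * which is how a fault on a guard page finds its neighboring object.
 */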

/* KFENCE error types for report generation. */
enum kfence_error_type {
	KFENCE_ERROR_OOB,		/* Detected an out-of-bounds access. */
	KFENCE_ERROR_UAF,		/* Detected a use-after-free access. */
	KFENCE_ERROR_CORRUPTION,	/* Detected a memory corruption on free. */
	KFENCE_ERROR_INVALID,		/* Invalid access of unknown type. */
	KFENCE_ERROR_INVALID_FREE,	/* Invalid free. */
};

void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
			 const struct kfence_metadata *meta, enum kfence_error_type type);

void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta);

#endif /* MM_KFENCE_KFENCE_H */
mm/kfence/kfence.h (v6.13.7)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Kernel Electric-Fence (KFENCE). For more info please see
 * Documentation/dev-tools/kfence.rst.
 *
 * Copyright (C) 2020, Google LLC.
 */

#ifndef MM_KFENCE_KFENCE_H
#define MM_KFENCE_KFENCE_H

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "../slab.h" /* for struct kmem_cache */

/*
 * Get the canary byte pattern for @addr. Use a pattern that varies based on the
 * lower 3 bits of the address, to detect memory corruptions with higher
 * probability, where similar constants are used.
 */
#define KFENCE_CANARY_PATTERN_U8(addr) ((u8)0xaa ^ (u8)((unsigned long)(addr) & 0x7))

/*
 * Define a continuous 8-byte canary starting from a multiple of 8. The canary
 * of each byte is only related to the lowest three bits of its address, so the
 * canary of every 8 bytes is the same. 64-bit memory can be filled and checked
 * at a time instead of byte by byte to improve performance.
 */
#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(le64_to_cpu(0x0706050403020100)))

/* Maximum stack depth for reports. */
#define KFENCE_STACK_DEPTH 64

/* KFENCE object states. */
enum kfence_object_state {
	KFENCE_OBJECT_UNUSED,		/* Object is unused. */
	KFENCE_OBJECT_ALLOCATED,	/* Object is currently allocated. */
	KFENCE_OBJECT_RCU_FREEING,	/* Object was allocated, and is being freed by RCU. */
	KFENCE_OBJECT_FREED,		/* Object was allocated, and then freed. */
};
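
/*
 * Inferred lifecycle (an assumption from the enum and the rcu_head below, not
 * stated in this header): KFENCE_OBJECT_RCU_FREEING is new relative to v6.8
 * and marks objects whose free is deferred through RCU, i.e. roughly:
 *
 *   UNUSED -> ALLOCATED -> FREED                    (immediate free)
 *   UNUSED -> ALLOCATED -> RCU_FREEING -> FREED     (RCU-deferred free)
 */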

/* Alloc/free tracking information. */
struct kfence_track {
	pid_t pid;
	int cpu;
	u64 ts_nsec;
	int num_stack_entries;
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
};

/* KFENCE metadata per guarded allocation. */
struct kfence_metadata {
	struct list_head list;		/* Freelist node; access under kfence_freelist_lock. */
	struct rcu_head rcu_head;	/* For delayed freeing. */

	/*
	 * Lock protecting below data; to ensure consistency of the below data,
	 * since the following may execute concurrently: __kfence_alloc(),
	 * __kfence_free(), kfence_handle_page_fault(). However, note that we
	 * cannot grab the same metadata off the freelist twice, and multiple
	 * __kfence_alloc() cannot run concurrently on the same metadata.
	 */
	raw_spinlock_t lock;

	/* The current state of the object; see above. */
	enum kfence_object_state state;

	/*
	 * Allocated object address; cannot be calculated from size, because of
	 * alignment requirements.
	 *
	 * Invariant: ALIGN_DOWN(addr, PAGE_SIZE) is constant.
	 */
	unsigned long addr;

	/*
	 * The size of the original allocation.
	 */
	size_t size;

	/*
	 * The kmem_cache cache of the last allocation; NULL if never allocated
	 * or the cache has already been destroyed.
	 */
	struct kmem_cache *cache;

	/*
	 * In case of an invalid access, the page that was unprotected; we
	 * optimistically only store one address.
	 */
	unsigned long unprotected_page;

	/* Allocation and free stack information. */
	struct kfence_track alloc_track;
	struct kfence_track free_track;
	/* For updating alloc_covered on frees. */
	u32 alloc_stack_hash;
#ifdef CONFIG_MEMCG
	struct slabobj_ext obj_exts;
#endif
};

#define KFENCE_METADATA_SIZE PAGE_ALIGN(sizeof(struct kfence_metadata) * \
					CONFIG_KFENCE_NUM_OBJECTS)

extern struct kfence_metadata *kfence_metadata;

static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
	long index;

	/* The checks do not affect performance; only called from slow-paths. */

	if (!is_kfence_address((void *)addr))
		return NULL;

	/*
	 * May be an invalid index if called with an address at the edge of
	 * __kfence_pool, in which case we would report an "invalid access"
	 * error.
	 */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}

/* KFENCE error types for report generation. */
enum kfence_error_type {
	KFENCE_ERROR_OOB,		/* Detected an out-of-bounds access. */
	KFENCE_ERROR_UAF,		/* Detected a use-after-free access. */
	KFENCE_ERROR_CORRUPTION,	/* Detected a memory corruption on free. */
	KFENCE_ERROR_INVALID,		/* Invalid access of unknown type. */
	KFENCE_ERROR_INVALID_FREE,	/* Invalid free. */
};

void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
			 const struct kfence_metadata *meta, enum kfence_error_type type);

void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta);

#endif /* MM_KFENCE_KFENCE_H */
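
/*
 * Illustrative sketch (hypothetical caller, not part of the header): how the
 * pieces declared above fit together on a fault. KFENCE's real handler is
 * kfence_handle_page_fault() in mm/kfence/core.c, which also checks both
 * neighboring objects and takes meta->lock; this only shows the shape.
 */
static void example_report_fault(unsigned long addr, bool is_write,
				 struct pt_regs *regs)
{
	const struct kfence_metadata *meta = addr_to_metadata(addr);

	if (!meta) {
		/* In the pool, but no object maps to this address. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
		return;
	}

	if (meta->state == KFENCE_OBJECT_ALLOCATED)
		/* Object still live: touching its guard page is out-of-bounds. */
		kfence_report_error(addr, is_write, regs, meta, KFENCE_ERROR_OOB);
	else
		/* Object already freed: use-after-free. */
		kfence_report_error(addr, is_write, regs, meta, KFENCE_ERROR_UAF);
}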