/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <asm/sections.h>

/*
 * Checks if a given pointer and length is contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}
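
/*
 * Note (illustrative, not part of this file): the generic
 * arch_within_stack_frames() stub in <linux/thread_info.h> returns 0
 * ("cannot check"), which falls through to GOOD_STACK above. An
 * architecture such as x86, when built with frame pointers, instead
 * walks the saved frame chain and returns GOOD_FRAME for an object
 * inside a single frame's data region, or BAD_STACK otherwise.
 */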

/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
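
/*
 * Illustrative sketch (hypothetical cache, not taken from this file): a
 * driver that only ever copies the "data" member of its objects to and
 * from userspace could whitelist exactly that region when creating its
 * cache:
 *
 *	struct foo {
 *		spinlock_t lock;
 *		char data[64];
 *	};
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *					0, SLAB_HWCACHE_ALIGN,
 *					offsetof(struct foo, data),
 *					sizeof_field(struct foo, data), NULL);
 *
 * A usercopy that stays inside foo::data then passes the whitelist
 * check, while one touching foo::lock aborts with this cache's name.
 */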
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len)
{
	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);
}

void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear physical memory mapping,
	 * in which case __pa() is not just the reverse of __va(). This can
	 * be detected and checked:
	 */
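	/*
	 * (Illustrative aside: per the generic definition in <linux/mm.h>,
	 * lm_alias(x) is __va(__pa_symbol(x)), i.e. the linear-map alias
	 * of a kernel-image address. On arm64, for example, the kernel
	 * image and the linear map are separate regions, so both aliases
	 * of [_stext, _etext) need checking.)
	 */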
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + n < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}
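
/*
 * Note: ZERO_OR_NULL_PTR() rejects not just NULL but also ZERO_SIZE_PTR,
 * the ((void *)16) cookie that kmalloc(0) returns, so zero-size
 * allocations can never be used as usercopy buffers.
 */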

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Reject if range is entirely either Reserved (i.e. special or
	 * device memory), or CMA. Otherwise, reject since the object spans
	 * several independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}
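
/*
 * Illustrative example of the compound-page case above (assumes 4KB
 * pages): a buffer from alloc_pages(GFP_KERNEL | __GFP_COMP, 2) is a
 * single order-2 compound allocation, so all 16KB share one head page
 * and a copy staying inside it is allowed; a copy of the same size
 * crossing independently allocated pages aborts with "spans multiple
 * pages".
 */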

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return;

	page = virt_to_head_page(ptr);

	if (PageSlab(page)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, page, to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, page, to_user);
	}
}

/*
 * Validates that the given object is:
 * - not bogus address
 * - known-safe heap or stack object
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user, 0, n);
	}

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);

// SPDX-License-Identifier: GPL-2.0-only
/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>
#include "slab.h"

/*
 * Checks if a given pointer and length is contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: within the current stack (when can't frame-check exactly)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	/* Finally, check stack depth if possible. */
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
	if (IS_ENABLED(CONFIG_STACK_GROWSUP)) {
		if ((void *)current_stack_pointer < obj + len)
			return BAD_STACK;
	} else {
		if (obj < (void *)current_stack_pointer)
			return BAD_STACK;
	}
#endif

	return GOOD_STACK;
}
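
/*
 * Note on the depth check above (illustrative addresses): with a
 * downward-growing stack, anything below the current stack pointer is
 * in the unused portion of the stack and cannot be a live local
 * variable. So with the stack pointer at, say, 0xffff8000, an obj at
 * 0xffff7ff0 lies in dead stack and is rejected as BAD_STACK.
 */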

/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * There are several checks being performed on the buffer by the
 * __check_object_size() function. Normal stack buffer usage should never
 * trip the checks, and kernel text addressing will always trip the check.
 * For cache objects, it is checking that only the whitelisted range of
 * bytes for a given cache is being accessed (via the cache's usersize and
 * useroffset fields). To adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
 */
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear physical memory mapping,
	 * in which case __pa() is not just the reverse of __va(). This can
	 * be detected and checked:
	 */
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + (n - 1) < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}
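
/*
 * Worked example for the wrap check above (illustrative, 64-bit): with
 * ptr == ULONG_MAX - 3 and n == 4, the copy covers the last 4 bytes of
 * the address space; ptr + (n - 1) == ULONG_MAX does not wrap, so it is
 * allowed, whereas the stricter ptr + n would wrap to 0 and falsely
 * reject it. Any larger n makes ptr + (n - 1) wrap and aborts the copy.
 */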

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long offset;
	struct folio *folio;

	if (is_kmap_addr(ptr)) {
		offset = offset_in_page(ptr);
		if (n > PAGE_SIZE - offset)
			usercopy_abort("kmap", NULL, to_user, offset, n);
		return;
	}

	if (is_vmalloc_addr(ptr)) {
		struct vmap_area *area = find_vmap_area(addr);

		if (!area)
			usercopy_abort("vmalloc", "no area", to_user, 0, n);

		if (n > area->va_end - addr) {
			offset = addr - area->va_start;
			usercopy_abort("vmalloc", NULL, to_user, offset, n);
		}
		return;
	}

	if (!virt_addr_valid(ptr))
		return;

	folio = virt_to_folio(ptr);

	if (folio_test_slab(folio)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, folio_slab(folio), to_user);
	} else if (folio_test_large(folio)) {
		offset = ptr - folio_address(folio);
		if (n > folio_size(folio) - offset)
			usercopy_abort("page alloc", NULL, to_user, offset, n);
	}
}
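
/*
 * Illustrative examples for the three checks above (typical configs,
 * not taken from this file): a kmap()ed pointer is only valid to the
 * end of its single page, so at most PAGE_SIZE - offset bytes may be
 * copied; a vmalloc(8192) buffer may be copied only up to the end of
 * its vmap area; and with SLUB, kmalloc(16384) typically falls back to
 * the page allocator and returns part of an order-2 compound page (a
 * large folio), so the copy must end within it (offset + n no larger
 * than folio_size()).
 */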

static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not bogus address
 * - fully contained by stack (or stack frame, when available)
 * - fully within SLAB object (or object whitelist area, when available)
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user,
#ifdef CONFIG_ARCH_HAS_CURRENT_STACK_POINTER
			IS_ENABLED(CONFIG_STACK_GROWSUP) ?
				ptr - (void *)current_stack_pointer :
				(void *)current_stack_pointer - ptr,
#else
			0,
#endif
			n);
	}

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	if (kstrtobool(str, &enable_checks))
		pr_warn("Invalid option string for hardened_usercopy: '%s'\n",
			str);
	return 1;
}

__setup("hardened_usercopy=", parse_hardened_usercopy);

static int __init set_hardened_usercopy(void)
{
	if (enable_checks == false)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);
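
/*
 * Usage note (illustrative): booting with "hardened_usercopy=off" on
 * the kernel command line sets enable_checks to false, and the
 * late_initcall above then enables the bypass_usercopy_checks static
 * branch, so __check_object_size() returns immediately with only a
 * patched-out branch left as overhead.
 */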