v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks: add "unconst" to all the const copies,
 * and make sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst;
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + unconst;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[128];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	/*
	 * Put the target buffer in the middle of stack allocation
	 * so that we don't step on future stack users regardless
	 * of stack growth direction.
	 */
	return trick_compiler(&buf[(128/2)-32]);
}

static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}
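		/*
		 * Copying sizeof(good_stack) bytes starting here will
		 * run past the end of the thread stack.
		 */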
	}

#ifdef ARCH_HAS_CURRENT_STACK_POINTER
	pr_info("stack     : %px\n", (void *)current_stack_pointer);
#endif
	pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
	pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));

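	/* Map an anonymous user page to act as the userspace side of the copies. */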
	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There isn't a safe way to not be protected by usercopy
		 * if we're going to write to another thread's stack.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This checks for whole-object size validation with hardened usercopy,
 * with or without usercopy whitelisting.
 */
static void do_usercopy_slab_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	void __user *test_user_addr;
	void *test_kern_addr;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

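	/* Point 16 bytes into each allocation so the copies start mid-object. */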
	test_user_addr = (void __user *)(user_addr + 16);
	test_kern_addr = one + 16;

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}
	pr_err("FAIL: bad usercopy not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

/*
 * This checks for the specific whitelist window within an object. If this
 * test passes, then do_usercopy_slab_size() tests will pass too.
 */
static void do_usercopy_slab_whitelist(bool to_user)
{
	unsigned long user_alloc;
	unsigned char *buf = NULL;
	unsigned char __user *user_addr;
	size_t offset, size;

	/* Make sure cache was prepared. */
	if (!whitelist_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate a buffer with a whitelisted window in the buffer.
	 */
	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
	if (!buf) {
		pr_warn("Failed to allocate buffer from whitelist cache\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_alloc >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}
	user_addr = (void __user *)user_alloc;

	memset(buf, 'B', cache_size);

	/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
	offset = (cache_size / 4) + unconst;
	size = (cache_size / 16) + unconst;

	if (to_user) {
		pr_info("attempting good copy_to_user inside whitelist\n");
		if (copy_to_user(user_addr, buf + offset, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user outside whitelist\n");
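		/* Starting one byte before the whitelisted window should trip the check. */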
		if (copy_to_user(user_addr, buf + offset - 1, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user inside whitelist\n");
		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user outside whitelist\n");
		if (copy_from_user(buf + offset - 1, user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}
	pr_err("FAIL: bad usercopy not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
	if (buf)
		kmem_cache_free(whitelist_cache, buf);
}

/* Callable tests. */
static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
{
	do_usercopy_slab_size(true);
}

static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
{
	do_usercopy_slab_size(false);
}

static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
{
	do_usercopy_slab_whitelist(true);
}

static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
{
	do_usercopy_slab_whitelist(false);
}

static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

static void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

static void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

static void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata: %px\n",
		test_text);
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

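	/* With hardened usercopy, copies sourced from kernel .text are always rejected. */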
	pr_info("attempting bad copy_to_user from kernel text: %px\n",
		vm_mmap);
	if (copy_to_user((void __user *)user_addr, vm_mmap,
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}
	pr_err("FAIL: bad copy_to_user() not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
 * a more complete test that would include copy_from_user() would risk
 * memory corruption. Just test copy_to_user() here, as that exercises
 * almost exactly the same code paths.
 */
static void do_usercopy_page_span(const char *name, void *kaddr)
{
	unsigned long uaddr;

	uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (uaddr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	/* Initialize contents. */
	memset(kaddr, 0xAA, PAGE_SIZE);

	/* Bump the kaddr forward to detect a page-spanning overflow. */
	kaddr += PAGE_SIZE / 2;

	pr_info("attempting good copy_to_user() from kernel %s: %px\n",
		name, kaddr);
	if (copy_to_user((void __user *)uaddr, kaddr,
			 unconst + (PAGE_SIZE / 2))) {
		pr_err("copy_to_user() failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
		name, kaddr);
	if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
		pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
		goto free_user;
	}

	pr_err("FAIL: bad copy_to_user() not detected!\n");
	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");

free_user:
	vm_munmap(uaddr, PAGE_SIZE);
}

static void lkdtm_USERCOPY_VMALLOC(void)
{
	void *addr;

	addr = vmalloc(PAGE_SIZE);
	if (!addr) {
		pr_err("vmalloc() failed!?\n");
		return;
	}
	do_usercopy_page_span("vmalloc", addr);
	vfree(addr);
}

static void lkdtm_USERCOPY_FOLIO(void)
{
	struct folio *folio;
	void *addr;

	/*
	 * FIXME: Folio checking currently misses 0-order allocations, so
	 * allocate and bump forward to the last page.
	 */
	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
	if (!folio) {
		pr_err("folio_alloc() failed!?\n");
		return;
	}
	addr = folio_address(folio);
	if (addr)
		do_usercopy_page_span("folio", addr + PAGE_SIZE);
	else
		pr_err("folio_address() failed?!\n");
	folio_put(folio);
}

void __init lkdtm_usercopy_init(void)
{
	/* Prepare cache that lacks SLAB_USERCOPY flag. */
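	/*
	 * The whitelisted region is cache_size / 16 bytes long and
	 * starts cache_size / 4 bytes into each object (the useroffset
	 * and usersize arguments below).
	 */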
	whitelist_cache =
		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
					   0, 0,
					   cache_size / 4,
					   cache_size / 16,
					   NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(whitelist_cache);
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
	CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
	CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
	CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
	CRASHTYPE(USERCOPY_STACK_BEYOND),
	CRASHTYPE(USERCOPY_VMALLOC),
	CRASHTYPE(USERCOPY_FOLIO),
	CRASHTYPE(USERCOPY_KERNEL),
};

struct crashtype_category usercopy_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};
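
Usage note: these crashtypes are normally triggered at runtime through
LKDTM's debugfs interface, for example:

  echo USERCOPY_SLAB_SIZE_TO > /sys/kernel/debug/provoke-crash/DIRECT

For comparison, the same file as of v4.17 follows.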
v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks: add "unconst" to all the const copies,
 * and make sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst = 0;
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + 0;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[32];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	return trick_compiler(buf);
}

static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	/* This is a pointer to outside our current stack frame. */
	if (bad_frame) {
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/* Put start address just inside stack. */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * There isn't a safe way to not be protected by usercopy
		 * if we're going to write to another thread's stack.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This checks for whole-object size validation with hardened usercopy,
 * with or without usercopy whitelisting.
 */
static void do_usercopy_heap_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	void __user *test_user_addr;
	void *test_kern_addr;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	test_user_addr = (void __user *)(user_addr + 16);
	test_kern_addr = one + 16;

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

/*
 * This checks for the specific whitelist window within an object. If this
 * test passes, then do_usercopy_heap_size() tests will pass too.
 */
static void do_usercopy_heap_whitelist(bool to_user)
{
	unsigned long user_alloc;
	unsigned char *buf = NULL;
	unsigned char __user *user_addr;
	size_t offset, size;

	/* Make sure cache was prepared. */
	if (!whitelist_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate a buffer with a whitelisted window in the buffer.
	 */
	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
	if (!buf) {
		pr_warn("Failed to allocate buffer from whitelist cache\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_alloc >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}
	user_addr = (void __user *)user_alloc;

	memset(buf, 'B', cache_size);

	/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
	offset = (cache_size / 4) + unconst;
	size = (cache_size / 16) + unconst;

	if (to_user) {
		pr_info("attempting good copy_to_user inside whitelist\n");
		if (copy_to_user(user_addr, buf + offset, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user outside whitelist\n");
		if (copy_to_user(user_addr, buf + offset - 1, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user inside whitelist\n");
		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user outside whitelist\n");
		if (copy_from_user(buf + offset - 1, user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
	if (buf)
		kmem_cache_free(whitelist_cache, buf);
}

/* Callable tests. */
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
	do_usercopy_heap_size(true);
}

void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
	do_usercopy_heap_size(false);
}

void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
{
	do_usercopy_heap_whitelist(true);
}

void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
{
	do_usercopy_heap_whitelist(false);
}

void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata\n");
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text\n");
	if (copy_to_user((void __user *)user_addr, vm_mmap,
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_usercopy_init(void)
{
	/* Prepare cache that lacks SLAB_USERCOPY flag. */
	whitelist_cache =
		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
					   0, 0,
					   cache_size / 4,
					   cache_size / 16,
					   NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(whitelist_cache);
}