/*
 * User address space access functions.
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/instrumented.h>

#include <asm/tlbflush.h>

/**
 * copy_from_user_nmi - NMI safe copy from user
 * @to:   Pointer to the destination buffer
 * @from: Pointer to a user space address of the current task
 * @n:    Number of bytes to copy
 *
 * Returns: The number of bytes not copied. 0 means success, i.e. all
 * bytes were copied.
 *
 * Unlike other copy_from_user() variants, this function can be called
 * from NMI context. Despite the name it is not restricted to NMI
 * context; it is safe to call from any other context as well. It
 * disables pagefaults across the copy, which means a fault will abort
 * the copy.
 *
 * For NMI context invocations this relies on the nested NMI work to
 * allow atomic faults from the NMI path; the nested NMI paths are
 * careful to preserve CR2.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
        unsigned long ret;

        if (!__access_ok(from, n))
                return n;

        if (!nmi_uaccess_okay())
                return n;

        /*
         * Even though this function is typically called from NMI/IRQ
         * context, disable pagefaults so that its behaviour is consistent
         * even when called from other contexts.
         */
        pagefault_disable();
        instrument_copy_from_user_before(to, from, n);
        ret = raw_copy_from_user(to, from, n);
        instrument_copy_from_user_after(to, from, n, ret);
        pagefault_enable();

        return ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
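
/*
 * Illustrative sketch (not part of the original file): one way a caller
 * such as a perf NMI handler might use copy_from_user_nmi() to snapshot
 * a user stack frame.  The structure layout and the helper name
 * example_read_user_frame() are hypothetical, chosen for this example
 * only; the return-value check relies on the documented contract above
 * (non-zero means not all bytes were copied).
 */
struct example_stack_frame {
        const void __user       *next_fp;
        unsigned long           return_address;
};

static int example_read_user_frame(const void __user *fp,
                                   struct example_stack_frame *frame)
{
        if (copy_from_user_nmi(frame, fp, sizeof(*frame)) != 0)
                return -EFAULT; /* faulted or refused; frame is unusable */
        return 0;
}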
/*
 * User address space access functions.
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/word-at-a-time.h>
#include <linux/sched.h>

/*
 * Earlier, GUP-based implementation of the same function: a best-effort
 * copy_from_user() that is NMI-safe.  Note that, unlike the variant
 * above, this one returns the number of bytes copied rather than the
 * number of bytes not copied.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
        unsigned long offset, addr = (unsigned long)from;
        unsigned long size, len = 0;
        struct page *page;
        void *map;
        int ret;

        if (__range_not_ok(from, n, TASK_SIZE))
                return len;

        do {
                /* Pin one user page with a lockless fast GUP walk. */
                ret = __get_user_pages_fast(addr, 1, 0, &page);
                if (!ret)
                        break;

                /* Copy at most up to the end of the current page. */
                offset = addr & (PAGE_SIZE - 1);
                size = min(PAGE_SIZE - offset, n - len);

                map = kmap_atomic(page);
                memcpy(to, map + offset, size);
                kunmap_atomic(map);
                put_page(page);

                len += size;
                to += size;
                addr += size;

        } while (len < n);

        return len;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
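
/*
 * Illustrative sketch (not part of the original file): a standalone
 * userspace program demonstrating the page-boundary chunking performed
 * by the loop above.  EXAMPLE_PAGE_SIZE and the starting address are
 * arbitrary values chosen for the demonstration.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096UL

int main(void)
{
        unsigned long addr = 0x1ff0, n = 6000, len = 0;

        while (len < n) {
                unsigned long offset = addr & (EXAMPLE_PAGE_SIZE - 1);
                unsigned long size = EXAMPLE_PAGE_SIZE - offset;

                if (size > n - len)
                        size = n - len;

                /* The first chunk runs only to the page boundary. */
                printf("copy %4lu bytes (offset %4lu within page)\n",
                       size, offset);

                len += size;
                addr += size;
        }
        return 0;       /* prints chunks of 16, 4096 and 1888 bytes */
}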