/*
 * User address space access functions.
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/highmem.h>
#include <linux/module.h>

/*
 * Best-effort, GUP-based copy_from_user() that is NMI-safe.
 *
 * Note that, unlike copy_from_user(), this returns the number of bytes
 * that *were* copied: the loop simply stops at the first page that
 * cannot be pinned.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
        unsigned long offset, addr = (unsigned long)from;
        unsigned long size, len = 0;
        struct page *page;
        void *map;
        int ret;

        do {
                /* Pin the next user page without sleeping or faulting. */
                ret = __get_user_pages_fast(addr, 1, 0, &page);
                if (!ret)
                        break;

                /* Copy at most up to the end of the current page. */
                offset = addr & (PAGE_SIZE - 1);
                size = min(PAGE_SIZE - offset, n - len);

                map = kmap_atomic(page);
                memcpy(to, map + offset, size);
                kunmap_atomic(map);
                put_page(page);

                len += size;
                to += size;
                addr += size;

        } while (len < n);

        return len;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
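
/*
 * A minimal usage sketch, not part of the original file: roughly how a
 * perf-style NMI handler would walk a user-space stack with the helper
 * above. The struct and function below are hypothetical illustrations
 * (the real caller lives in the perf callchain code), assuming a
 * frame-pointer-based user stack layout.
 */
struct example_user_frame {
        unsigned long next_fp;          /* caller's saved frame pointer */
        unsigned long ret_addr;         /* return address into the caller */
};

static void example_walk_user_stack(unsigned long fp)
{
        struct example_user_frame frame;
        unsigned long bytes;

        while (fp) {
                /* This version returns the number of bytes copied. */
                bytes = copy_from_user_nmi(&frame, (void __user *)fp,
                                           sizeof(frame));
                if (bytes != sizeof(frame))
                        break;          /* page not pinned, or partial copy */

                /* ... record frame.ret_addr as a callchain entry ... */
                fp = frame.next_fp;
        }
}

/*
 * A later revision of the same file follows below; it replaces the GUP
 * loop with a single __copy_from_user_inatomic() under pagefault_disable().
 */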
/*
 * User address space access functions.
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/word-at-a-time.h>
#include <linux/sched.h>

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path;
 * the nested NMI paths are careful to preserve CR2.
 *
 * Unlike the GUP-based variant above, this returns the usual
 * copy_from_user() convention: the number of bytes NOT copied.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
        unsigned long ret;

        /* Reject ranges that fall outside the user address space. */
        if (__range_not_ok(from, n, TASK_SIZE))
                return n;

        /*
         * Even though this function is typically called from NMI/IRQ
         * context, disable page faults so that its behaviour is consistent
         * even when called from other contexts.
         */
        pagefault_disable();
        ret = __copy_from_user_inatomic(to, from, n);
        pagefault_enable();

        return ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
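
/*
 * Caller-side sketch, hypothetical and not part of the original file:
 * with this revision success is a zero return (no bytes left to copy),
 * the opposite of the GUP-based version above, so a caller checks for
 * a full copy like this.
 */
static int example_read_user_long(const unsigned long __user *p,
                                  unsigned long *val)
{
        unsigned long left = copy_from_user_nmi(val, p, sizeof(*val));

        return left ? -EFAULT : 0;      /* zero left means the copy succeeded */
}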