/*
 * User address space access functions.
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/uaccess.h>
#include <linux/export.h>

#include <asm/tlbflush.h>

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

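	/*
	 * Reject ranges that fall outside the user address space; as with
	 * a faulting copy, report all n bytes as not copied.
	 */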
	if (__range_not_ok(from, n, TASK_SIZE))
		return n;

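	/*
	 * An NMI can land while CR3 is being switched (e.g. mid context
	 * switch, or while a temporary mm is loaded), in which case user
	 * accesses would hit the wrong address space. nmi_uaccess_okay()
	 * detects that state, and we bail out reporting nothing copied.
	 */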
	if (!nmi_uaccess_okay())
		return n;

	/*
	 * Even though this function is typically called from NMI/IRQ context,
	 * disable pagefaults so that its behaviour is consistent even when
	 * called from other contexts.
	 */
	pagefault_disable();
	ret = __copy_from_user_inatomic(to, from, n);
	pagefault_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
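
/*
 * Usage sketch (illustrative only, not part of this file): an NMI-context
 * caller, such as a perf callchain unwinder, copying one user stack frame.
 * A non-zero return means that many bytes were NOT copied, so the caller
 * must bail out rather than retry; read_user_frame() and struct stack_frame
 * here are hypothetical names for illustration.
 *
 *	struct stack_frame {
 *		const void __user *next_fp;
 *		unsigned long return_address;
 *	};
 *
 *	static int read_user_frame(const void __user *fp,
 *				   struct stack_frame *frame)
 *	{
 *		if (copy_from_user_nmi(frame, fp, sizeof(*frame)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */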