v6.8
/*
 * User address space access functions.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/instrumented.h>

#include <asm/tlbflush.h>

/**
 * copy_from_user_nmi - NMI safe copy from user
 * @to:		Pointer to the destination buffer
 * @from:	Pointer to a user space address of the current task
 * @n:		Number of bytes to copy
 *
 * Returns: The number of bytes not copied. 0 means success, i.e. all bytes were copied
 *
 * Contrary to other copy_from_user() variants this function can be called
 * from NMI context. Despite the name it is not restricted to NMI context;
 * it is safe to call from any other context as well. It disables pagefaults
 * across the copy, which means a fault will abort the copy.
 *
 * For NMI context invocations this relies on the nested NMI work to allow
 * atomic faults from the NMI path; the nested NMI paths are careful to
 * preserve CR2.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	if (!__access_ok(from, n))
		return n;

	if (!nmi_uaccess_okay())
		return n;

	/*
	 * Even though this function is typically called from NMI/IRQ context,
	 * disable pagefaults so that its behaviour is consistent even when
	 * called from other contexts.
	 */
	pagefault_disable();
	instrument_copy_from_user_before(to, from, n);
	ret = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, ret);
	pagefault_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
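
For context, here is a minimal sketch of how a caller might use this interface from an NMI path, e.g. capturing a few bytes of the user stack the way a sampling profiler does. The helper name sample_user_stack() and the fixed-size buffer are illustrative assumptions, not part of the kernel source above.

#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>

/* Hypothetical helper: copy up to len bytes of the user stack from NMI context. */
static unsigned long sample_user_stack(struct pt_regs *regs, u8 *buf, unsigned long len)
{
	const void __user *sp = (const void __user *)user_stack_pointer(regs);
	unsigned long not_copied;

	/* NMI safe: a fault aborts the copy instead of sleeping. */
	not_copied = copy_from_user_nmi(buf, sp, len);

	/* Number of bytes actually captured. */
	return len - not_copied;
}
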
v3.1
/*
 * User address space access functions.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/highmem.h>
#include <linux/module.h>

/*
 * best effort, GUP-based copy_from_user() that is NMI-safe
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		/* Pin the user page without sleeping; bail out on failure. */
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		/* Copy at most up to the end of the current page. */
		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		/* Map the page, copy the chunk, then release the page. */
		map = kmap_atomic(page);
		memcpy(to, map+offset, size);
		kunmap_atomic(map);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
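
To make the chunking arithmetic in the loop above concrete, here is a small stand-alone sketch (plain user-space C, not kernel code) that splits a copy into per-page pieces using the same offset/size computation; the 4096-byte PAGE_SIZE, the example start address, and the printout are illustrative assumptions.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/*
 * Walk an address range in per-page chunks, mirroring the
 * offset/size arithmetic used by copy_from_user_nmi() above.
 */
int main(void)
{
	unsigned long addr = 0x1000ff0UL;	/* example: 16 bytes before a page boundary */
	unsigned long n = 100, len = 0;

	while (len < n) {
		unsigned long offset = addr & (PAGE_SIZE - 1);
		unsigned long size = PAGE_SIZE - offset;

		if (size > n - len)
			size = n - len;

		printf("chunk: addr=%#lx offset=%lu size=%lu\n",
		       addr, offset, size);

		len  += size;
		addr += size;
	}
	return 0;
}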