v3.15
 
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/module.h>
#include <asm/uaccess.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	stac();
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"0:	movq %[zero],(%[dst])\n"
		"	addq   %[eight],%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb   %b[zero],(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b,3b)
		_ASM_EXTABLE(1b,2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
		  [zero] "r" (0UL), [eight] "r" (8UL));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);
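
The fixup at label 3 computes how many bytes were left unzeroed when the quadword loop faults. A minimal C model of that arithmetic (the helper name is hypothetical, for illustration only):

/*
 * Illustrative model of "lea 0(%[size1],%[size8],8),%[size8]": on a
 * fault in the 8-byte loop, rcx still holds the pending quadword
 * count, so the unzeroed remainder is those quadwords plus the
 * byte-sized tail that was never attempted.
 */
static unsigned long clear_user_fixup(unsigned long pending_quads,
				      unsigned long tail_bytes)
{
	return pending_quads * 8 + tail_bytes;	/* value __clear_user() returns */
}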

unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);
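
A hedged usage sketch, assuming a hypothetical caller: clear_user() returns the number of bytes that could not be zeroed, so any nonzero result signals a fault:

/* Hypothetical caller: zero a user buffer, reporting -EFAULT on failure. */
static int zero_user_buffer(void __user *ubuf, unsigned long len)
{
	if (clear_user(ubuf, len))
		return -EFAULT;		/* some bytes were not writable */
	return 0;
}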

unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
{
	if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
		return copy_user_generic((__force void *)to, (__force void *)from, len);
	}
	return len;
}
EXPORT_SYMBOL(copy_in_user);
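
copy_in_user() follows the same convention and returns the number of bytes left uncopied. A sketch of a hypothetical caller that duplicates one user buffer into another:

/* Hypothetical caller: duplicate one user buffer into another. */
static int dup_user_range(void __user *dst, const void __user *src, unsigned len)
{
	if (copy_in_user(dst, src, len))
		return -EFAULT;		/* source unreadable or destination unwritable */
	return 0;
}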

/*
 * Try to copy last bytes and clear the rest if needed.
 * Since protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 */
__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
{
	char c;
	unsigned zero_len;

	for (; len; --len, to++) {
		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to, sizeof(char)))
			break;
	}

	for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
		if (__put_user_nocheck(c, to++, sizeof(char)))
			break;
	clac();
	return len;
}
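
The zerorest pass above is what lets copy_from_user() hand back a fully initialized kernel buffer even when the copy faults partway. A hedged caller-side sketch of that contract (the function and names are hypothetical):

/*
 * Hypothetical caller: if the user copy faults partway, the tail
 * handler above has already zeroed the uncopied remainder of 'buf',
 * so the kernel buffer holds no stale data even on the error path.
 */
static int read_user_record(char *buf, const void __user *ubuf, unsigned len)
{
	if (copy_from_user(buf, ubuf, len))
		return -EFAULT;
	return 0;
}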
v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	stac();
	asm volatile(
		"	testq  %[size8],%[size8]\n"
		"	jz     4f\n"
		"	.align 16\n"
		"0:	movq $0,(%[dst])\n"
		"	addq   $8,%[dst]\n"
		"	decl %%ecx ; jnz   0b\n"
		"4:	movq  %[size1],%%rcx\n"
		"	testl %%ecx,%%ecx\n"
		"	jz     2f\n"
		"1:	movb   $0,(%[dst])\n"
		"	incq   %[dst]\n"
		"	decl %%ecx ; jnz  1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	lea 0(%[size1],%[size8],8),%[size8]\n"
		"	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE_UA(0b, 3b)
		_ASM_EXTABLE_UA(1b, 2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
	clac();
	return size;
}
EXPORT_SYMBOL(__clear_user);

unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

/*
 * Similar to copy_user_handle_tail, probe for the write fault point,
 * but reuse __memcpy_mcsafe in case a new read error is encountered.
 * clac() is handled in _copy_to_iter_mcsafe().
 */
__visible notrace unsigned long
mcsafe_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++, from++) {
		/*
		 * Call the assembly routine back directly since
		 * memcpy_mcsafe() may silently fallback to memcpy.
		 */
		unsigned long rem = __memcpy_mcsafe(to, from, 1);

		if (rem)
			break;
	}
	return len;
}
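
A hedged sketch of how a bulk copy and this tail handler compose; the helper below is hypothetical (in the kernel, the write-fault fixup of __memcpy_mcsafe() reaches this function from assembly), but it shows why the byte-wise retry yields an exact remainder:

/*
 * Hypothetical composition: retry the uncopied remainder of a bulk
 * __memcpy_mcsafe() byte-by-byte so the returned count reflects the
 * exact fault offset rather than a coarse, cacheline-sized estimate.
 */
static unsigned long copy_mc_precise(char *to, char *from, unsigned len)
{
	unsigned long rem = __memcpy_mcsafe(to, from, len);

	if (rem)
		rem = mcsafe_handle_tail(to + (len - rem), from + (len - rem), rem);
	return rem;	/* bytes that could not be copied */
}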

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
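
Illustrative arithmetic for the rounding described above, assuming 64-byte cache lines; the helper is hypothetical and only mirrors the loop bounds:

/*
 * Hypothetical helper mirroring the loop above: with 64-byte lines,
 * addr = 0x1038 and size = 16 spans two lines, so two clwb
 * instructions are issued (at 0x1000 and 0x1040).
 */
static unsigned long clwb_ops(unsigned long addr, size_t size)
{
	unsigned long start = addr & ~63UL;

	return DIV_ROUND_UP(addr + size - start, 64);
}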

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc = __copy_user_nocache(dst, src, size, 0);

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}
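
A worked instance of these rules, with hypothetical values: copying 12 bytes to a destination aligned to 8 skips the head flush, but the 12-byte length is not a multiple of 8, so the trailing 4 cached bytes force a flush of the last line. The predicates below restate the two decisions; they are illustrative only:

/* Illustrative restatement of the two flush decisions above. */
static bool flush_whole_range(unsigned long dest, unsigned size)
{
	/* short copies go through the cache unless they are an aligned 4-byte store */
	return size < 8 && (!IS_ALIGNED(dest, 4) || size != 4);
}

static bool flush_tail_line(unsigned long flushed, unsigned size)
{
	/* a remainder that is not a multiple of 8 was stored through the cache */
	return size >= 8 && size > flushed && !IS_ALIGNED(size - flushed, 8);
}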

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
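
A hedged usage sketch with a hypothetical pmem-style caller; on this configuration the generic memcpy_flushcache() wrapper resolves to the routine above:

/*
 * Hypothetical caller: write a record to persistent memory without
 * leaving dirty lines in the CPU cache; a store fence afterwards
 * orders the posted non-temporal stores.
 */
static void pmem_write_record(void *pmem_dst, const void *src, size_t len)
{
	memcpy_flushcache(pmem_dst, src, len);
	wmb();	/* drain the write-combining buffers */
}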

void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len)
{
	char *from = kmap_atomic(page);

	memcpy_flushcache(to, from + offset, len);
	kunmap_atomic(from);
}
#endif