v4.6

#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
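
/*
 * Editor's sketch (not part of the original header): alternative_call_2()
 * above is patched at boot so that the call site lands in one of the three
 * copy routines, in the priority described by the comment.  Expressed as
 * ordinary run-time C, the selection is roughly the hypothetical helper
 * below (assumes <asm/cpufeature.h> for boot_cpu_has()).
 */
static __always_inline __must_check unsigned long
copy_user_generic_sketch(void *to, const void *from, unsigned len)
{
	if (boot_cpu_has(X86_FEATURE_ERMS))	/* enhanced 'rep movsb' */
		return copy_user_enhanced_fast_string(to, from, len);
	if (boot_cpu_has(X86_FEATURE_REP_GOOD))	/* fast 'rep movsq' */
		return copy_user_generic_string(to, from, len);
	return copy_user_generic_unrolled(to, from, len);
}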

__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_from_user_nocheck(dst, src, size);
}

static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");
			__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	return __copy_to_user_nocheck(dst, src, size);
}

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	}
	case 2: {
		u16 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	}

	case 4: {
		u32 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	}
	case 8: {
		u64 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return __copy_to_user_nocheck(dst, src, size);
}

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}

unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */
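
A usage sketch for the v4.6 interfaces above (hypothetical code, not part of
the file; assumes <linux/uaccess.h>, <linux/types.h> and <linux/errno.h>).
The checking wrappers built on these primitives return the number of bytes
that could NOT be copied, so callers conventionally turn a non-zero result
into -EFAULT:

	struct demo_args {			/* hypothetical ioctl argument */
		u64 addr;
		u32 len;
		u32 flags;
	};

	static long demo_ioctl_sketch(void __user *uarg)
	{
		struct demo_args args;

		if (copy_from_user(&args, uarg, sizeof(args)))
			return -EFAULT;		/* some bytes were not copied */

		args.flags |= 1;		/* operate on the kernel-side copy */

		if (copy_to_user(uarg, &args, sizeof(args)))
			return -EFAULT;
		return 0;
	}
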
v6.8
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out tag bits from the address.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
	/*
	 * Refer to tlbstate_untag_mask directly to avoid a RIP-relative
	 * relocation in the alternative instructions: the relocation would be
	 * wrong once the instruction is copied to its target place.
	 */
	asm (ALTERNATIVE("",
			 "and %%gs:tlbstate_untag_mask, %[addr]\n\t", X86_FEATURE_LAM)
	     : [addr] "+r" (addr) : "m" (tlbstate_untag_mask));

	return addr;
}

#define untagged_addr(addr)	({					\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr(__addr);		\
})
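
/*
 * Editor's sketch (not part of the header): with LAM_U57, bits 62:57 of a
 * user pointer may carry a software tag, and untagged_addr() clears them by
 * AND-ing with the per-CPU untag mask.  The helper below shows the same
 * arithmetic with a fixed LAM_U57-style mask (illustration only; assumes
 * <linux/bits.h> for GENMASK_ULL(), whereas the real mask lives in tlbstate).
 */
static inline unsigned long __untagged_addr_sketch(unsigned long addr)
{
	const unsigned long lam_u57_untag_mask = ~GENMASK_ULL(62, 57);

	/* e.g. 0x06007f1234567890 (tag 0x03) becomes 0x00007f1234567890 */
	return addr & lam_u57_untag_mask;
}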

static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{
	mmap_assert_locked(mm);
	return addr & (mm)->context.untag_mask;
}

#define untagged_addr_remote(mm, addr)	({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})

#endif

/*
 * The virtual address space is logically divided into a kernel
 * half and a user half.  When cast to a signed type, user pointers
 * are positive and kernel pointers are negative.
 */
#define valid_user_address(x) ((long)(x) >= 0)

/*
 * User pointers can have tag bits on x86-64.  This scheme tolerates
 * arbitrary values in those bits rather than masking them off.
 *
 * Enforce two rules:
 * 1. 'ptr' must be in the user half of the address space
 * 2. 'ptr+size' must not overflow into kernel addresses
 *
 * Note that addresses around the sign change are not valid addresses,
 * and will GP-fault even with LAM enabled if the sign bit is set (see
 * "CR3.LAM_SUP" that can narrow the canonicality check if we ever
 * enable it, but not remove it entirely).
 *
 * So the "overflow into kernel addresses" does not imply some sudden
 * exact boundary at the sign bit, and we can allow a lot of slop on the
 * size check.
 *
 * In fact, we could probably remove the size check entirely, since
 * any kernel accesses will be in increasing address order starting
 * at 'ptr', and even if the end might be in kernel space, we'll
 * hit the GP faults for non-canonical accesses before we ever get
 * there.
 *
 * That's a separate optimization, for now just handle the small
 * constant case.
 */
static inline bool __access_ok(const void __user *ptr, unsigned long size)
{
	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
		return valid_user_address(ptr);
	} else {
		unsigned long sum = size + (unsigned long)ptr;
		return valid_user_address(sum) && sum >= (unsigned long)ptr;
	}
}
#define __access_ok __access_ok
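
/*
 * Editor's example (not part of the header): how the two rules above play
 * out for a few hypothetical values on the usual 64-bit layout, where user
 * addresses have the sign bit clear:
 *
 *	ptr = 0x00007f0000001000, size = 64 (small constant)
 *	  -> only the sign bit of ptr is checked                 -> OK
 *	ptr = 0x00007f0000001000, size = 0x100 (runtime value)
 *	  -> sum = 0x00007f0000001100, positive and no wrap      -> OK
 *	ptr = 0xffff888012345678 (kernel pointer)
 *	  -> sign bit set, valid_user_address() fails            -> rejected
 *	ptr = 0x00007fffffffffff, size = 0xffff800000000000
 *	  -> sum = 0xffffffffffffffff, sign bit set              -> rejected
 */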

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
rep_movs_alternative(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{
	stac();
	/*
	 * If CPU has FSRM feature, use 'rep movs'.
	 * Otherwise, use rep_movs_alternative.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep movsb",
			    "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		:"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
		: : "memory", "rax");
	clac();
	return len;
}

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
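
/*
 * Editor's sketch (not part of the header): the architecture only provides
 * raw_copy_from_user()/raw_copy_to_user(); the generic copy_from_user() in
 * include/linux/uaccess.h layers the access_ok() check on top and zeroes any
 * uncopied tail so stale kernel data cannot leak to the caller.  Roughly
 * (simplified; real code adds might_fault() and instrumentation, and
 * memset() needs <linux/string.h>):
 */
static inline unsigned long
copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;				/* assume nothing copied */

	if (__access_ok(from, n))
		res = raw_copy_from_user(to, from, n);	/* bytes NOT copied */
	if (unlikely(res))
		memset((char *)to + (n - res), 0, res);	/* zero the tail */
	return res;					/* 0 on full success */
}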

extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	long ret;
	kasan_check_write(dst, size);
	stac();
	ret = __copy_user_nocache(dst, src, size);
	clac();
	return ret;
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}

/*
 * Zero Userspace.
 */

__must_check unsigned long
rep_stos_alternative(void __user *addr, unsigned long len);

static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	stac();

	/*
	 * No memory constraint because it doesn't change any memory gcc
	 * knows about.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep stosb",
			    "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS))
		"2:\n"
	       _ASM_EXTABLE_UA(1b, 2b)
	       : "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
	       : "a" (0));

	clac();

	return size;
}

static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (__access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
#endif /* _ASM_X86_UACCESS_64_H */
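
A closing usage sketch (hypothetical, not from the file): like the copy
helpers, clear_user() returns the number of bytes it could NOT zero, so a
non-zero result is normally converted to -EFAULT:

	static long zero_user_buffer_sketch(void __user *ubuf, unsigned long len)
	{
		/* clear_user() performs the __access_ok() check itself */
		if (clear_user(ubuf, len))
			return -EFAULT;		/* faulted part-way through */
		return 0;
	}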