v5.4
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions, these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/string.h>

#ifdef CONFIG_UACCESS_MEMCPY
static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user * from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 *)to = *(u8 __force *)from;
			return 0;
		case 2:
			*(u16 *)to = *(u16 __force *)from;
			return 0;
		case 4:
			*(u32 *)to = *(u32 __force *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 *)to = *(u64 __force *)from;
			return 0;
#endif
		}
	}

	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 __force *)to = *(u8 *)from;
			return 0;
		case 2:
			*(u16 __force *)to = *(u16 *)from;
			return 0;
		case 4:
			*(u32 __force *)to = *(u32 *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 __force *)to = *(u64 *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy((void __force *)to, from, n);
	return 0;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif

#ifndef segment_eq
#define segment_eq(a, b) ((a).seg == (b).seg)
#endif
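
For context, a minimal sketch of how get_fs()/set_fs() and KERNEL_DS have traditionally been used: a kernel path temporarily widens the address limit so that routines expecting __user pointers can be handed kernel memory. The function and buffer below are hypothetical, not part of this header:

/* Hypothetical illustration only: widen the limit, do the I/O, restore it. */
static ssize_t example_read_into_kernel_buf(struct file *filp, void *kbuf,
					    size_t len, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);		/* uaccess checks now accept kernel addresses */
	ret = vfs_read(filp, (__force char __user *)kbuf, len, pos);
	set_fs(old_fs);			/* always restore the previous limit */

	return ret;
}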

#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))

/*
 * The architecture should really override this if possible, at least
 * doing a check on the get_fs()
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif
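
As the comment above suggests, an architecture can supply its own __access_ok that actually compares the requested range against the current address limit. A minimal sketch of what such an override might look like (illustrative only, not taken from any particular architecture):

/* Hypothetical arch override: range must fit below the current addr_limit. */
#define __access_ok __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	unsigned long limit = get_fs().seg;

	/* reject wrapping ranges and anything ending past the limit */
	return size <= limit && addr <= limit - size;
}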

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
        __chk_user_ptr(ptr);                                    \
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	 }							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*ptr)) ?		\
		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) :	\
		-EFAULT;					\
})

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	const void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*ptr)) ?		\
		__get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
		((x) = (__typeof__(*(ptr)))0,-EFAULT);		\
})

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));
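
A minimal usage sketch of the single-value routines above, as they might appear in a hypothetical ioctl handler; the handler, its argument layout and the __user pointer are illustrative, not part of this header:

static long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	u32 __user *uptr = (u32 __user *)arg;
	u32 val;

	/* get_user() performs the access_ok() check and may fault (sleep) */
	if (get_user(val, uptr))
		return -EFAULT;

	val *= 2;	/* some hypothetical transformation */

	/* put_user() returns 0 on success, -EFAULT on failure */
	if (put_user(val, uptr))
		return -EFAULT;

	return 0;
}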

/*
 * Copy a null terminated string from userspace.
 */
#ifndef __strncpy_from_user
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	char *tmp;
	strncpy(dst, (const char __force *)src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return (tmp - dst);
}
#endif

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
#ifndef __strnlen_user
#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
#endif

/*
 * Unlike strnlen, strnlen_user includes the nul terminator in
 * its returned count. Callers should check for a returned value
 * greater than N as an indication the string is too long.
 */
static inline long strnlen_user(const char __user *src, long n)
{
	if (!access_ok(src, 1))
		return 0;
	return __strnlen_user(src, n);
}
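
A short sketch of how a caller might combine strnlen_user() and strncpy_from_user() following the convention documented above (the returned length includes the terminator, 0 means a fault, a value greater than the limit means the string is too long). The limit and function name are hypothetical:

#define EXAMPLE_NAME_MAX	64	/* hypothetical limit, includes the NUL */

static long example_copy_name(char *dst, const char __user *uname)
{
	long len = strnlen_user(uname, EXAMPLE_NAME_MAX);

	if (len == 0)
		return -EFAULT;			/* faulted while probing the string */
	if (len > EXAMPLE_NAME_MAX)
		return -ENAMETOOLONG;		/* no terminator within the limit */

	return strncpy_from_user(dst, uname, len);
}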

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(to, n))
		return n;

	return __clear_user(to, n);
}
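
clear_user() follows the bulk-copy convention and returns the number of bytes that could not be zeroed. A brief hypothetical example of padding the unused tail of a user buffer:

/* Hypothetical: zero the unused tail of a user buffer after a partial fill. */
static int example_pad_tail(void __user *ubuf, size_t filled, size_t total)
{
	if (filled < total &&
	    clear_user((char __user *)ubuf + filled, total - filled))
		return -EFAULT;
	return 0;
}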

#include <asm/extable.h>

#endif /* __ASM_GENERIC_UACCESS_H */
v4.6
 
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions, these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/sched.h>
#include <linux/string.h>

#include <asm/segment.h>

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif

#ifndef segment_eq
#define segment_eq(a, b) ((a).seg == (b).seg)
#endif

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))

/*
 * The architecture should really override this if possible, at least
 * doing a check on the get_fs()
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);
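
A rough sketch of how an architecture's page-fault path might consult this table: if the faulting instruction has an entry, execution is redirected to its fixup address instead of treating the fault as fatal. The helper below is purely illustrative (real handlers are architecture-specific and adjust the saved registers directly):

/* Hypothetical helper inside an arch fault handler. */
static int example_fixup_exception(struct pt_regs *regs)
{
	unsigned long fixup = search_exception_table(instruction_pointer(regs));

	if (!fixup)
		return 0;	/* no fixup entry: a genuine kernel fault */

	/* resume at the out-of-line fixup code */
	instruction_pointer_set(regs, fixup);
	return 1;
}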

/*
 * architectures with an MMU should override these two
 */
#ifndef __copy_from_user
static inline __must_check long __copy_from_user(void *to,
		const void __user * from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 *)to = *(u8 __force *)from;
			return 0;
		case 2:
			*(u16 *)to = *(u16 __force *)from;
			return 0;
		case 4:
			*(u32 *)to = *(u32 __force *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 *)to = *(u64 __force *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy(to, (const void __force *)from, n);
	return 0;
}
#endif

#ifndef __copy_to_user
static inline __must_check long __copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 __force *)to = *(u8 *)from;
			return 0;
		case 2:
			*(u16 __force *)to = *(u16 *)from;
			return 0;
		case 4:
			*(u32 __force *)to = *(u32 *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 __force *)to = *(u64 *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy((void __force *)to, from, n);
	return 0;
}
#endif

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
        __chk_user_ptr(ptr);                                    \
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	 }							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	void *__p = (ptr);					\
	might_fault();						\
	access_ok(VERIFY_WRITE, __p, sizeof(*ptr)) ?		\
		__put_user((x), ((__typeof__(*(ptr)) *)__p)) :	\
		-EFAULT;					\
})

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : size;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	const void *__p = (ptr);				\
	might_fault();						\
	access_ok(VERIFY_READ, __p, sizeof(*ptr)) ?		\
		__get_user((x), (__typeof__(*(ptr)) *)__p) :	\
		-EFAULT;					\
})

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : size;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));

#ifndef __copy_from_user_inatomic
#define __copy_from_user_inatomic __copy_from_user
#endif

#ifndef __copy_to_user_inatomic
#define __copy_to_user_inatomic __copy_to_user
#endif

static inline long copy_from_user(void *to,
		const void __user * from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_READ, from, n))
		return __copy_from_user(to, from, n);
	else
		return n;
}

static inline long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_to_user(to, from, n);
	else
		return n;
}
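
A brief sketch of the bulk-copy convention above (a nonzero return is the number of bytes left uncopied), as it might be used in a hypothetical write() handler; the request struct and function are illustrative, not part of this header:

struct example_req {		/* hypothetical request layout */
	u32 flags;
	u32 len;
};

static ssize_t example_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct example_req req;

	if (count < sizeof(req))
		return -EINVAL;
	if (copy_from_user(&req, buf, sizeof(req)))
		return -EFAULT;		/* some bytes could not be copied */

	/* ... act on req ... */
	return count;
}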

/*
 * Copy a null terminated string from userspace.
 */
#ifndef __strncpy_from_user
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	char *tmp;
	strncpy(dst, (const char __force *)src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return (tmp - dst);
}
#endif

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
#ifndef __strnlen_user
#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
#endif

/*
 * Unlike strnlen, strnlen_user includes the nul terminator in
 * its returned count. Callers should check for a returned value
 * greater than N as an indication the string is too long.
 */
static inline long strnlen_user(const char __user *src, long n)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return 0;
	return __strnlen_user(src, n);
}

static inline long strlen_user(const char __user *src)
{
	return strnlen_user(src, 32767);
}

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;

	return __clear_user(to, n);
}

#endif /* __ASM_GENERIC_UACCESS_H */