Linux v6.2: include/asm-generic/uaccess.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions, these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/string.h>
#include <asm-generic/access_ok.h>

#ifdef CONFIG_UACCESS_MEMCPY
#include <asm/unaligned.h>

static __always_inline int
__get_user_fn(size_t size, const void __user *from, void *to)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 *)to = *((u8 __force *)from);
		return 0;
	case 2:
		*(u16 *)to = get_unaligned((u16 __force *)from);
		return 0;
	case 4:
		*(u32 *)to = get_unaligned((u32 __force *)from);
		return 0;
	case 8:
		*(u64 *)to = get_unaligned((u64 __force *)from);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}

}
#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)
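/*
 * Note: the self-referential #define above (and the matching one for
 * __put_user_fn() below) marks the helper as already provided, so the
 * generic "#ifndef __get_user_fn" fallback later in this header is
 * skipped when CONFIG_UACCESS_MEMCPY is enabled.
 */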
 
static __always_inline int
__put_user_fn(size_t size, void __user *to, void *from)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 __force *)to = *(u8 *)from;
		return 0;
	case 2:
		put_unaligned(*(u16 *)from, (u16 __force *)to);
		return 0;
	case 4:
		put_unaligned(*(u32 *)from, (u32 __force *)to);
		return 0;
	case 8:
		put_unaligned(*(u64 *)from, (u64 __force *)to);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	*((type *)dst) = get_unaligned((type *)(src));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	put_unaligned(*((type *)src), (type *)(dst));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)
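A minimal usage sketch (not part of this header; the helper name is made up): __get_kernel_nofault() stores through the dst pointer and branches to the caller-supplied error label on failure, which in this memcpy-based variant can never happen.

static long peek_kernel_long(const unsigned long *addr)
{
	unsigned long val;

	/* read *addr, bailing out through the error label on a fault */
	__get_kernel_nofault(&val, addr, unsigned long, efault);
	return val;

efault:
	return -EFAULT;
}

In practice these macros are normally reached via the get_kernel_nofault() and copy_from_kernel_nofault() wrappers in <linux/uaccess.h> rather than being invoked directly.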

static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user * from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	 }							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*ptr)) ?		\
		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) :	\
		-EFAULT;					\
})

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	const void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*ptr)) ?		\
		__get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
		((x) = (__typeof__(*(ptr)))0,-EFAULT);		\
})

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));
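A short usage sketch (illustrative, not part of this header; the handler and its arguments are made-up names): get_user() and put_user() transfer a single value whose size is taken from the pointer type, and both return 0 on success or -EFAULT on a bad user pointer.

static long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int __user *uptr = (int __user *)arg;	/* user pointer supplied by the caller */
	int val;

	if (get_user(val, uptr))		/* fetch one int from user space */
		return -EFAULT;
	val++;
	return put_user(val, uptr);		/* write the updated value back */
}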
/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(to, n))
		return n;

	return __clear_user(to, n);
}
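A usage sketch (illustrative; buf, copied and len are made-up names): clear_user() zero-fills a user buffer and, like the copy routines, returns the number of bytes it could not clear, so callers usually turn any nonzero return into -EFAULT.

	/* e.g. pad the unwritten tail of a user buffer in a read() handler */
	if (clear_user(buf + copied, len - copied))
		return -EFAULT;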

#include <asm/extable.h>

__must_check long strncpy_from_user(char *dst, const char __user *src,
				    long count);
__must_check long strnlen_user(const char __user *src, long n);

#endif /* __ASM_GENERIC_UACCESS_H */
Linux v3.15: include/asm-generic/uaccess.h
 
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions, these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/sched.h>
#include <linux/string.h>

#include <asm/segment.h>

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif

#ifndef segment_eq
#define segment_eq(a, b) ((a).seg == (b).seg)
#endif

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))

/*
 * The architecture should really override this if possible, at least
 * doing a check on the get_fs()
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);
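A fault-handler sketch (illustrative; regs->pc stands in for whatever the architecture calls the faulting instruction address): when a uaccess instruction faults, the handler looks its address up in the exception table and, if a fixup entry exists, resumes execution at the fixup instead of treating the fault as fatal.

	unsigned long fixup = search_exception_table(regs->pc);

	if (fixup) {
		regs->pc = fixup;	/* continue at the out-of-line fixup code */
		return;			/* fault handled */
	}
	/* no fixup: a genuine fault, e.g. deliver SIGSEGV or oops */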

/*
 * architectures with an MMU should override these two
 */
#ifndef __copy_from_user
static inline __must_check long __copy_from_user(void *to,
		const void __user * from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 *)to = *(u8 __force *)from;
			return 0;
		case 2:
			*(u16 *)to = *(u16 __force *)from;
			return 0;
		case 4:
			*(u32 *)to = *(u32 __force *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 *)to = *(u64 __force *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy(to, (const void __force *)from, n);
	return 0;
}
#endif

#ifndef __copy_to_user
static inline __must_check long __copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 __force *)to = *(u8 *)from;
			return 0;
		case 2:
			*(u16 __force *)to = *(u16 *)from;
			return 0;
		case 4:
			*(u32 __force *)to = *(u32 *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 __force *)to = *(u64 *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy((void __force *)to, from, n);
	return 0;
}
#endif

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	 }							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	might_fault();						\
	access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ?		\
		__put_user(x, ptr) :				\
		-EFAULT;					\
})

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : size;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	might_fault();						\
	access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ?		\
		__get_user(x, ptr) :				\
		-EFAULT;					\
})

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : size;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));

#ifndef __copy_from_user_inatomic
#define __copy_from_user_inatomic __copy_from_user
#endif

#ifndef __copy_to_user_inatomic
#define __copy_to_user_inatomic __copy_to_user
#endif

static inline long copy_from_user(void *to,
		const void __user * from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_READ, from, n))
		return __copy_from_user(to, from, n);
	else
		return n;
}

static inline long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_to_user(to, from, n);
	else
		return n;
}
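A usage sketch (illustrative; struct foo and uarg are made-up names): copy_from_user() and copy_to_user() return the number of bytes that could not be copied, so callers normally treat any nonzero return as -EFAULT.

	struct foo karg;

	if (copy_from_user(&karg, uarg, sizeof(karg)))	/* user -> kernel */
		return -EFAULT;
	/* ... operate on karg ... */
	if (copy_to_user(uarg, &karg, sizeof(karg)))	/* kernel -> user */
		return -EFAULT;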

/*
 * Copy a null terminated string from userspace.
 */
#ifndef __strncpy_from_user
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	char *tmp;
	strncpy(dst, (const char __force *)src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return (tmp - dst);
}
#endif

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
#ifndef __strnlen_user
#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
#endif

/*
 * Unlike strnlen, strnlen_user includes the nul terminator in
 * its returned count. Callers should check for a returned value
 * greater than N as an indication the string is too long.
 */
static inline long strnlen_user(const char __user *src, long n)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return 0;
	return __strnlen_user(src, n);
}

static inline long strlen_user(const char __user *src)
{
	return strnlen_user(src, 32767);
}
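A usage sketch (illustrative; ustr and kbuf are made-up names): strnlen_user() counts the terminating NUL and returns 0 on a faulting access, so a caller typically rejects 0 and over-long results before pulling the string in with strncpy_from_user().

	char kbuf[256];
	long len = strnlen_user(ustr, sizeof(kbuf));

	if (len == 0 || len > sizeof(kbuf))	/* faulted, or string too long */
		return -EFAULT;
	if (strncpy_from_user(kbuf, ustr, len) < 0)
		return -EFAULT;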

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;

	return __clear_user(to, n);
}

#endif /* __ASM_GENERIC_UACCESS_H */