include/asm-generic/uaccess.h (v3.1)
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions, these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>

#include <asm/segment.h>

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif

#define segment_eq(a, b) ((a).seg == (b).seg)

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))

/*
 * The architecture should really override this if possible, at least
 * doing a check on the get_fs()
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);

/*
 * architectures with an MMU should override these two
 */
#ifndef __copy_from_user
static inline __must_check long __copy_from_user(void *to,
		const void __user * from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 *)to = *(u8 __force *)from;
			return 0;
		case 2:
			*(u16 *)to = *(u16 __force *)from;
			return 0;
		case 4:
			*(u32 *)to = *(u32 __force *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 *)to = *(u64 __force *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy(to, (const void __force *)from, n);
	return 0;
}
#endif

#ifndef __copy_to_user
static inline __must_check long __copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 __force *)to = *(u8 *)from;
			return 0;
		case 2:
			*(u16 __force *)to = *(u16 *)from;
			return 0;
		case 4:
			*(u32 __force *)to = *(u32 *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 __force *)to = *(u64 *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy((void __force *)to, from, n);
	return 0;
}
#endif

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
        __chk_user_ptr(ptr);                                    \
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	 }							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	might_sleep();						\
	access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ?		\
		__put_user(x, ptr) :				\
		-EFAULT;					\
})

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : size;
}

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	might_sleep();						\
	access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ?		\
		__get_user(x, ptr) :				\
		-EFAULT;					\
})

static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	size = __copy_from_user(x, ptr, size);
	return size ? -EFAULT : size;
}

extern int __get_user_bad(void) __attribute__((noreturn));

#ifndef __copy_from_user_inatomic
#define __copy_from_user_inatomic __copy_from_user
#endif

#ifndef __copy_to_user_inatomic
#define __copy_to_user_inatomic __copy_to_user
#endif

static inline long copy_from_user(void *to,
		const void __user * from, unsigned long n)
{
	might_sleep();
	if (access_ok(VERIFY_READ, from, n))
		return __copy_from_user(to, from, n);
	else
		return n;
}

static inline long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	might_sleep();
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_to_user(to, from, n);
	else
		return n;
}

/*
 * Copy a null terminated string from userspace.
 */
#ifndef __strncpy_from_user
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	char *tmp;
	strncpy(dst, (const char __force *)src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return (tmp - dst);
}
#endif

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
#ifndef __strnlen_user
#define __strnlen_user strnlen
#endif

static inline long strnlen_user(const char __user *src, long n)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return 0;
	return __strnlen_user(src, n);
}

static inline long strlen_user(const char __user *src)
{
	return strnlen_user(src, 32767);
}

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_sleep();
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;

	return __clear_user(to, n);
}

#endif /* __ASM_GENERIC_UACCESS_H */
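The v3.1 header still builds user access around the per-thread addr_limit (get_fs()/set_fs() with KERNEL_DS and USER_DS). Below is a minimal, hypothetical sketch of the classic save/widen/restore pattern this API enabled, in which kernel code temporarily raises the limit so a kernel buffer can be passed through a __user interface. The function name demo_kernel_read() is made up, vfs_read() is the ordinary VFS helper, and the whole set_fs() mechanism has since been removed from mainline kernels (compare the v6.2 listing below):

/*
 * Illustrative only, not part of the header. Assumes <linux/fs.h> and
 * <linux/uaccess.h> for vfs_read() and the segment helpers.
 */
static ssize_t demo_kernel_read(struct file *file, void *buf, size_t len,
				loff_t *pos)
{
	mm_segment_t old_fs = get_fs();	/* usually USER_DS */
	ssize_t ret;

	set_fs(KERNEL_DS);	/* make access_ok() accept kernel addresses */
	ret = vfs_read(file, (char __user *)buf, len, pos);
	set_fs(old_fs);		/* always restore the previous limit */

	return ret;
}

The restore step is essential: leaving KERNEL_DS in place would let any later copy_*_user() call on a corrupted pointer read or write arbitrary kernel memory, which is a large part of why the mechanism was eventually removed.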
include/asm-generic/uaccess.h (v6.2)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions, these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/string.h>
#include <asm-generic/access_ok.h>

#ifdef CONFIG_UACCESS_MEMCPY
#include <asm/unaligned.h>

static __always_inline int
__get_user_fn(size_t size, const void __user *from, void *to)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 *)to = *((u8 __force *)from);
		return 0;
	case 2:
		*(u16 *)to = get_unaligned((u16 __force *)from);
		return 0;
	case 4:
		*(u32 *)to = get_unaligned((u32 __force *)from);
		return 0;
	case 8:
		*(u64 *)to = get_unaligned((u64 __force *)from);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}

}
#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

static __always_inline int
__put_user_fn(size_t size, void __user *to, void *from)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 __force *)to = *(u8 *)from;
		return 0;
	case 2:
		put_unaligned(*(u16 *)from, (u16 __force *)to);
		return 0;
	case 4:
		put_unaligned(*(u32 *)from, (u32 __force *)to);
		return 0;
	case 8:
		put_unaligned(*(u64 *)from, (u64 __force *)to);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	*((type *)dst) = get_unaligned((type *)(src));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	put_unaligned(*((type *)src), (type *)(dst));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user * from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
        __chk_user_ptr(ptr);                                    \
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	 }							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*ptr)) ?		\
		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) :	\
		-EFAULT;					\
})

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	const void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*ptr)) ?		\
		__get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
		((x) = (__typeof__(*(ptr)))0,-EFAULT);		\
})

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(to, n))
		return n;

	return __clear_user(to, n);
}

#include <asm/extable.h>

__must_check long strncpy_from_user(char *dst, const char __user *src,
				    long count);
__must_check long strnlen_user(const char __user *src, long n);

#endif /* __ASM_GENERIC_UACCESS_H */
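By v6.2 the segment machinery is gone entirely: access_ok() (now taking only an address and a size, supplied by asm-generic/access_ok.h) plus the might_fault() annotations are the only checks left, and strncpy_from_user()/strnlen_user() have become ordinary out-of-line functions. A short, hypothetical sketch of how a driver typically consumes these helpers is shown below; the ioctl commands, buffer, and handler names are invented for illustration and are not part of the header:

/* Illustrative only, not part of the header. */
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/minmax.h>
#include <linux/uaccess.h>

#define DEMO_GET	_IOR('d', 1, int)	/* made-up ioctl commands */
#define DEMO_SET	_IOW('d', 2, int)

static int demo_value;
static char demo_buf[64];

static long demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int __user *argp = (int __user *)arg;

	switch (cmd) {
	case DEMO_GET:
		/* put_user()/get_user() perform their own access_ok() check */
		return put_user(demo_value, argp);
	case DEMO_SET:
		return get_user(demo_value, argp);
	default:
		return -ENOTTY;
	}
}

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t len = min(count, sizeof(demo_buf));

	/* copy_to_user() returns the number of bytes it could NOT copy */
	if (copy_to_user(buf, demo_buf, len))
		return -EFAULT;

	return len;
}

Both put_user() and get_user() expand to 0 on success or -EFAULT on failure, which is why the ioctl handler above can simply return their result.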