v4.10.11 (include/asm-generic/uaccess.h):
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions, these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/sched.h>
#include <linux/string.h>

#include <asm/segment.h>

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif

#ifndef segment_eq
#define segment_eq(a, b) ((a).seg == (b).seg)
#endif

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))

/*
 * The architecture should really override this if possible, at least
 * doing a check on the get_fs()
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/*
 * architectures with an MMU should override these two
 */
#ifndef __copy_from_user
static inline __must_check long __copy_from_user(void *to,
		const void __user * from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 *)to = *(u8 __force *)from;
			return 0;
		case 2:
			*(u16 *)to = *(u16 __force *)from;
			return 0;
		case 4:
			*(u32 *)to = *(u32 __force *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 *)to = *(u64 __force *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy(to, (const void __force *)from, n);
	return 0;
}
#endif

#ifndef __copy_to_user
static inline __must_check long __copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch(n) {
		case 1:
			*(u8 __force *)to = *(u8 *)from;
			return 0;
		case 2:
			*(u16 __force *)to = *(u16 *)from;
			return 0;
		case 4:
			*(u32 __force *)to = *(u32 *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 __force *)to = *(u64 *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy((void __force *)to, from, n);
	return 0;
}
#endif

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
        __chk_user_ptr(ptr);                                    \
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	 }							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	void *__p = (ptr);					\
	might_fault();						\
	access_ok(VERIFY_WRITE, __p, sizeof(*ptr)) ?		\
		__put_user((x), ((__typeof__(*(ptr)) *)__p)) :	\
		-EFAULT;					\
})

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : size;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	const void *__p = (ptr);				\
	might_fault();						\
	access_ok(VERIFY_READ, __p, sizeof(*ptr)) ?		\
		__get_user((x), (__typeof__(*(ptr)) *)__p) :	\
		((x) = (__typeof__(*(ptr)))0,-EFAULT);		\
})

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	size_t n = __copy_from_user(x, ptr, size);
	if (unlikely(n)) {
		memset(x + (size - n), 0, n);
		return -EFAULT;
	}
	return 0;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));

#ifndef __copy_from_user_inatomic
#define __copy_from_user_inatomic __copy_from_user
#endif

#ifndef __copy_to_user_inatomic
#define __copy_to_user_inatomic __copy_to_user
#endif

static inline long copy_from_user(void *to,
		const void __user * from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = __copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}

static inline long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_to_user(to, from, n);
	else
		return n;
}

/*
 * Copy a null terminated string from userspace.
 */
#ifndef __strncpy_from_user
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	char *tmp;
	strncpy(dst, (const char __force *)src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return (tmp - dst);
}
#endif

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
#ifndef __strnlen_user
#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
#endif

/*
 * Unlike strnlen, strnlen_user includes the nul terminator in
 * its returned count. Callers should check for a returned value
 * greater than N as an indication the string is too long.
 */
static inline long strnlen_user(const char __user *src, long n)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return 0;
	return __strnlen_user(src, n);
}

static inline long strlen_user(const char __user *src)
{
	return strnlen_user(src, 32767);
}

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;

	return __clear_user(to, n);
}

#endif /* __ASM_GENERIC_UACCESS_H */
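For reference, driver code on this kernel series consumes the interface above roughly as follows: copy_from_user()/copy_to_user() move whole buffers and return the number of bytes left uncopied, while get_user()/put_user() transfer single scalars and return 0 or -EFAULT, with access_ok() still taking the VERIFY_READ/VERIFY_WRITE type argument. The fragment below is a minimal illustrative sketch, not code from the kernel tree; struct demo_args, the DEMO_IOC_* command numbers and demo_ioctl() are hypothetical names invented for this example.

/*
 * Hypothetical example (not from the kernel tree): typical use of the
 * v4.10-era uaccess API from an ioctl handler.
 */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_args {			/* hypothetical user<->kernel record */
	u32 flags;
	u64 value;
};

#define DEMO_IOC_SET	_IOW('d', 0x01, struct demo_args)	/* hypothetical */
#define DEMO_IOC_STATUS	_IOR('d', 0x02, u32)			/* hypothetical */

static long demo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	void __user *uptr = (void __user *)arg;
	struct demo_args args;
	u32 status = 0;			/* placeholder result */

	switch (cmd) {
	case DEMO_IOC_SET:
		/* bulk copy in; a non-zero return means bytes were not copied */
		if (copy_from_user(&args, uptr, sizeof(args)))
			return -EFAULT;
		/* ... act on args ... */
		return 0;
	case DEMO_IOC_STATUS:
		/* single scalar out; put_user() itself returns 0 or -EFAULT */
		return put_user(status, (u32 __user *)uptr);
	default:
		return -ENOTTY;
	}
}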
v6.8 (include/asm-generic/uaccess.h):
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions, these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/string.h>
#include <asm-generic/access_ok.h>

#ifdef CONFIG_UACCESS_MEMCPY
#include <asm/unaligned.h>

static __always_inline int
__get_user_fn(size_t size, const void __user *from, void *to)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 *)to = *((u8 __force *)from);
		return 0;
	case 2:
		*(u16 *)to = get_unaligned((u16 __force *)from);
		return 0;
	case 4:
		*(u32 *)to = get_unaligned((u32 __force *)from);
		return 0;
	case 8:
		*(u64 *)to = get_unaligned((u64 __force *)from);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}

}
#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

static __always_inline int
__put_user_fn(size_t size, void __user *to, void *from)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 __force *)to = *(u8 *)from;
		return 0;
	case 2:
		put_unaligned(*(u16 *)from, (u16 __force *)to);
		return 0;
	case 4:
		put_unaligned(*(u32 *)from, (u32 __force *)to);
		return 0;
	case 8:
		put_unaligned(*(u64 *)from, (u64 __force *)to);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	*((type *)dst) = get_unaligned((type *)(src));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	put_unaligned(*((type *)src), (type *)(dst));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user * from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr) \
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
        __chk_user_ptr(ptr);                                    \
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	 }							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*ptr)) ?		\
		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) :	\
		-EFAULT;					\
})

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(sizeof (*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	const void __user *__p = (ptr);				\
	might_fault();						\
	access_ok(__p, sizeof(*ptr)) ?		\
		__get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
		((x) = (__typeof__(*(ptr)))0,-EFAULT);		\
})

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(to, n))
		return n;

	return __clear_user(to, n);
}

#include <asm/extable.h>

__must_check long strncpy_from_user(char *dst, const char __user *src,
				    long count);
__must_check long strnlen_user(const char __user *src, long n);

#endif /* __ASM_GENERIC_UACCESS_H */
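The consumer-side pattern is largely unchanged on v6.8, but several differences are visible between the two listings: access_ok() has lost its VERIFY_READ/VERIFY_WRITE argument, the set_fs()/get_fs()/KERNEL_DS address-limit machinery is gone, the raw byte copies are now raw_copy_{from,to}_user() (defined here only for CONFIG_UACCESS_MEMCPY builds, using unaligned-safe accessors), __get_kernel_nofault()/__put_kernel_nofault() exist for kernel-space probing, the exception table definitions have moved to <asm/extable.h>, and strncpy_from_user()/strnlen_user() are only declared here and implemented out of line. The sketch below is a hypothetical illustration against this newer API, not kernel code; demo_read() and demo_name are invented names.

/*
 * Hypothetical example (not from the kernel tree): a read() handler
 * against the v6.8-era API.  The two-argument access_ok() is applied
 * internally by copy_to_user()/clear_user(); callers rarely need it.
 */
#include <linux/fs.h>
#include <linux/minmax.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static const char demo_name[] = "demo-device\n";	/* invented payload */

static ssize_t demo_read(struct file *filp, char __user *buf,
			 size_t count, loff_t *ppos)
{
	loff_t pos = *ppos;
	size_t n;

	if (pos < 0 || pos >= sizeof(demo_name))
		return 0;
	n = min(count, sizeof(demo_name) - (size_t)pos);

	/* copy_to_user() returns the number of bytes it could NOT copy */
	if (copy_to_user(buf, demo_name + pos, n))
		return -EFAULT;

	/* clear_user() zeroes the remainder of the caller's buffer */
	if (count > n && clear_user(buf + n, count - n))
		return -EFAULT;

	*ppos = pos + n;
	return n;
}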