/* SPDX-License-Identifier: GPL-2.0 */
/*
 * uaccess.h: User space memory access functions.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/compiler.h>
#include <linux/string.h>

#include <asm/processor.h>
#include <asm-generic/access_ok.h>

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	void __user *__pu_addr = (ptr); \
	__chk_user_ptr(ptr); \
	__put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
})

#define get_user(x, ptr) ({ \
	const void __user *__gu_addr = (ptr); \
	__chk_user_ptr(ptr); \
	__get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
})
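
/*
 * Example (a minimal editor's sketch, not part of the original header;
 * the function and variable names are hypothetical): both macros
 * evaluate to 0 on success and -EFAULT on a faulting access, with the
 * value transferred through the first argument.
 *
 *	static int frob_value(unsigned int __user *uarg)
 *	{
 *		unsigned int val;
 *
 *		if (get_user(val, uarg))
 *			return -EFAULT;
 *		val |= 1;
 *		return put_user(val, uarg);
 *	}
 */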

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))
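
/*
 * Example (a minimal editor's sketch; the names are hypothetical): one
 * explicit access_ok() check covers a whole user array, after which the
 * unchecked variants avoid re-validating the address on every iteration.
 *
 *	static int sum_words(const unsigned int __user *uarr, unsigned int n,
 *			     unsigned int *sum)
 *	{
 *		unsigned int i, tmp, total = 0;
 *
 *		if (!access_ok(uarr, n * sizeof(*uarr)))
 *			return -EFAULT;
 *		for (i = 0; i < n; i++) {
 *			if (__get_user(tmp, uarr + i))
 *				return -EFAULT;
 *			total += tmp;
 *		}
 *		*sum = total;
 *		return 0;
 *	}
 */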

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct __user *)(x))
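
/*
 * __m() casts a user address to a pointer to this oversized dummy
 * struct, so the "m" operands in the asm statements below tell the
 * compiler that a large region of memory is accessed, not just a
 * single word at that address.
 */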

#define __put_user_check(x, addr, size) ({ \
	register int __pu_ret; \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__put_user_asm(x, b, addr, __pu_ret); \
			break; \
		case 2: \
			__put_user_asm(x, h, addr, __pu_ret); \
			break; \
		case 4: \
			__put_user_asm(x, , addr, __pu_ret); \
			break; \
		case 8: \
			__put_user_asm(x, d, addr, __pu_ret); \
			break; \
		default: \
			__pu_ret = __put_user_bad(); \
			break; \
		} \
	} else { \
		__pu_ret = -EFAULT; \
	} \
	__pu_ret; \
})

#define __put_user_nocheck(x, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(x, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(x, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(x, , addr, __pu_ret); break;	\
	case 8: __put_user_asm(x, d, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	} \
	__pu_ret; \
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size " %1, %2\n\t"					\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"b	2b\n\t"						\
		" mov	%3, %0\n\t"					\
		".previous\n\n\t"					\
		".section __ex_table,#alloc\n\t"			\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=&r" (ret) : "r" (x), "m" (*__m(addr)),		\
		 "i" (-EFAULT))
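
/*
 * How the fault handling works (the same pattern is used by
 * __get_user_asm() below): the store at label 1 is the instruction that
 * may fault.  The __ex_table entry ".word 1b, 3b" tells the trap handler
 * that a fault at 1 should resume at the fixup code at 3, which branches
 * back to 2 with -EFAULT placed in the return register by the mov in the
 * branch delay slot; on the non-faulting path the clr leaves 0 there
 * instead.
 */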

int __put_user_bad(void);

#define __get_user_check(x, addr, size, type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	if (__access_ok(addr, size)) { \
		switch (size) { \
		case 1: \
			__get_user_asm(__gu_val, ub, addr, __gu_ret); \
			break; \
		case 2: \
			__get_user_asm(__gu_val, uh, addr, __gu_ret); \
			break; \
		case 4: \
			__get_user_asm(__gu_val, , addr, __gu_ret); \
			break; \
		case 8: \
			__get_user_asm(__gu_val, d, addr, __gu_ret); \
			break; \
		default: \
			__gu_val = 0; \
			__gu_ret = __get_user_bad(); \
			break; \
		} \
	} else { \
		__gu_val = 0; \
		__gu_ret = -EFAULT; \
	} \
	x = (__force type) __gu_val; \
	__gu_ret; \
})
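
/*
 * Note that the failure paths above (and the "clr %1" in the asm fixup
 * below) zero the fetched value first, so a failed get_user() never
 * leaves stale or uninitialized data in "x".
 */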

#define __get_user_nocheck(x, addr, size, type) ({			\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (size) {							\
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
	case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break;	\
	case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break;	\
	default:							\
		__gu_val = 0;						\
		__gu_ret = __get_user_bad();				\
		break;							\
	}								\
	x = (__force type) __gu_val;					\
	__gu_ret;							\
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size " %2, %1\n\t"					\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"clr	%1\n\t"						\
		"b	2b\n\t"						\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,#alloc\n\t"			\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);

static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user(to, (__force void __user *) from, n);
}

static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user((__force void __user *) to, from, n);
}
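
/*
 * Like all raw_copy_{to,from}_user() implementations, these return the
 * number of bytes left uncopied, i.e. 0 on complete success.  Both
 * directions funnel into the same __copy_user() routine, hence the
 * __force casts to satisfy sparse's address-space checking.
 */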

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret;

	__asm__ __volatile__ (
		"mov %2, %%o1\n"
		"call __bzero\n\t"
		" mov %1, %%o0\n\t"
		"mov %%o0, %0\n"
		: "=r" (ret) : "r" (addr), "r" (size) :
		"o0", "o1", "o2", "o3", "o4", "o5", "o7",
		"g1", "g2", "g3", "g4", "g5", "g7", "cc");

	return ret;
}
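
/*
 * __clear_user() above hands the work to the library routine __bzero():
 * the mov in the delay slot of the call places the address in %o0 before
 * the call takes effect, %o1 carries the length, and the clobber list
 * names the call-clobbered registers so the compiler preserves anything
 * live across the asm.  The value copied back out of %o0 is returned as
 * the number of bytes that could not be zeroed.
 */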

static inline unsigned long clear_user(void __user *addr, unsigned long n)
{
	if (n && __access_ok(addr, n))
		return __clear_user(addr, n);
	else
		return n;
}
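
/*
 * Example (a minimal editor's sketch; ubuf and len are hypothetical):
 * a nonzero return means some bytes could not be cleared, which callers
 * conventionally turn into -EFAULT.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */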

__must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASM_UACCESS_H */