v5.4
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#include <linux/compiler.h>
#include <linux/string.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm/extable_64.h>

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { ASI_P })
#define USER_DS     ((mm_segment_t) { ASI_AIUS })	/* har har har */

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})

#define segment_eq(a, b)  ((a).seg == (b).seg)

#define set_fs(val)								\
do {										\
	current_thread_info()->current_ds = (val).seg;				\
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));	\
} while(0)
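
/*
 * Illustrative sketch (editor's addition, not part of the header): the
 * classic, now-discouraged set_fs() pattern that these macros exist to
 * support.  Kernel code that wanted to reuse a helper taking a __user
 * pointer on a kernel buffer would temporarily widen the "segment" and
 * then restore it:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = some_helper_taking_user_ptr((void __user *)kbuf, len);
 *	set_fs(old_fs);
 *
 * some_helper_taking_user_ptr(), kbuf and len are hypothetical names used
 * only for illustration.  On sparc64 this works by rewriting the %asi
 * register between ASI_P (kernel) and ASI_AIUS (user), as set_fs() does
 * above.
 */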

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
	__chk_user_ptr(addr);                                           \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
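
/*
 * Worked example (editor's addition): for a non-constant size the check
 * above guards against wrap-around before comparing with the limit.
 * With addr = ULONG_MAX - 8 and size = 64, "addr += size" wraps, so
 * "addr < size" becomes true and the range is rejected even though the
 * wrapped sum would otherwise compare below the limit:
 *
 *	unsigned long addr  = ULONG_MAX - 8;
 *	unsigned long size  = 64;
 *	unsigned long limit = TASK_SIZE;	/+ or whatever limit the caller uses +/
 *
 *	__chk_range_not_ok(addr, size, limit) returns true (invalid)
 *	because addr + size overflows an unsigned long.
 */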

static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}
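
/*
 * Usage sketch (editor's addition): because user accesses go out through
 * the secondary ASI and simply fault on anything not mapped for the user,
 * sparc64 can afford to make access_ok() always succeed.  Callers still
 * follow the conventional, portable pattern:
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 *
 * ubuf, kbuf and len are placeholder names for a user pointer, a kernel
 * buffer and a length supplied by the caller.
 */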

void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
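
/*
 * Usage sketch (editor's addition): the "two values" the comment above
 * refers to are the transferred datum and an error code.  The error comes
 * back as the macro's value and the datum through the lvalue, so a typical
 * ioctl-style handler looks like:
 *
 *	u32 __user *uarg = (u32 __user *)arg;
 *	u32 val;
 *
 *	if (get_user(val, uarg))
 *		return -EFAULT;
 *	val |= SOME_FLAG;
 *	if (put_user(val, uarg))
 *		return -EFAULT;
 *
 * arg and SOME_FLAG are placeholders for the caller's ioctl argument and
 * whatever bit it wants to set; the access size is picked automatically
 * from the pointer type, as the comment promises.
 */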

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	}							\
	__pu_ret;						\
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({			     \
	register int __gu_ret;						     \
	register unsigned long __gu_val;				     \
	switch (size) {							     \
		case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
		case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
		case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
		case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;  \
		default:						     \
			__gu_val = 0;					     \
			__gu_ret = __get_user_bad();			     \
			break;						     \
	}								     \
	data = (__force type) __gu_val;					     \
	__gu_ret;							     \
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"clr	%1\n\t"						\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check raw_copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);

unsigned long __must_check raw_copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

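/*
 * Usage sketch (editor's addition): raw_copy_{from,to}_user() are the
 * architecture back ends behind the generic copy_from_user() and
 * copy_to_user() wrappers in <linux/uaccess.h>; INLINE_COPY_{FROM,TO}_USER
 * merely asks for those wrappers to be inlined here.  Both return the
 * number of bytes that could NOT be copied, so callers typically do:
 *
 *	struct foo_req req;			/+ hypothetical request struct +/
 *
 *	if (copy_from_user(&req, ubuf, sizeof(req)))
 *		return -EFAULT;
 *	...
 *	if (copy_to_user(ubuf, &req, sizeof(req)))
 *		return -EFAULT;
 *
 * struct foo_req and ubuf are placeholders for the caller's own types.
 */
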
unsigned long __must_check raw_copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strnlen_user(const char __user *str, long n);
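
/*
 * Usage sketch (editor's addition): strnlen_user() bounds how much of a
 * user string to pull in, and clear_user() zeroes user memory.  A minimal
 * (hypothetical) "copy a user string into a kernel page" helper might do:
 *
 *	long len = strnlen_user(ustr, PAGE_SIZE);
 *
 *	if (!len)			/+ fault while sizing the string +/
 *		return -EFAULT;
 *	if (len > PAGE_SIZE)		/+ not NUL-terminated within bounds +/
 *		return -EINVAL;
 *	if (copy_from_user(kbuf, ustr, len))
 *		return -EFAULT;
 *
 * ustr and kbuf are placeholders; real code would normally just call
 * strncpy_from_user() instead.
 */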

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* _ASM_UACCESS_H */
v4.6
 
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { ASI_P })
#define USER_DS     ((mm_segment_t) { ASI_AIUS })	/* har har har */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)

#define segment_eq(a, b)  ((a).seg == (b).seg)

#define set_fs(val)								\
do {										\
	current_thread_info()->current_ds = (val).seg;				\
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));	\
} while(0)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
	__chk_user_ptr(addr);                                           \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        unsigned int insn, fixup;
};
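
/*
 * Illustrative sketch (editor's addition): when one of the accessors below
 * faults, the trap handler looks the faulting PC up in this table and, if
 * it finds a matching entry, resumes at the fixup address instead of
 * treating it as a fatal kernel fault.  Conceptually (simplified; the real
 * lookup lives in the generic extable code and uses a sorted search):
 *
 *	const struct exception_table_entry *e;
 *
 *	for (e = __start___ex_table; e < __stop___ex_table; e++)
 *		if (e->insn == regs->tpc) {
 *			regs->tpc  = e->fixup;
 *			regs->tnpc = e->fixup + 4;
 *			return 1;	/+ handled +/
 *		}
 *	return 0;			/+ genuine kernel fault +/
 *
 * __start___ex_table/__stop___ex_table and the tpc/tnpc fixup follow the
 * usual sparc64 convention; treat the loop as pseudocode, not as the
 * actual implementation.
 */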

void __ret_efault(void);
void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	}							\
	__pu_ret;						\
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({			     \
	register int __gu_ret;						     \
	register unsigned long __gu_val;				     \
	switch (size) {							     \
		case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
		case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
		case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
		case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;  \
		default:						     \
			__gu_val = 0;					     \
			__gu_ret = __get_user_bad();			     \
			break;						     \
	}								     \
	data = (__force type) __gu_val;					     \
	__gu_ret;							     \
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"clr	%1\n\t"						\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check ___copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);
unsigned long copy_from_user_fixup(void *to, const void __user *from,
				   unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_from_user(to, from, size);

	if (unlikely(ret))
		ret = copy_from_user_fixup(to, from, size);

	return ret;
}
#define __copy_from_user copy_from_user

unsigned long __must_check ___copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
unsigned long copy_to_user_fixup(void __user *to, const void *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	unsigned long ret = ___copy_to_user(to, from, size);

	if (unlikely(ret))
		ret = copy_to_user_fixup(to, from, size);
	return ret;
}
#define __copy_to_user copy_to_user

unsigned long __must_check ___copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);
unsigned long copy_in_user_fixup(void __user *to, void __user *from,
				 unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	unsigned long ret = ___copy_in_user(to, from, size);

	if (unlikely(ret))
		ret = copy_in_user_fixup(to, from, size);
	return ret;
}
#define __copy_in_user copy_in_user

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */