v6.8: arch/sparc/include/asm/uaccess_64.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/mm_types.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm/pgtable.h>

#include <asm/processor.h>
#include <asm-generic/access_ok.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 */

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
	__chk_user_ptr(addr);                                           \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

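/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * wrapper showing how the overflow-safe check above would be used, with
 * TASK_SIZE as the limit a caller typically passes.
 */
static inline bool user_buffer_ok(const void __user *ptr, unsigned long len)
{
	/* __range_not_ok() is true when the range wraps or exceeds the limit */
	return !__range_not_ok(ptr, len, TASK_SIZE);
}
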
void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
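
/*
 * Illustrative sketch (not in the original header): the two-value pattern
 * the comment above describes. The error code comes back as the macro's
 * value and the datum through the first argument; "bump_user_int" is a
 * hypothetical helper.
 */
static inline int bump_user_int(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* nonzero on fault */
		return -EFAULT;
	return put_user(val + 1, uptr);	/* 0 on success */
}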

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_kernel_nofault(dst, src, type, label)			\
do {									\
	type *addr = (type __force *)(dst);				\
	type data = *(type *)src;					\
	register int __pu_ret;						\
	switch (sizeof(type)) {						\
	case 1: __put_kernel_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_kernel_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_kernel_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_kernel_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;			\
	}								\
	if (__pu_ret)							\
		goto label;						\
} while (0)
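
/*
 * Illustrative sketch (not in the original header): the nofault helpers
 * jump to the caller-supplied label on a fault rather than returning an
 * error code. "poke_kernel_word" is a hypothetical caller.
 */
static inline int poke_kernel_word(unsigned long *dst, unsigned long val)
{
	__put_kernel_nofault(dst, &val, unsigned long, fault);
	return 0;
fault:
	return -EFAULT;
}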

#define __put_kernel_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put kernel asm, inline. */\n"			\
	"1:\t"	"st"#size " %1, [%2]\n\t"				\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))
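
/*
 * Editorial note (not in the original header): in the asm pattern above,
 * label 1 is the access that may fault, label 2 is the success path, and
 * label 3 is the fixup handler recorded in __ex_table. On a fault the trap
 * code resumes at 3, which loads -EFAULT into the result register (in the
 * jmpl delay slot) and branches back to 2.
 */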

#define __put_user_nocheck(data, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	}							\
	__pu_ret;						\
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_kernel_nofault(dst, src, type, label)			     \
do {									     \
	type *addr = (type __force *)(src);				     \
	register int __gu_ret;						     \
	register unsigned long __gu_val;				     \
	switch (sizeof(type)) {						     \
		case 1: __get_kernel_asm(__gu_val, ub, addr, __gu_ret); break; \
		case 2: __get_kernel_asm(__gu_val, uh, addr, __gu_ret); break; \
		case 4: __get_kernel_asm(__gu_val, uw, addr, __gu_ret); break; \
		case 8: __get_kernel_asm(__gu_val, x, addr, __gu_ret); break;  \
		default:						     \
			__gu_val = 0;					     \
			__gu_ret = __get_user_bad();			     \
			break;						     \
	}								     \
	if (__gu_ret)							     \
		goto label;						     \
	*(type *)dst = (__force type) __gu_val;				     \
} while (0)
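
/*
 * Illustrative sketch (not in the original header): the read-side
 * counterpart of the example above; "peek_kernel_word" is hypothetical.
 */
static inline int peek_kernel_word(unsigned long *dst, const unsigned long *src)
{
	__get_kernel_nofault(dst, src, unsigned long, fault);
	return 0;
fault:
	return -EFAULT;
}
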
#define __get_kernel_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get kernel asm, inline. */\n"			\
	"1:\t"	"ld"#size " [%2], %1\n\t"				\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"clr	%1\n\t"						\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))

#define __get_user_nocheck(data, addr, size, type) ({			     \
	register int __gu_ret;						     \
	register unsigned long __gu_val;				     \
	switch (size) {							     \
		case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
		case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
		case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
		case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;  \
		default:						     \
			__gu_val = 0;					     \
			__gu_ret = __get_user_bad();			     \
			break;						     \
	}								     \
	data = (__force type) __gu_val;					     \
	__gu_ret;							     \
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"clr	%1\n\t"						\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check raw_copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);

unsigned long __must_check raw_copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
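
/*
 * Illustrative sketch (not in the original header): raw_copy_{from,to}_user()
 * are the arch backends behind the generic copy_from_user()/copy_to_user()
 * from <linux/uaccess.h>, which is what callers normally use. The struct
 * and function names here are hypothetical.
 */
struct example_args { int fd; unsigned int flags; };

static inline int fetch_example_args(struct example_args *dst,
				     const void __user *src)
{
	/* copy_from_user() returns the number of bytes it could NOT copy */
	if (copy_from_user(dst, src, sizeof(*dst)))
		return -EFAULT;
	return 0;
}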

unsigned long __must_check raw_copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strnlen_user(const char __user *str, long n);

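/*
 * Illustrative sketch (not in the original header): strnlen_user() returns
 * the string length including the terminating NUL, 0 on a fault, and a
 * value larger than n when no NUL appears within n bytes. The wrapper and
 * its PAGE_SIZE limit are hypothetical.
 */
static inline long user_string_len(const char __user *s)
{
	long len = strnlen_user(s, PAGE_SIZE);

	if (!len)
		return -EFAULT;		/* faulted while scanning */
	if (len > PAGE_SIZE)
		return -EINVAL;		/* unterminated within the limit */
	return len - 1;			/* length without the NUL */
}
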
struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* _ASM_UACCESS_H */
v4.10.11: arch/sparc/include/asm/uaccess_64.h
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#ifdef __KERNEL__
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/extable_64.h>
#endif

#ifndef __ASSEMBLY__

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erm, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS   ((mm_segment_t) { ASI_P })
#define USER_DS     ((mm_segment_t) { ASI_AIUS })	/* har har har */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds() (KERNEL_DS)

#define segment_eq(a, b)  ((a).seg == (b).seg)

#define set_fs(val)								\
do {										\
	current_thread_info()->current_ds = (val).seg;				\
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg));	\
} while(0)
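
/*
 * Illustrative sketch (not in the original header): the classic set_fs()
 * pattern these macros served, since removed from mainline. Code that
 * needed the uaccess routines to accept kernel pointers widened the
 * window and always restored it; the wrapper here is hypothetical.
 */
static inline ssize_t kernel_read_example(struct file *f, void *buf,
					  size_t count, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);	/* %asi is now ASI_P: kernel pointers pass */
	ret = vfs_read(f, (__force char __user *)buf, count, pos);
	set_fs(old_fs);		/* always restore the previous segment */
	return ret;
}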

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
	__chk_user_ptr(addr);                                           \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}

void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({			\
	register int __pu_ret;					\
	switch (size) {						\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;		\
	}							\
	__pu_ret;						\
})

#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({			     \
	register int __gu_ret;						     \
	register unsigned long __gu_val;				     \
	switch (size) {							     \
		case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
		case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
		case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
		case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;  \
		default:						     \
			__gu_val = 0;					     \
			__gu_ret = __get_user_bad();			     \
			break;						     \
	}								     \
	data = (__force type) __gu_val;					     \
	__gu_ret;							     \
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"clr	%1\n\t"						\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check ___copy_from_user(void *to,
					     const void __user *from,
					     unsigned long size);
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long size)
{
	check_object_size(to, size, false);

	return ___copy_from_user(to, from, size);
}
#define __copy_from_user copy_from_user

unsigned long __must_check ___copy_to_user(void __user *to,
					   const void *from,
					   unsigned long size);
static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long size)
{
	check_object_size(from, size, true);

	return ___copy_to_user(to, from, size);
}
#define __copy_to_user copy_to_user
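
/*
 * Editorial note (not in the original header): the check_object_size()
 * calls above are the hardened-usercopy hooks. With
 * CONFIG_HARDENED_USERCOPY they verify that the kernel-side object really
 * spans the region being copied; otherwise they compile away to nothing.
 */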

unsigned long __must_check ___copy_in_user(void __user *to,
					   const void __user *from,
					   unsigned long size);
static inline unsigned long __must_check
copy_in_user(void __user *to, void __user *from, unsigned long size)
{
	return ___copy_in_user(to, from, size);
}
#define __copy_in_user copy_in_user

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strlen_user(const char __user *str);
__must_check long strnlen_user(const char __user *str, long n);

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_UACCESS_H */