/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#include <linux/compiler.h>
#include <linux/string.h>
#include <linux/mm_types.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm/pgtable.h>

#include <asm/processor.h>
#include <asm-generic/access_ok.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 */
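
/*
 * Concretely, the "a"-suffixed alternate-space loads and stores in the
 * __get_user_asm()/__put_user_asm() macros below ("ldXa/stXa ... %asi")
 * go through the ASI held in the %asi register, which the kernel keeps
 * pointed at the user's secondary address space, while ordinary loads
 * and stores keep using the kernel's own mappings.
 */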

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit) \
({ \
	__chk_user_ptr(addr); \
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
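
/*
 * Worked example: with addr = 0xfffffffffffffff0 and a non-constant
 * size = 0x20, "addr += size" wraps around to 0x10, so the "addr < size"
 * test catches the overflow and the range is rejected, even though the
 * final "addr > limit" comparison on the wrapped value alone would not
 * have caught it.
 */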

void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({ \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({ \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	__chk_user_ptr(ptr); \
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)
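
/*
 * Typical use (an illustrative sketch, not part of this header): a
 * nonzero return from get_user()/put_user() means the access faulted
 * and is normally turned into -EFAULT by the caller, e.g.
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *
 * where uptr is an "int __user *" provided by the caller.
 */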

struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_kernel_nofault(dst, src, type, label) \
do { \
	type *addr = (type __force *)(dst); \
	type data = *(type *)src; \
	register int __pu_ret; \
	switch (sizeof(type)) { \
	case 1: __put_kernel_asm(data, b, addr, __pu_ret); break; \
	case 2: __put_kernel_asm(data, h, addr, __pu_ret); break; \
	case 4: __put_kernel_asm(data, w, addr, __pu_ret); break; \
	case 8: __put_kernel_asm(data, x, addr, __pu_ret); break; \
	default: __pu_ret = __put_user_bad(); break; \
	} \
	if (__pu_ret) \
		goto label; \
} while (0)
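
/*
 * __put_kernel_nofault()/__get_kernel_nofault() are the arch hooks used
 * by the generic non-faulting kernel accessors in mm/maccess.c; on a
 * faulting access they branch to the caller-supplied error label instead
 * of oopsing.  Illustrative use through the generic API:
 *
 *	long v;
 *
 *	if (copy_from_kernel_nofault(&v, kptr, sizeof(v)))
 *		return -EFAULT;
 */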

#define __put_kernel_asm(x, size, addr, ret) \
__asm__ __volatile__( \
	"/* Put kernel asm, inline. */\n" \
	"1:\t" "st"#size " %1, [%2]\n\t" \
	"clr %0\n" \
	"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
	"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\t" \
	".previous\n\n\t" \
	: "=r" (ret) : "r" (x), "r" (__m(addr)), \
	  "i" (-EFAULT))

#define __put_user_nocheck(data, addr, size) ({ \
	register int __pu_ret; \
	switch (size) { \
	case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
	case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
	case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
	case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
	default: __pu_ret = __put_user_bad(); break; \
	} \
	__pu_ret; \
})

#define __put_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
	"/* Put user asm, inline. */\n" \
	"1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
	"clr %0\n" \
	"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
	"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\t" \
	".previous\n\n\t" \
	: "=r" (ret) : "r" (x), "r" (__m(addr)), \
	  "i" (-EFAULT))

int __put_user_bad(void);

#define __get_kernel_nofault(dst, src, type, label) \
do { \
	type *addr = (type __force *)(src); \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	switch (sizeof(type)) { \
	case 1: __get_kernel_asm(__gu_val, ub, addr, __gu_ret); break; \
	case 2: __get_kernel_asm(__gu_val, uh, addr, __gu_ret); break; \
	case 4: __get_kernel_asm(__gu_val, uw, addr, __gu_ret); break; \
	case 8: __get_kernel_asm(__gu_val, x, addr, __gu_ret); break; \
	default: \
		__gu_val = 0; \
		__gu_ret = __get_user_bad(); \
		break; \
	} \
	if (__gu_ret) \
		goto label; \
	*(type *)dst = (__force type) __gu_val; \
} while (0)

#define __get_kernel_asm(x, size, addr, ret) \
__asm__ __volatile__( \
	"/* Get kernel asm, inline. */\n" \
	"1:\t" "ld"#size " [%2], %1\n\t" \
	"clr %0\n" \
	"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
	"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"clr %1\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\n\t" \
	".previous\n\t" \
	: "=r" (ret), "=r" (x) : "r" (__m(addr)), \
	  "i" (-EFAULT))

#define __get_user_nocheck(data, addr, size, type) ({ \
	register int __gu_ret; \
	register unsigned long __gu_val; \
	switch (size) { \
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \
	default: \
		__gu_val = 0; \
		__gu_ret = __get_user_bad(); \
		break; \
	} \
	data = (__force type) __gu_val; \
	__gu_ret; \
})

#define __get_user_asm(x, size, addr, ret) \
__asm__ __volatile__( \
	"/* Get user asm, inline. */\n" \
	"1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \
	"clr %0\n" \
	"2:\n\n\t" \
	".section .fixup,#alloc,#execinstr\n\t" \
	".align 4\n" \
	"3:\n\t" \
	"sethi %%hi(2b), %0\n\t" \
	"clr %1\n\t" \
	"jmpl %0 + %%lo(2b), %%g0\n\t" \
	" mov %3, %0\n\n\t" \
	".previous\n\t" \
	".section __ex_table,\"a\"\n\t" \
	".align 4\n\t" \
	".word 1b, 3b\n\n\t" \
	".previous\n\t" \
	: "=r" (ret), "=r" (x) : "r" (__m(addr)), \
	  "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check raw_copy_from_user(void *to,
					      const void __user *from,
					      unsigned long size);

unsigned long __must_check raw_copy_to_user(void __user *to,
					    const void *from,
					    unsigned long size);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
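
/*
 * raw_copy_{from,to}_user() are the backends behind the generic
 * copy_from_user()/copy_to_user() wrappers in <linux/uaccess.h>, which
 * add the access_ok() checking; INLINE_COPY_{FROM,TO}_USER requests that
 * those wrappers be inlined.  Illustrative use of the generic API (a
 * sketch, with ubuf supplied by the caller):
 *
 *	char buf[64];
 *
 *	if (copy_from_user(buf, ubuf, sizeof(buf)))
 *		return -EFAULT;
 */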

unsigned long __must_check raw_copy_in_user(void __user *to,
					    const void __user *from,
					    unsigned long size);

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strnlen_user(const char __user *str, long n);

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* _ASM_UACCESS_H */