Loading...
1/* SPDX-License-Identifier: GPL-2.0 */
2
3#ifndef __ASM_CSKY_UACCESS_H
4#define __ASM_CSKY_UACCESS_H
5
6/*
7 * __put_user_fn
8 */
9extern int __put_user_bad(void);
10
11#define __put_user_asm_b(x, ptr, err) \
12do { \
13 int errcode; \
14 __asm__ __volatile__( \
15 "1: stb %1, (%2,0) \n" \
16 " br 3f \n" \
17 "2: mov %0, %3 \n" \
18 " br 3f \n" \
19 ".section __ex_table, \"a\" \n" \
20 ".align 2 \n" \
21 ".long 1b,2b \n" \
22 ".previous \n" \
23 "3: \n" \
24 : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode) \
25 : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT) \
26 : "memory"); \
27} while (0)
28
/*
 * Store one halfword (16 bits) at user address @ptr; same fixup scheme
 * as __put_user_asm_b: a faulting "sth" sets @err to -EFAULT.
 */
#define __put_user_asm_h(x, ptr, err)			\
do {							\
	int errcode;					\
	__asm__ __volatile__(				\
	"1:	sth	%1, (%2,0)\n"			\
	"	br	3f\n"				\
	"2:	mov	%0, %3\n"			\
	"	br	3f\n"				\
	".section __ex_table, \"a\"\n"			\
	".align	2\n"					\
	".long	1b,2b\n"				\
	".previous\n"					\
	"3:\n"						\
	: "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)	\
	: "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)	\
	: "memory");					\
} while (0)

/*
 * Store one word (32 bits) at user address @ptr; a faulting "stw"
 * sets @err to -EFAULT via the exception table fixup.
 */
#define __put_user_asm_w(x, ptr, err)			\
do {							\
	int errcode;					\
	__asm__ __volatile__(				\
	"1:	stw	%1, (%2,0)\n"			\
	"	br	3f\n"				\
	"2:	mov	%0, %3\n"			\
	"	br	3f\n"				\
	".section __ex_table,\"a\"\n"			\
	".align	2\n"					\
	".long	1b, 2b\n"				\
	".previous\n"					\
	"3:\n"						\
	: "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)	\
	: "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)	\
	: "memory");					\
} while (0)

/*
 * Store a 64-bit value to user address @ptr, one 32-bit word at a
 * time.  @x is the KERNEL-side pointer to the source value; only the
 * two "stw" stores to user memory (labels 1 and 2) can fault, and
 * either fault jumps to label 3 which sets @err to -EFAULT.
 */
#define __put_user_asm_64(x, ptr, err)			\
do {							\
	int tmp;					\
	int errcode;					\
							\
	__asm__ __volatile__(				\
	"	ldw	%3, (%1, 0)\n"			\
	"1:	stw	%3, (%2, 0)\n"			\
	"	ldw	%3, (%1, 4)\n"			\
	"2:	stw	%3, (%2, 4)\n"			\
	"	br	4f\n"				\
	"3:	mov	%0, %4\n"			\
	"	br	4f\n"				\
	".section __ex_table, \"a\"\n"			\
	".align	2\n"					\
	".long	1b, 3b\n"				\
	".long	2b, 3b\n"				\
	".previous\n"					\
	"4:\n"						\
	: "=r"(err), "=r"(x), "=r"(ptr),		\
	  "=r"(tmp), "=r"(errcode)			\
	: "0"(err), "1"(x), "2"(ptr), "3"(0),		\
	  "4"(-EFAULT)					\
	: "memory");					\
} while (0)

91static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
92{
93 int retval = 0;
94 u32 tmp;
95
96 switch (size) {
97 case 1:
98 tmp = *(u8 *)x;
99 __put_user_asm_b(tmp, ptr, retval);
100 break;
101 case 2:
102 tmp = *(u16 *)x;
103 __put_user_asm_h(tmp, ptr, retval);
104 break;
105 case 4:
106 tmp = *(u32 *)x;
107 __put_user_asm_w(tmp, ptr, retval);
108 break;
109 case 8:
110 __put_user_asm_64(x, (u64 *)ptr, retval);
111 break;
112 }
113
114 return retval;
115}
116#define __put_user_fn __put_user_fn
117
/*
 * __get_user_fn
 */
extern int __get_user_bad(void);

/*
 * Load one 8/16/32-bit value from user address @ptr using the load
 * instruction named by the string literal @ins ("ldb"/"ldh"/"ldw").
 * On success @err is set to 0; on a fault the fixup at label 2 sets
 * @err to -EFAULT and zeroes @x.
 */
#define __get_user_asm_common(x, ptr, ins, err)		\
do {							\
	int errcode;					\
	__asm__ __volatile__(				\
	"1:	" ins "	%1, (%4, 0)\n"			\
	"	br	3f\n"				\
	"2:	mov	%0, %2\n"			\
	"	movi	%1, 0\n"			\
	"	br	3f\n"				\
	".section __ex_table,\"a\"\n"			\
	".align	2\n"					\
	".long	1b, 2b\n"				\
	".previous\n"					\
	"3:\n"						\
	: "=r"(err), "=r"(x), "=r"(errcode)		\
	: "0"(0), "r"(ptr), "2"(-EFAULT)		\
	: "memory");					\
} while (0)

/*
 * Load a 64-bit value from user address @ptr into the kernel buffer
 * @x, one 32-bit word at a time.  Only the two user "ldw" loads
 * (labels 1 and 2) can fault; either fault jumps to label 3, which
 * sets @err to -EFAULT.  On success @err keeps the value passed in,
 * so the caller must pre-clear it to 0.
 */
#define __get_user_asm_64(x, ptr, err)			\
do {							\
	int tmp;					\
	int errcode;					\
							\
	__asm__ __volatile__(				\
	"1:	ldw	%3, (%2, 0)\n"			\
	"	stw	%3, (%1, 0)\n"			\
	"2:	ldw	%3, (%2, 4)\n"			\
	"	stw	%3, (%1, 4)\n"			\
	"	br	4f\n"				\
	"3:	mov	%0, %4\n"			\
	"	br	4f\n"				\
	".section __ex_table, \"a\"\n"			\
	".align	2\n"					\
	".long	1b, 3b\n"				\
	".long	2b, 3b\n"				\
	".previous\n"					\
	"4:\n"						\
	: "=r"(err), "=r"(x), "=r"(ptr),		\
	  "=r"(tmp), "=r"(errcode)			\
	: "0"(err), "1"(x), "2"(ptr), "3"(0),		\
	  "4"(-EFAULT)					\
	: "memory");					\
} while (0)

168static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
169{
170 int retval;
171 u32 tmp;
172
173 switch (size) {
174 case 1:
175 __get_user_asm_common(tmp, ptr, "ldb", retval);
176 *(u8 *)x = (u8)tmp;
177 break;
178 case 2:
179 __get_user_asm_common(tmp, ptr, "ldh", retval);
180 *(u16 *)x = (u16)tmp;
181 break;
182 case 4:
183 __get_user_asm_common(tmp, ptr, "ldw", retval);
184 *(u32 *)x = (u32)tmp;
185 break;
186 case 8:
187 __get_user_asm_64(x, ptr, retval);
188 break;
189 }
190
191 return retval;
192}
193#define __get_user_fn __get_user_fn
194
195unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n);
196unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n);
197
198unsigned long __clear_user(void __user *to, unsigned long n);
199#define __clear_user __clear_user
200
201#include <asm-generic/uaccess.h>
202
203#endif /* __ASM_CSKY_UACCESS_H */
1/* SPDX-License-Identifier: GPL-2.0 */
2// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3
4#ifndef __ASM_CSKY_UACCESS_H
5#define __ASM_CSKY_UACCESS_H
6
7/*
8 * User space memory access functions
9 */
10#include <linux/compiler.h>
11#include <linux/errno.h>
12#include <linux/types.h>
13#include <linux/sched.h>
14#include <linux/string.h>
15#include <linux/version.h>
16#include <asm/segment.h>
17
18static inline int access_ok(const void *addr, unsigned long size)
19{
20 unsigned long limit = current_thread_info()->addr_limit.seg;
21
22 return (((unsigned long)addr < limit) &&
23 ((unsigned long)(addr + size) < limit));
24}
25
26#define __addr_ok(addr) (access_ok(addr, 0))
27
extern int __put_user_bad(void);

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * Ckcore, we can just do these as direct assignments. (Of course, the
 * exception handling means that it's no longer "just"...)
 */

/* Checked variants verify the address range; __-variants do not. */
#define put_user(x, ptr) \
	__put_user_check((x), (ptr), sizeof(*(ptr)))

#define __put_user(x, ptr) \
	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))

#define __ptr(x) ((unsigned long *)(x))

#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * Unchecked put: caller is responsible for access_ok().  Evaluates to
 * 0 on success or -EFAULT on a faulting store; a NULL pointer is
 * silently skipped and yields 0.
 */
#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err = 0;					\
	typeof(*(ptr)) *__pu_addr = (ptr);			\
	typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x);		\
	if (__pu_addr)						\
		__put_user_size(__pu_val, (__pu_addr), (size),	\
				__pu_err);			\
	__pu_err;						\
})

/*
 * Checked put: validates the destination range with access_ok() first.
 * Evaluates to 0 on success, -EFAULT if the range is bad, NULL, or
 * the store faults.
 */
#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	typeof(*(ptr)) *__pu_addr = (ptr);			\
	typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x);		\
	if (access_ok(__pu_addr, size) && __pu_addr)		\
		__put_user_size(__pu_val, __pu_addr, (size), __pu_err); \
	__pu_err;						\
})

/*
 * Dispatch a user store of @size bytes to the matching asm helper.
 * Unsupported sizes resolve to the undefined __put_user_bad(), so
 * they fail at link time.
 */
#define __put_user_size(x, ptr, size, retval)		\
do {							\
	retval = 0;					\
	switch (size) {					\
	case 1:						\
		__put_user_asm_b(x, ptr, retval);	\
		break;					\
	case 2:						\
		__put_user_asm_h(x, ptr, retval);	\
		break;					\
	case 4:						\
		__put_user_asm_w(x, ptr, retval);	\
		break;					\
	case 8:						\
		__put_user_asm_64(x, ptr, retval);	\
		break;					\
	default:					\
		__put_user_bad();			\
	}						\
} while (0)

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * Note that PC at a fault is the address *after* the faulting
 * instruction.
 */
/* Byte store to user space; fault fixup sets err to -EFAULT. */
#define __put_user_asm_b(x, ptr, err)			\
do {							\
	int errcode;					\
	asm volatile(					\
	"1:	stb	%1, (%2,0)\n"			\
	"	br	3f\n"				\
	"2:	mov	%0, %3\n"			\
	"	br	3f\n"				\
	".section __ex_table, \"a\"\n"			\
	".align	2\n"					\
	".long	1b,2b\n"				\
	".previous\n"					\
	"3:\n"						\
	: "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)	\
	: "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)	\
	: "memory");					\
} while (0)

/* Halfword store to user space; fault fixup sets err to -EFAULT. */
#define __put_user_asm_h(x, ptr, err)			\
do {							\
	int errcode;					\
	asm volatile(					\
	"1:	sth	%1, (%2,0)\n"			\
	"	br	3f\n"				\
	"2:	mov	%0, %3\n"			\
	"	br	3f\n"				\
	".section __ex_table, \"a\"\n"			\
	".align	2\n"					\
	".long	1b,2b\n"				\
	".previous\n"					\
	"3:\n"						\
	: "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)	\
	: "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)	\
	: "memory");					\
} while (0)

/* Word store to user space; fault fixup sets err to -EFAULT. */
#define __put_user_asm_w(x, ptr, err)			\
do {							\
	int errcode;					\
	asm volatile(					\
	"1:	stw	%1, (%2,0)\n"			\
	"	br	3f\n"				\
	"2:	mov	%0, %3\n"			\
	"	br	3f\n"				\
	".section __ex_table,\"a\"\n"			\
	".align	2\n"					\
	".long	1b, 2b\n"				\
	".previous\n"					\
	"3:\n"						\
	: "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)	\
	: "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)	\
	: "memory");					\
} while (0)

/*
 * 64-bit store to user space: the value is spilled to a local (src)
 * so the asm can load it word-by-word through psrc; only the two
 * "stw" stores to user memory can fault.
 */
#define __put_user_asm_64(x, ptr, err)				\
do {								\
	int tmp;						\
	int errcode;						\
	typeof(*(ptr)) src = (typeof(*(ptr)))x;			\
	typeof(*(ptr)) *psrc = &src;				\
								\
	asm volatile(						\
	"	ldw	%3, (%1, 0)\n"				\
	"1:	stw	%3, (%2, 0)\n"				\
	"	ldw	%3, (%1, 4)\n"				\
	"2:	stw	%3, (%2, 4)\n"				\
	"	br	4f\n"					\
	"3:	mov	%0, %4\n"				\
	"	br	4f\n"					\
	".section __ex_table, \"a\"\n"				\
	".align	2\n"						\
	".long	1b, 3b\n"					\
	".long	2b, 3b\n"					\
	".previous\n"						\
	"4:\n"							\
	: "=r"(err), "=r"(psrc), "=r"(ptr),			\
	  "=r"(tmp), "=r"(errcode)				\
	: "0"(err), "1"(psrc), "2"(ptr), "3"(0), "4"(-EFAULT)	\
	: "memory");						\
} while (0)

/*
 * Unchecked get: caller is responsible for access_ok().  __gu_err is
 * written by __get_user_size on every path (including the
 * __get_user_bad default branch).
 */
#define __get_user_nocheck(x, ptr, size)		\
({							\
	long __gu_err;					\
	__get_user_size(x, (ptr), (size), __gu_err);	\
	__gu_err;					\
})

/*
 * Checked get: validates the source range with access_ok() first.
 * Evaluates to 0 on success, -EFAULT on a bad range, NULL pointer,
 * or faulting load.
 */
#define __get_user_check(x, ptr, size)				\
({								\
	int __gu_err = -EFAULT;					\
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);	\
	if (access_ok(__gu_ptr, size) && __gu_ptr)		\
		__get_user_size(x, __gu_ptr, size, __gu_err);	\
	__gu_err;						\
})

/*
 * Dispatch a user load of @size bytes to the matching asm helper.
 * Note: unlike the put side there is no 64-bit case here; unsupported
 * sizes zero @x and fail at link time via __get_user_bad().
 */
#define __get_user_size(x, ptr, size, retval)			\
do {								\
	switch (size) {						\
	case 1:							\
		__get_user_asm_common((x), ptr, "ldb", retval);	\
		break;						\
	case 2:							\
		__get_user_asm_common((x), ptr, "ldh", retval);	\
		break;						\
	case 4:							\
		__get_user_asm_common((x), ptr, "ldw", retval);	\
		break;						\
	default:						\
		x = 0;						\
		(retval) = __get_user_bad();			\
	}							\
} while (0)

/*
 * Load one 8/16/32-bit value from user space with instruction @ins.
 * Success sets @err to 0; a fault runs the fixup at label 2, setting
 * @err to -EFAULT and zeroing @x.
 */
#define __get_user_asm_common(x, ptr, ins, err)		\
do {							\
	int errcode;					\
	asm volatile(					\
	"1:	" ins "	%1, (%4,0)\n"			\
	"	br	3f\n"				\
	/* Fix up codes */				\
	"2:	mov	%0, %2\n"			\
	"	movi	%1, 0\n"			\
	"	br	3f\n"				\
	".section __ex_table,\"a\"\n"			\
	".align	2\n"					\
	".long	1b, 2b\n"				\
	".previous\n"					\
	"3:\n"						\
	: "=r"(err), "=r"(x), "=r"(errcode)		\
	: "0"(0), "r"(ptr), "2"(-EFAULT)		\
	: "memory");					\
} while (0)

extern int __get_user_bad(void);

/*
 * Copy @n bytes from kernel @from to user @to.  Word-aligned bulk
 * path copies 16 bytes per iteration, then 4, then single bytes.
 * On a faulting user store, the __ex_table fixups adjust %0 (the
 * remaining count) and land at label 13, leaving @n as the number of
 * bytes NOT copied.
 * NOTE(review): the fixup arithmetic (labels 7/8/12) only accounts at
 * word granularity within a burst — preserved exactly as written.
 */
#define ___copy_to_user(to, from, n)			\
do {							\
	int w0, w1, w2, w3;				\
	asm volatile(					\
	"0:	cmpnei	%1, 0\n"			\
	"	bf	8f\n"				\
	"	mov	%3, %1\n"			\
	"	or	%3, %2\n"			\
	"	andi	%3, 3\n"			\
	"	cmpnei	%3, 0\n"			\
	"	bf	1f\n"				\
	"	br	5f\n"				\
	"1:	cmplti	%0, 16\n" /* 4W */		\
	"	bt	3f\n"				\
	"	ldw	%3, (%2, 0)\n"			\
	"	ldw	%4, (%2, 4)\n"			\
	"	ldw	%5, (%2, 8)\n"			\
	"	ldw	%6, (%2, 12)\n"			\
	"2:	stw	%3, (%1, 0)\n"			\
	"9:	stw	%4, (%1, 4)\n"			\
	"10:	stw	%5, (%1, 8)\n"			\
	"11:	stw	%6, (%1, 12)\n"			\
	"	addi	%2, 16\n"			\
	"	addi	%1, 16\n"			\
	"	subi	%0, 16\n"			\
	"	br	1b\n"				\
	"3:	cmplti	%0, 4\n" /* 1W */		\
	"	bt	5f\n"				\
	"	ldw	%3, (%2, 0)\n"			\
	"4:	stw	%3, (%1, 0)\n"			\
	"	addi	%2, 4\n"			\
	"	addi	%1, 4\n"			\
	"	subi	%0, 4\n"			\
	"	br	3b\n"				\
	"5:	cmpnei	%0, 0\n" /* 1B */		\
	"	bf	13f\n"				\
	"	ldb	%3, (%2, 0)\n"			\
	"6:	stb	%3, (%1, 0)\n"			\
	"	addi	%2, 1\n"			\
	"	addi	%1, 1\n"			\
	"	subi	%0, 1\n"			\
	"	br	5b\n"				\
	"7:	subi	%0, 4\n"			\
	"8:	subi	%0, 4\n"			\
	"12:	subi	%0, 4\n"			\
	"	br	13f\n"				\
	".section __ex_table, \"a\"\n"			\
	".align	2\n"					\
	".long	2b, 13f\n"				\
	".long	4b, 13f\n"				\
	".long	6b, 13f\n"				\
	".long	9b, 12b\n"				\
	".long	10b, 8b\n"				\
	".long	11b, 7b\n"				\
	".previous\n"					\
	"13:\n"						\
	: "=r"(n), "=r"(to), "=r"(from), "=r"(w0),	\
	  "=r"(w1), "=r"(w2), "=r"(w3)			\
	: "0"(n), "1"(to), "2"(from)			\
	: "memory");					\
} while (0)

/*
 * Copy @n bytes from user @from to kernel @to.  Word-aligned bulk
 * path copies 8 bytes per inner step (16 per iteration), then 4,
 * then single bytes.  A faulting user load runs a fixup (labels
 * 8/9/13) that flushes any word already fetched and adjusts %0, then
 * falls through to label 7; @n is left as the number of bytes NOT
 * copied.
 * NOTE(review): the fixup paths rely on the condition flag set by the
 * last cmp before the fault — preserved exactly as written.
 */
#define ___copy_from_user(to, from, n)			\
do {							\
	int tmp;					\
	int nsave;					\
	asm volatile(					\
	"0:	cmpnei	%1, 0\n"			\
	"	bf	7f\n"				\
	"	mov	%3, %1\n"			\
	"	or	%3, %2\n"			\
	"	andi	%3, 3\n"			\
	"	cmpnei	%3, 0\n"			\
	"	bf	1f\n"				\
	"	br	5f\n"				\
	"1:	cmplti	%0, 16\n"			\
	"	bt	3f\n"				\
	"2:	ldw	%3, (%2, 0)\n"			\
	"10:	ldw	%4, (%2, 4)\n"			\
	"	stw	%3, (%1, 0)\n"			\
	"	stw	%4, (%1, 4)\n"			\
	"11:	ldw	%3, (%2, 8)\n"			\
	"12:	ldw	%4, (%2, 12)\n"			\
	"	stw	%3, (%1, 8)\n"			\
	"	stw	%4, (%1, 12)\n"			\
	"	addi	%2, 16\n"			\
	"	addi	%1, 16\n"			\
	"	subi	%0, 16\n"			\
	"	br	1b\n"				\
	"3:	cmplti	%0, 4\n"			\
	"	bt	5f\n"				\
	"4:	ldw	%3, (%2, 0)\n"			\
	"	stw	%3, (%1, 0)\n"			\
	"	addi	%2, 4\n"			\
	"	addi	%1, 4\n"			\
	"	subi	%0, 4\n"			\
	"	br	3b\n"				\
	"5:	cmpnei	%0, 0\n"			\
	"	bf	7f\n"				\
	"6:	ldb	%3, (%2, 0)\n"			\
	"	stb	%3, (%1, 0)\n"			\
	"	addi	%2, 1\n"			\
	"	addi	%1, 1\n"			\
	"	subi	%0, 1\n"			\
	"	br	5b\n"				\
	"8:	stw	%3, (%1, 0)\n"			\
	"	subi	%0, 4\n"			\
	"	bf	7f\n"				\
	"9:	subi	%0, 8\n"			\
	"	bf	7f\n"				\
	"13:	stw	%3, (%1, 8)\n"			\
	"	subi	%0, 12\n"			\
	"	bf	7f\n"				\
	".section __ex_table, \"a\"\n"			\
	".align	2\n"					\
	".long	2b, 7f\n"				\
	".long	4b, 7f\n"				\
	".long	6b, 7f\n"				\
	".long	10b, 8b\n"				\
	".long	11b, 9b\n"				\
	".long	12b,13b\n"				\
	".previous\n"					\
	"7:\n"						\
	: "=r"(n), "=r"(to), "=r"(from), "=r"(nsave),	\
	  "=r"(tmp)					\
	: "0"(n), "1"(to), "2"(from)			\
	: "memory");					\
} while (0)

385unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n);
386unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n);
387
388unsigned long clear_user(void *to, unsigned long n);
389unsigned long __clear_user(void __user *to, unsigned long n);
390
391long strncpy_from_user(char *dst, const char *src, long count);
392long __strncpy_from_user(char *dst, const char *src, long count);
393
394/*
395 * Return the size of a string (including the ending 0)
396 *
397 * Return 0 on exception, a value greater than N if too long
398 */
399long strnlen_user(const char *src, long n);
400
401#define strlen_user(str) strnlen_user(str, 32767)
402
403struct exception_table_entry {
404 unsigned long insn;
405 unsigned long nextinsn;
406};
407
408extern int fixup_exception(struct pt_regs *regs);
409
410#endif /* __ASM_CSKY_UACCESS_H */