/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_UACCESS_H
#define __ASM_CSKY_UACCESS_H

/*
 * __put_user_fn
 */
extern int __put_user_bad(void);

#define __put_user_asm_b(x, ptr, err)                           \
do {                                                            \
        int errcode;                                            \
        __asm__ __volatile__(                                   \
        "1:   stb %1, (%2,0)        \n"                         \
        "     br 3f                 \n"                         \
        "2:   mov %0, %3            \n"                         \
        "     br 3f                 \n"                         \
        ".section __ex_table, \"a\" \n"                         \
        ".align 2                   \n"                         \
        ".long 1b, 2b               \n"                         \
        ".previous                  \n"                         \
        "3:                         \n"                         \
        : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)          \
        : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)              \
        : "memory");                                            \
} while (0)

#define __put_user_asm_h(x, ptr, err)                           \
do {                                                            \
        int errcode;                                            \
        __asm__ __volatile__(                                   \
        "1:   sth %1, (%2,0)        \n"                         \
        "     br 3f                 \n"                         \
        "2:   mov %0, %3            \n"                         \
        "     br 3f                 \n"                         \
        ".section __ex_table, \"a\" \n"                         \
        ".align 2                   \n"                         \
        ".long 1b, 2b               \n"                         \
        ".previous                  \n"                         \
        "3:                         \n"                         \
        : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)          \
        : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)              \
        : "memory");                                            \
} while (0)

#define __put_user_asm_w(x, ptr, err)                           \
do {                                                            \
        int errcode;                                            \
        __asm__ __volatile__(                                   \
        "1:   stw %1, (%2,0)        \n"                         \
        "     br 3f                 \n"                         \
        "2:   mov %0, %3            \n"                         \
        "     br 3f                 \n"                         \
        ".section __ex_table, \"a\" \n"                         \
        ".align 2                   \n"                         \
        ".long 1b, 2b               \n"                         \
        ".previous                  \n"                         \
        "3:                         \n"                         \
        : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)          \
        : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)              \
        : "memory");                                            \
} while (0)

#define __put_user_asm_64(x, ptr, err)                          \
do {                                                            \
        int tmp;                                                \
        int errcode;                                            \
                                                                \
        __asm__ __volatile__(                                   \
        "     ldw %3, (%1, 0)       \n"                         \
        "1:   stw %3, (%2, 0)       \n"                         \
        "     ldw %3, (%1, 4)       \n"                         \
        "2:   stw %3, (%2, 4)       \n"                         \
        "     br 4f                 \n"                         \
        "3:   mov %0, %4            \n"                         \
        "     br 4f                 \n"                         \
        ".section __ex_table, \"a\" \n"                         \
        ".align 2                   \n"                         \
        ".long 1b, 3b               \n"                         \
        ".long 2b, 3b               \n"                         \
        ".previous                  \n"                         \
        "4:                         \n"                         \
        : "=r"(err), "=r"(x), "=r"(ptr),                        \
          "=r"(tmp), "=r"(errcode)                              \
        : "0"(err), "1"(x), "2"(ptr), "3"(0),                   \
          "4"(-EFAULT)                                          \
        : "memory");                                            \
} while (0)

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
        int retval = 0;
        u32 tmp;

        switch (size) {
        case 1:
                tmp = *(u8 *)x;
                __put_user_asm_b(tmp, ptr, retval);
                break;
        case 2:
                tmp = *(u16 *)x;
                __put_user_asm_h(tmp, ptr, retval);
                break;
        case 4:
                tmp = *(u32 *)x;
                __put_user_asm_w(tmp, ptr, retval);
                break;
        case 8:
                __put_user_asm_64(x, (u64 *)ptr, retval);
                break;
        }

        return retval;
}
#define __put_user_fn __put_user_fn
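
/*
 * Illustrative sketch (not part of the original header; 'uptr' is a
 * hypothetical __user pointer): with the asm-generic layer included
 * at the end of this file, a call such as
 *
 *      u32 __user *uptr;
 *      int err = put_user(0x1234, uptr);
 *
 * should reach __put_user_fn() with size == 4, so the store is done
 * by __put_user_asm_w(); if the store at label 1 faults, the
 * __ex_table entry redirects execution to label 2, which returns
 * -EFAULT through err.
 */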

/*
 * __get_user_fn
 */
extern int __get_user_bad(void);

#define __get_user_asm_common(x, ptr, ins, err)                 \
do {                                                            \
        int errcode;                                            \
        __asm__ __volatile__(                                   \
        "1:   " ins " %1, (%4, 0)   \n"                         \
        "     br 3f                 \n"                         \
        "2:   mov %0, %2            \n"                         \
        "     movi %1, 0            \n"                         \
        "     br 3f                 \n"                         \
        ".section __ex_table, \"a\" \n"                         \
        ".align 2                   \n"                         \
        ".long 1b, 2b               \n"                         \
        ".previous                  \n"                         \
        "3:                         \n"                         \
        : "=r"(err), "=r"(x), "=r"(errcode)                     \
        : "0"(0), "r"(ptr), "2"(-EFAULT)                        \
        : "memory");                                            \
} while (0)

#define __get_user_asm_64(x, ptr, err)                          \
do {                                                            \
        int tmp;                                                \
        int errcode;                                            \
                                                                \
        __asm__ __volatile__(                                   \
        "1:   ldw %3, (%2, 0)       \n"                         \
        "     stw %3, (%1, 0)       \n"                         \
        "2:   ldw %3, (%2, 4)       \n"                         \
        "     stw %3, (%1, 4)       \n"                         \
        "     br 4f                 \n"                         \
        "3:   mov %0, %4            \n"                         \
        "     br 4f                 \n"                         \
        ".section __ex_table, \"a\" \n"                         \
        ".align 2                   \n"                         \
        ".long 1b, 3b               \n"                         \
        ".long 2b, 3b               \n"                         \
        ".previous                  \n"                         \
        "4:                         \n"                         \
        : "=r"(err), "=r"(x), "=r"(ptr),                        \
          "=r"(tmp), "=r"(errcode)                              \
        : "0"(err), "1"(x), "2"(ptr), "3"(0),                   \
          "4"(-EFAULT)                                          \
        : "memory");                                            \
} while (0)

static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
        /*
         * Initialized: __get_user_asm_64() ties %0 to the incoming
         * value of retval, so it would be returned as-is on success.
         */
        int retval = 0;
        u32 tmp;

        switch (size) {
        case 1:
                __get_user_asm_common(tmp, ptr, "ldb", retval);
                *(u8 *)x = (u8)tmp;
                break;
        case 2:
                __get_user_asm_common(tmp, ptr, "ldh", retval);
                *(u16 *)x = (u16)tmp;
                break;
        case 4:
                __get_user_asm_common(tmp, ptr, "ldw", retval);
                *(u32 *)x = (u32)tmp;
                break;
        case 8:
                __get_user_asm_64(x, ptr, retval);
                break;
        }

        return retval;
}
#define __get_user_fn __get_user_fn
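
/*
 * Illustrative sketch (not part of the original header; 'uptr' is a
 * hypothetical __user pointer):
 *
 *      u32 val;
 *      if (get_user(val, uptr))
 *              return -EFAULT;
 *
 * lands in __get_user_fn() with size == 4 and the "ldw" form of
 * __get_user_asm_common(); on a fault the fixup at label 2 both sets
 * the error code and zeroes the destination ("movi %1, 0"), matching
 * the rule that get_user() must not leave the result uninitialized
 * on failure.
 */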

unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n);
unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n);

unsigned long __clear_user(void __user *to, unsigned long n);
#define __clear_user __clear_user
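
/*
 * Usage note (added for clarity; 'ubuf' and 'len' are hypothetical):
 * as with clear_user() elsewhere in the kernel, __clear_user()
 * returns the number of bytes that could NOT be cleared, so zero
 * means success:
 *
 *      if (__clear_user(ubuf, len))
 *              return -EFAULT;
 */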

#include <asm-generic/uaccess.h>

#endif /* __ASM_CSKY_UACCESS_H */
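
/*
 * Second listing: the header below appears to be the earlier (2018)
 * C-SKY version of this same file, from before the conversion to
 * asm-generic/uaccess.h; it is reproduced as a separate listing.
 */
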
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_UACCESS_H
#define __ASM_CSKY_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/version.h>
#include <asm/segment.h>

static inline int access_ok(const void *addr, unsigned long size)
{
        unsigned long limit = current_thread_info()->addr_limit.seg;

        /* Compare against 'limit - size' so that 'addr + size' cannot wrap. */
        return (size <= limit) &&
                ((unsigned long)addr <= (limit - size));
}

#define __addr_ok(addr) (access_ok(addr, 0))
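
/*
 * Example (added for clarity): with the overflow-prone form
 * '(addr < limit) && (addr + size < limit)', a call such as
 * access_ok((void *)4096, ULONG_MAX) would wrap 'addr + size' back
 * below the limit and pass; the 'limit - size' comparison used above
 * cannot wrap and rejects it.
 */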

extern int __put_user_bad(void);

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * Ckcore, we can just do these as direct assignments. (Of course, the
 * exception handling means that it's no longer "just"...)
 */

#define put_user(x, ptr) \
        __put_user_check((x), (ptr), sizeof(*(ptr)))

#define __put_user(x, ptr) \
        __put_user_nocheck((x), (ptr), sizeof(*(ptr)))

#define __ptr(x) ((unsigned long *)(x))

#define get_user(x, ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        long __pu_err = 0;                                      \
        typeof(*(ptr)) *__pu_addr = (ptr);                      \
        typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x);          \
        if (__pu_addr)                                          \
                __put_user_size(__pu_val, (__pu_addr), (size),  \
                                __pu_err);                      \
        __pu_err;                                               \
})

#define __put_user_check(x, ptr, size)                          \
({                                                              \
        long __pu_err = -EFAULT;                                \
        typeof(*(ptr)) *__pu_addr = (ptr);                      \
        typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x);          \
        if (access_ok(__pu_addr, size) && __pu_addr)            \
                __put_user_size(__pu_val, __pu_addr, (size),    \
                                __pu_err);                      \
        __pu_err;                                               \
})

#define __put_user_size(x, ptr, size, retval)           \
do {                                                    \
        retval = 0;                                     \
        switch (size) {                                 \
        case 1:                                         \
                __put_user_asm_b(x, ptr, retval);       \
                break;                                  \
        case 2:                                         \
                __put_user_asm_h(x, ptr, retval);       \
                break;                                  \
        case 4:                                         \
                __put_user_asm_w(x, ptr, retval);       \
                break;                                  \
        case 8:                                         \
                __put_user_asm_64(x, ptr, retval);      \
                break;                                  \
        default:                                        \
                __put_user_bad();                       \
        }                                               \
} while (0)
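
/*
 * Illustrative sketch (not part of the original header; 'p' is a
 * hypothetical __user pointer):
 *
 *      u32 __user *p;
 *      long err = put_user(0x12345678U, p);
 *
 * sizeof(*p) == 4, so __put_user_size() dispatches to
 * __put_user_asm_w(), and err ends up 0 on success or -EFAULT if the
 * store faults (or if access_ok() rejected the range).
 */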

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 *
 * Note that PC at a fault is the address *after* the faulting
 * instruction.
 */
#define __put_user_asm_b(x, ptr, err)                           \
do {                                                            \
        int errcode;                                            \
        asm volatile(                                           \
        "1:   stb %1, (%2,0)        \n"                         \
        "     br 3f                 \n"                         \
        "2:   mov %0, %3            \n"                         \
        "     br 3f                 \n"                         \
        ".section __ex_table, \"a\" \n"                         \
        ".align 2                   \n"                         \
        ".long 1b, 2b               \n"                         \
        ".previous                  \n"                         \
        "3:                         \n"                         \
        : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)          \
        : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)              \
        : "memory");                                            \
} while (0)

#define __put_user_asm_h(x, ptr, err)                           \
do {                                                            \
        int errcode;                                            \
        asm volatile(                                           \
        "1:   sth %1, (%2,0)        \n"                         \
        "     br 3f                 \n"                         \
        "2:   mov %0, %3            \n"                         \
        "     br 3f                 \n"                         \
        ".section __ex_table, \"a\" \n"                         \
        ".align 2                   \n"                         \
        ".long 1b, 2b               \n"                         \
        ".previous                  \n"                         \
        "3:                         \n"                         \
        : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)          \
        : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)              \
        : "memory");                                            \
} while (0)

#define __put_user_asm_w(x, ptr, err)                           \
do {                                                            \
        int errcode;                                            \
        asm volatile(                                           \
        "1:   stw %1, (%2,0)        \n"                         \
        "     br 3f                 \n"                         \
        "2:   mov %0, %3            \n"                         \
        "     br 3f                 \n"                         \
        ".section __ex_table, \"a\" \n"                         \
        ".align 2                   \n"                         \
        ".long 1b, 2b               \n"                         \
        ".previous                  \n"                         \
        "3:                         \n"                         \
        : "=r"(err), "=r"(x), "=r"(ptr), "=r"(errcode)          \
        : "0"(err), "1"(x), "2"(ptr), "3"(-EFAULT)              \
        : "memory");                                            \
} while (0)

#define __put_user_asm_64(x, ptr, err)                          \
do {                                                            \
        int tmp;                                                \
        int errcode;                                            \
        typeof(*(ptr)) src = (typeof(*(ptr)))x;                 \
        typeof(*(ptr)) *psrc = &src;                            \
                                                                \
        asm volatile(                                           \
        "     ldw %3, (%1, 0)       \n"                         \
        "1:   stw %3, (%2, 0)       \n"                         \
        "     ldw %3, (%1, 4)       \n"                         \
        "2:   stw %3, (%2, 4)       \n"                         \
        "     br 4f                 \n"                         \
        "3:   mov %0, %4            \n"                         \
        "     br 4f                 \n"                         \
        ".section __ex_table, \"a\" \n"                         \
        ".align 2                   \n"                         \
        ".long 1b, 3b               \n"                         \
        ".long 2b, 3b               \n"                         \
        ".previous                  \n"                         \
        "4:                         \n"                         \
        : "=r"(err), "=r"(psrc), "=r"(ptr),                     \
          "=r"(tmp), "=r"(errcode)                              \
        : "0"(err), "1"(psrc), "2"(ptr), "3"(0), "4"(-EFAULT)   \
        : "memory");                                            \
} while (0)

#define __get_user_nocheck(x, ptr, size)                \
({                                                      \
        long __gu_err;                                  \
        __get_user_size(x, (ptr), (size), __gu_err);    \
        __gu_err;                                       \
})

#define __get_user_check(x, ptr, size)                          \
({                                                              \
        int __gu_err = -EFAULT;                                 \
        const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);      \
        if (access_ok(__gu_ptr, size) && __gu_ptr)              \
                __get_user_size(x, __gu_ptr, size, __gu_err);   \
        __gu_err;                                               \
})

#define __get_user_size(x, ptr, size, retval)                   \
do {                                                            \
        switch (size) {                                         \
        case 1:                                                 \
                __get_user_asm_common((x), ptr, "ldb", retval); \
                break;                                          \
        case 2:                                                 \
                __get_user_asm_common((x), ptr, "ldh", retval); \
                break;                                          \
        case 4:                                                 \
                __get_user_asm_common((x), ptr, "ldw", retval); \
                break;                                          \
        default:                                                \
                x = 0;                                          \
                (retval) = __get_user_bad();                    \
        }                                                       \
} while (0)
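
/*
 * Note (added for clarity): unlike __put_user_size(), this
 * __get_user_size() has no case for size 8, so a 64-bit get_user()
 * takes the default branch into __get_user_bad(), an intentionally
 * undefined function that turns the mistake into a link-time error.
 */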

#define __get_user_asm_common(x, ptr, ins, err)         \
do {                                                    \
        int errcode;                                    \
        asm volatile(                                   \
        "1:   " ins " %1, (%4,0)    \n"                 \
        "     br 3f                 \n"                 \
        /* Fix up codes */                              \
        "2:   mov %0, %2            \n"                 \
        "     movi %1, 0            \n"                 \
        "     br 3f                 \n"                 \
        ".section __ex_table, \"a\" \n"                 \
        ".align 2                   \n"                 \
        ".long 1b, 2b               \n"                 \
        ".previous                  \n"                 \
        "3:                         \n"                 \
        : "=r"(err), "=r"(x), "=r"(errcode)             \
        : "0"(0), "r"(ptr), "2"(-EFAULT)                \
        : "memory");                                    \
} while (0)

extern int __get_user_bad(void);

#define __copy_user(to, from, n)                        \
do {                                                    \
        int w0, w1, w2, w3;                             \
        asm volatile(                                   \
        "0:   cmpnei %1, 0          \n"                 \
        "     bf 8f                 \n"                 \
        "     mov %3, %1            \n"                 \
        "     or %3, %2             \n"                 \
        "     andi %3, 3            \n"                 \
        "     cmpnei %3, 0          \n"                 \
        "     bf 1f                 \n"                 \
        "     br 5f                 \n"                 \
        "1:   cmplti %0, 16         \n" /* 4W */        \
        "     bt 3f                 \n"                 \
        "     ldw %3, (%2, 0)       \n"                 \
        "     ldw %4, (%2, 4)       \n"                 \
        "     ldw %5, (%2, 8)       \n"                 \
        "     ldw %6, (%2, 12)      \n"                 \
        "2:   stw %3, (%1, 0)       \n"                 \
        "9:   stw %4, (%1, 4)       \n"                 \
        "10:  stw %5, (%1, 8)       \n"                 \
        "11:  stw %6, (%1, 12)      \n"                 \
        "     addi %2, 16           \n"                 \
        "     addi %1, 16           \n"                 \
        "     subi %0, 16           \n"                 \
        "     br 1b                 \n"                 \
        "3:   cmplti %0, 4          \n" /* 1W */        \
        "     bt 5f                 \n"                 \
        "     ldw %3, (%2, 0)       \n"                 \
        "4:   stw %3, (%1, 0)       \n"                 \
        "     addi %2, 4            \n"                 \
        "     addi %1, 4            \n"                 \
        "     subi %0, 4            \n"                 \
        "     br 3b                 \n"                 \
        "5:   cmpnei %0, 0          \n" /* 1B */        \
        "     bf 8f                 \n"                 \
        "     ldb %3, (%2, 0)       \n"                 \
        "6:   stb %3, (%1, 0)       \n"                 \
        "     addi %2, 1            \n"                 \
        "     addi %1, 1            \n"                 \
        "     subi %0, 1            \n"                 \
        "     br 5b                 \n"                 \
        "7:   br 8f                 \n"                 \
        ".section __ex_table, \"a\" \n"                 \
        ".align 2                   \n"                 \
        ".long 2b, 7b               \n"                 \
        ".long 9b, 7b               \n"                 \
        ".long 10b, 7b              \n"                 \
        ".long 11b, 7b              \n"                 \
        ".long 4b, 7b               \n"                 \
        ".long 6b, 7b               \n"                 \
        ".previous                  \n"                 \
        "8:                         \n"                 \
        : "=r"(n), "=r"(to), "=r"(from), "=r"(w0),      \
          "=r"(w1), "=r"(w2), "=r"(w3)                  \
        : "0"(n), "1"(to), "2"(from)                    \
        : "memory");                                    \
} while (0)
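
/*
 * Shape of the copy loop above (added for clarity): if either pointer
 * is not word-aligned, the code drops straight to the byte loop at
 * label 5; otherwise it copies 16-byte blocks (labels 1/2/9/10/11),
 * then single words (labels 3/4), then trailing bytes. A faulting
 * store is redirected by the __ex_table entries to label 7, which
 * stops the copy and leaves %0 (n) holding the bytes not yet copied.
 */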

#define __copy_user_zeroing(to, from, n)                \
do {                                                    \
        int tmp;                                        \
        int nsave;                                      \
        asm volatile(                                   \
        "0:   cmpnei %1, 0          \n"                 \
        "     bf 7f                 \n"                 \
        "     mov %3, %1            \n"                 \
        "     or %3, %2             \n"                 \
        "     andi %3, 3            \n"                 \
        "     cmpnei %3, 0          \n"                 \
        "     bf 1f                 \n"                 \
        "     br 5f                 \n"                 \
        "1:   cmplti %0, 16         \n"                 \
        "     bt 3f                 \n"                 \
        "2:   ldw %3, (%2, 0)       \n"                 \
        "10:  ldw %4, (%2, 4)       \n"                 \
        "     stw %3, (%1, 0)       \n"                 \
        "     stw %4, (%1, 4)       \n"                 \
        "11:  ldw %3, (%2, 8)       \n"                 \
        "12:  ldw %4, (%2, 12)      \n"                 \
        "     stw %3, (%1, 8)       \n"                 \
        "     stw %4, (%1, 12)      \n"                 \
        "     addi %2, 16           \n"                 \
        "     addi %1, 16           \n"                 \
        "     subi %0, 16           \n"                 \
        "     br 1b                 \n"                 \
        "3:   cmplti %0, 4          \n"                 \
        "     bt 5f                 \n"                 \
        "4:   ldw %3, (%2, 0)       \n"                 \
        "     stw %3, (%1, 0)       \n"                 \
        "     addi %2, 4            \n"                 \
        "     addi %1, 4            \n"                 \
        "     subi %0, 4            \n"                 \
        "     br 3b                 \n"                 \
        "5:   cmpnei %0, 0          \n"                 \
        "     bf 7f                 \n"                 \
        "6:   ldb %3, (%2, 0)       \n"                 \
        "     stb %3, (%1, 0)       \n"                 \
        "     addi %2, 1            \n"                 \
        "     addi %1, 1            \n"                 \
        "     subi %0, 1            \n"                 \
        "     br 5b                 \n"                 \
        "8:   mov %3, %0            \n"                 \
        "     movi %4, 0            \n"                 \
        "9:   stb %4, (%1, 0)       \n"                 \
        "     addi %1, 1            \n"                 \
        "     subi %3, 1            \n"                 \
        "     cmpnei %3, 0          \n"                 \
        "     bt 9b                 \n"                 \
        "     br 7f                 \n"                 \
        ".section __ex_table, \"a\" \n"                 \
        ".align 2                   \n"                 \
        ".long 2b, 8b               \n"                 \
        ".long 10b, 8b              \n"                 \
        ".long 11b, 8b              \n"                 \
        ".long 12b, 8b              \n"                 \
        ".long 4b, 8b               \n"                 \
        ".long 6b, 8b               \n"                 \
        ".previous                  \n"                 \
        "7:                         \n"                 \
        : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave),   \
          "=r"(tmp)                                     \
        : "0"(n), "1"(to), "2"(from)                    \
        : "memory");                                    \
} while (0)
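
/*
 * Note (added for clarity): here the *loads* are the protected
 * instructions; if reading the source faults, the fixup at label 8
 * zero-fills the remaining destination bytes, preserving the
 * copy_from_user() guarantee that the kernel buffer never holds
 * stale data after a partial copy.
 */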

unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n);
unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n);

unsigned long clear_user(void *to, unsigned long n);
unsigned long __clear_user(void __user *to, unsigned long n);

long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);

/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than n if too long
 */
long strnlen_user(const char *src, long n);

#define strlen_user(str) strnlen_user(str, 32767)

struct exception_table_entry {
        unsigned long insn;
        unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);

#endif /* __ASM_CSKY_UACCESS_H */