/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: June 2010
 *  -__clear_user(), called multiple times during ELF load, was converted
 *   from a byte loop to do as much word-wise clearing as possible.
 *
 * vineetg: Dec 2009
 *  -Hand crafted constant propagation for "constant" copy sizes
 *  -stock kernel shrunk by 33K at -O3
 *
 * vineetg: Sept 2009
 *  -Added option to (UN)inline copy_(to|from)_user to reduce code size
 *  -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
 *  -Enabled when doing -Os
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_UACCESS_H
#define _ASM_ARC_UACCESS_H

#include <linux/string.h>	/* for generic string functions */

/*********** Single byte/hword/word copies ******************/

#define __get_user_fn(sz, u, k)						\
({									\
	long __ret = 0;	/* success by default */			\
	switch (sz) {							\
	case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break;	\
	case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break;	\
	case 4: __arc_get_user_one(*(k), u, "ld", __ret); break;	\
	case 8: __arc_get_user_one_64(*(k), u, __ret); break;		\
	}								\
	__ret;								\
})

/*
 * Returns 0 on success, -EFAULT otherwise.
 * @ret already contains 0 - given that a fault is the less likely case
 * (hence the "+r" asm constraint below).
 * In case of a fault, the fixup code sets it to -EFAULT.
 */
#define __arc_get_user_one(dst, src, op, ret)	\
	__asm__ __volatile__(			\
	"1:	"op"	%1,[%2]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	# return -EFAULT\n"		\
	"	mov %0, %3\n"			\
	"	# zero out dst ptr\n"		\
	"	mov %1, 0\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.previous\n"			\
						\
	: "+r" (ret), "=r" (dst)		\
	: "r" (src), "ir" (-EFAULT))

#define __arc_get_user_one_64(dst, src, ret)	\
	__asm__ __volatile__(			\
	"1:	ld   %1,[%2]\n"			\
	"4:	ld  %R1,[%2, 4]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	# return -EFAULT\n"		\
	"	mov %0, %3\n"			\
	"	# zero out dst ptr\n"		\
	"	mov %1,  0\n"			\
	"	mov %R1, 0\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.word 4b,3b\n"			\
	"	.previous\n"			\
						\
	: "+r" (ret), "=r" (dst)		\
	: "r" (src), "ir" (-EFAULT))

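/*
 * Illustrative sketch, not part of this header: get_user() from
 * asm-generic/uaccess.h dispatches into __get_user_fn() above with a
 * compile-time size, which selects one of the __arc_get_user_one*()
 * variants. Function and variable names here are hypothetical.
 *
 *	static int example_read_u32(const u32 __user *uptr, u32 *out)
 *	{
 *		u32 v;
 *
 *		if (get_user(v, uptr))	// size 4 -> the "ld" variant
 *			return -EFAULT;
 *		*out = v;
 *		return 0;
 *	}
 */
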
#define __put_user_fn(sz, u, k)						\
({									\
	long __ret = 0;	/* success by default */			\
	switch (sz) {							\
	case 1: __arc_put_user_one(*(k), u, "stb", __ret); break;	\
	case 2: __arc_put_user_one(*(k), u, "stw", __ret); break;	\
	case 4: __arc_put_user_one(*(k), u, "st", __ret); break;	\
	case 8: __arc_put_user_one_64(*(k), u, __ret); break;		\
	}								\
	__ret;								\
})

#define __arc_put_user_one(src, dst, op, ret)	\
	__asm__ __volatile__(			\
	"1:	"op"	%1,[%2]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	mov %0, %3\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.previous\n"			\
						\
	: "+r" (ret)				\
	: "r" (src), "r" (dst), "ir" (-EFAULT))

#define __arc_put_user_one_64(src, dst, ret)	\
	__asm__ __volatile__(			\
	"1:	st   %1,[%2]\n"			\
	"4:	st  %R1,[%2, 4]\n"		\
	"2:	;nop\n"				\
	"	.section .fixup, \"ax\"\n"	\
	"	.align 4\n"			\
	"3:	mov %0, %3\n"			\
	"	j   2b\n"			\
	"	.previous\n"			\
	"	.section __ex_table, \"a\"\n"	\
	"	.align 4\n"			\
	"	.word 1b,3b\n"			\
	"	.word 4b,3b\n"			\
	"	.previous\n"			\
						\
	: "+r" (ret)				\
	: "r" (src), "r" (dst), "ir" (-EFAULT))

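/*
 * Illustrative sketch, not part of this header: the mirror-image write
 * path. put_user() from asm-generic/uaccess.h lands in __put_user_fn()
 * above. Hypothetical names:
 *
 *	static int example_write_u16(u16 __user *uptr, u16 val)
 *	{
 *		// 0 on success, -EFAULT on fault; size 2 -> the "stw" variant
 *		return put_user(val, uptr);
 *	}
 */
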
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	long res = 0;
	char val;
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long orig_n = n;

	if (n == 0)
		return 0;

	/* fallback for unaligned access when hardware doesn't support */
	if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
	     (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {

		unsigned char tmp;

		__asm__ __volatile__ (
		"	mov.f   lp_count, %0		\n"
		"	lpnz 2f				\n"
		"1:	ldb.ab  %1, [%3, 1]		\n"
		"	stb.ab  %1, [%2, 1]		\n"
		"	sub     %0,%0,1			\n"
		"2:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"3:	j   2b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 3b			\n"
		"	.previous			\n"

		: "+r" (n),
		/*
		 * Note as an '&' earlyclobber operand to make sure the
		 * temporary register inside the loop is not the same as
		 * FROM or TO.
		 */
		  "=&r" (tmp), "+r" (to), "+r" (from)
		:
		: "lp_count", "memory");

		return n;
	}

	/*
	 * Hand-crafted constant propagation to reduce code sz of the
	 * laddered copy 16x,8,4,2,1
	 */
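	/*
	 * Sketch of the effect (hypothetical call, not generated code): for
	 * copy_from_user(dst, src, 6) the compiler sees orig_n == 6, so only
	 * the "/ 4" and "/ 2" rungs below survive dead-code elimination -
	 * one 4-byte and one 2-byte inline copy, no loop at all.
	 */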
	if (__builtin_constant_p(orig_n)) {
		res = orig_n;

		if (orig_n / 16) {
			orig_n = orig_n % 16;

			__asm__ __volatile__(
			"	lsr   lp_count, %7,4		\n"
			"	lp    3f			\n"
			"1:	ld.ab   %3, [%2, 4]		\n"
			"11:	ld.ab   %4, [%2, 4]		\n"
			"12:	ld.ab   %5, [%2, 4]		\n"
			"13:	ld.ab   %6, [%2, 4]		\n"
			"	st.ab   %3, [%1, 4]		\n"
			"	st.ab   %4, [%1, 4]		\n"
			"	st.ab   %5, [%1, 4]		\n"
			"	st.ab   %6, [%1, 4]		\n"
			"	sub     %0,%0,16		\n"
			"3:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   3b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   1b, 4b			\n"
			"	.word   11b,4b			\n"
			"	.word   12b,4b			\n"
			"	.word   13b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
			: "ir"(n)
			: "lp_count", "memory");
		}
		if (orig_n / 8) {
			orig_n = orig_n % 8;

			__asm__ __volatile__(
			"14:	ld.ab   %3, [%2,4]		\n"
			"15:	ld.ab   %4, [%2,4]		\n"
			"	st.ab   %3, [%1,4]		\n"
			"	st.ab   %4, [%1,4]		\n"
			"	sub     %0,%0,8			\n"
			"31:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   31b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   14b,4b			\n"
			"	.word   15b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2)
			:
			: "memory");
		}
		if (orig_n / 4) {
			orig_n = orig_n % 4;

			__asm__ __volatile__(
			"16:	ld.ab   %3, [%2,4]		\n"
			"	st.ab   %3, [%1,4]		\n"
			"	sub     %0,%0,4			\n"
			"32:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   32b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   16b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n / 2) {
			orig_n = orig_n % 2;

			__asm__ __volatile__(
			"17:	ldw.ab  %3, [%2,2]		\n"
			"	stw.ab  %3, [%1,2]		\n"
			"	sub     %0,%0,2			\n"
			"33:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   33b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   17b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n & 1) {
			__asm__ __volatile__(
			"18:	ldb.ab  %3, [%2,2]		\n"
			"	stb.ab  %3, [%1,2]		\n"
			"	sub     %0,%0,1			\n"
			"34:	; nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   34b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   18b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */

		__asm__ __volatile__(
		"	mov %0,%3			\n"
		"	lsr.f  lp_count, %3,4		\n"  /* 16x bytes */
		"	lpnz   3f			\n"
		"1:	ld.ab   %5, [%2, 4]		\n"
		"11:	ld.ab   %6, [%2, 4]		\n"
		"12:	ld.ab   %7, [%2, 4]		\n"
		"13:	ld.ab   %8, [%2, 4]		\n"
		"	st.ab   %5, [%1, 4]		\n"
		"	st.ab   %6, [%1, 4]		\n"
		"	st.ab   %7, [%1, 4]		\n"
		"	st.ab   %8, [%1, 4]		\n"
		"	sub     %0,%0,16		\n"
		"3:	and.f   %3,%3,0xf		\n"  /* stragglers */
		"	bz      34f			\n"
		"	bbit0   %3,3,31f		\n"  /* 8 bytes left */
		"14:	ld.ab   %5, [%2,4]		\n"
		"15:	ld.ab   %6, [%2,4]		\n"
		"	st.ab   %5, [%1,4]		\n"
		"	st.ab   %6, [%1,4]		\n"
		"	sub.f   %0,%0,8			\n"
		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
		"16:	ld.ab   %5, [%2,4]		\n"
		"	st.ab   %5, [%1,4]		\n"
		"	sub.f   %0,%0,4			\n"
		"32:	bbit0   %3,1,33f		\n"  /* 2 bytes left */
		"17:	ldw.ab  %5, [%2,2]		\n"
		"	stw.ab  %5, [%1,2]		\n"
		"	sub.f   %0,%0,2			\n"
		"33:	bbit0   %3,0,34f		\n"
		"18:	ldb.ab  %5, [%2,1]		\n"  /* 1 byte left */
		"	stb.ab  %5, [%1,1]		\n"
		"	sub.f   %0,%0,1			\n"
		"34:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   34b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.word   11b,4b			\n"
		"	.word   12b,4b			\n"
		"	.word   13b,4b			\n"
		"	.word   14b,4b			\n"
		"	.word   15b,4b			\n"
		"	.word   16b,4b			\n"
		"	.word   17b,4b			\n"
		"	.word   18b,4b			\n"
		"	.previous			\n"
		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
		:
		: "lp_count", "memory");
	}

	return res;
}

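/*
 * Illustrative sketch, not part of this header: callers go through the
 * generic copy_from_user(), which - with INLINE_COPY_FROM_USER defined at
 * the bottom of this file - inlines raw_copy_from_user(). The return value
 * is the number of bytes that could NOT be copied; 0 means success.
 * Struct and function names below are hypothetical.
 *
 *	struct example_req { u32 cmd; u32 len; };
 *
 *	static long example_get_req(struct example_req *req,
 *				    const void __user *uarg)
 *	{
 *		if (copy_from_user(req, uarg, sizeof(*req)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
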
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	long res = 0;
	char val;
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long orig_n = n;

	if (n == 0)
		return 0;

	/* fallback for unaligned access when hardware doesn't support */
	if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
	     (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {

		unsigned char tmp;

		__asm__ __volatile__(
		"	mov.f   lp_count, %0		\n"
		"	lpnz 3f				\n"
		"	ldb.ab  %1, [%3, 1]		\n"
		"1:	stb.ab  %1, [%2, 1]		\n"
		"	sub     %0, %0, 1		\n"
		"3:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   3b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.previous			\n"

		: "+r" (n),
		/* Note as an '&' earlyclobber operand to make sure the
		 * temporary register inside the loop is not the same as
		 * FROM or TO.
		 */
		  "=&r" (tmp), "+r" (to), "+r" (from)
		:
		: "lp_count", "memory");

		return n;
	}

	if (__builtin_constant_p(orig_n)) {
		res = orig_n;

		if (orig_n / 16) {
			orig_n = orig_n % 16;

			__asm__ __volatile__(
			"	lsr lp_count, %7,4		\n"
			"	lp  3f				\n"
			"	ld.ab %3, [%2, 4]		\n"
			"	ld.ab %4, [%2, 4]		\n"
			"	ld.ab %5, [%2, 4]		\n"
			"	ld.ab %6, [%2, 4]		\n"
			"1:	st.ab %3, [%1, 4]		\n"
			"11:	st.ab %4, [%1, 4]		\n"
			"12:	st.ab %5, [%1, 4]		\n"
			"13:	st.ab %6, [%1, 4]		\n"
			"	sub   %0, %0, 16		\n"
			"3:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   3b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   1b, 4b			\n"
			"	.word   11b,4b			\n"
			"	.word   12b,4b			\n"
			"	.word   13b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
			: "ir"(n)
			: "lp_count", "memory");
		}
		if (orig_n / 8) {
			orig_n = orig_n % 8;

			__asm__ __volatile__(
			"	ld.ab %3, [%2,4]		\n"
			"	ld.ab %4, [%2,4]		\n"
			"14:	st.ab %3, [%1,4]		\n"
			"15:	st.ab %4, [%1,4]		\n"
			"	sub   %0, %0, 8			\n"
			"31:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   31b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   14b,4b			\n"
			"	.word   15b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2)
			:
			: "memory");
		}
		if (orig_n / 4) {
			orig_n = orig_n % 4;

			__asm__ __volatile__(
			"	ld.ab %3, [%2,4]		\n"
			"16:	st.ab %3, [%1,4]		\n"
			"	sub   %0, %0, 4			\n"
			"32:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   32b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   16b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n / 2) {
			orig_n = orig_n % 2;

			__asm__ __volatile__(
			"	ldw.ab %3, [%2,2]		\n"
			"17:	stw.ab %3, [%1,2]		\n"
			"	sub    %0, %0, 2		\n"
			"33:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   33b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   17b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n & 1) {
			__asm__ __volatile__(
			"	ldb.ab  %3, [%2,1]		\n"
			"18:	stb.ab  %3, [%1,1]		\n"
			"	sub     %0, %0, 1		\n"
			"34:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   34b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   18b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */

		__asm__ __volatile__(
		"	mov   %0,%3			\n"
		"	lsr.f lp_count, %3,4		\n"  /* 16x bytes */
		"	lpnz  3f			\n"
		"	ld.ab %5, [%2, 4]		\n"
		"	ld.ab %6, [%2, 4]		\n"
		"	ld.ab %7, [%2, 4]		\n"
		"	ld.ab %8, [%2, 4]		\n"
		"1:	st.ab %5, [%1, 4]		\n"
		"11:	st.ab %6, [%1, 4]		\n"
		"12:	st.ab %7, [%1, 4]		\n"
		"13:	st.ab %8, [%1, 4]		\n"
		"	sub   %0, %0, 16		\n"
		"3:	and.f %3,%3,0xf			\n"  /* stragglers */
		"	bz 34f				\n"
		"	bbit0   %3,3,31f		\n"  /* 8 bytes left */
		"	ld.ab   %5, [%2,4]		\n"
		"	ld.ab   %6, [%2,4]		\n"
		"14:	st.ab   %5, [%1,4]		\n"
		"15:	st.ab   %6, [%1,4]		\n"
		"	sub.f   %0, %0, 8		\n"
		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
		"	ld.ab   %5, [%2,4]		\n"
		"16:	st.ab   %5, [%1,4]		\n"
		"	sub.f   %0, %0, 4		\n"
		"32:	bbit0   %3,1,33f		\n"  /* 2 bytes left */
		"	ldw.ab  %5, [%2,2]		\n"
		"17:	stw.ab  %5, [%1,2]		\n"
		"	sub.f   %0, %0, 2		\n"
		"33:	bbit0   %3,0,34f		\n"
		"	ldb.ab  %5, [%2,1]		\n"  /* 1 byte left */
		"18:	stb.ab  %5, [%1,1]		\n"
		"	sub.f   %0, %0, 1		\n"
		"34:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   34b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.word   11b,4b			\n"
		"	.word   12b,4b			\n"
		"	.word   13b,4b			\n"
		"	.word   14b,4b			\n"
		"	.word   15b,4b			\n"
		"	.word   16b,4b			\n"
		"	.word   17b,4b			\n"
		"	.word   18b,4b			\n"
		"	.previous			\n"
		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
		:
		: "lp_count", "memory");
	}

	return res;
}

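/*
 * Illustrative counterpart for the write direction (hypothetical names):
 * copy_to_user() also returns the number of bytes NOT copied, 0 on success.
 *
 *	static long example_put_stats(void __user *ubuf, const void *kbuf,
 *				      size_t len)
 *	{
 *		return copy_to_user(ubuf, kbuf, len) ? -EFAULT : 0;
 *	}
 */
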
static inline unsigned long __clear_user(void __user *to, unsigned long n)
{
	long res = n;
	unsigned char *d_char = to;

	__asm__ __volatile__(
	"	bbit0   %0, 0, 1f		\n"
	"75:	stb.ab  %2, [%0,1]		\n"
	"	sub %1, %1, 1			\n"
	"1:	bbit0   %0, 1, 2f		\n"
	"76:	stw.ab  %2, [%0,2]		\n"
	"	sub %1, %1, 2			\n"
	"2:	asr.f   lp_count, %1, 2		\n"
	"	lpnz    3f			\n"
	"77:	st.ab   %2, [%0,4]		\n"
	"	sub %1, %1, 4			\n"
	"3:	bbit0   %1, 1, 4f		\n"
	"78:	stw.ab  %2, [%0,2]		\n"
	"	sub %1, %1, 2			\n"
	"4:	bbit0   %1, 0, 5f		\n"
	"79:	stb.ab  %2, [%0,1]		\n"
	"	sub %1, %1, 1			\n"
	"5:					\n"
	"	.section .fixup, \"ax\"		\n"
	"	.align 4			\n"
	"3:	j   5b				\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4			\n"
	"	.word   75b, 3b			\n"
	"	.word   76b, 3b			\n"
	"	.word   77b, 3b			\n"
	"	.word   78b, 3b			\n"
	"	.word   79b, 3b			\n"
	"	.previous			\n"
	: "+r"(d_char), "+r"(res)
	: "i"(0)
	: "lp_count", "memory");

	return res;
}

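/*
 * Illustrative sketch, not part of this header: clear_user() from
 * asm-generic/uaccess.h checks access_ok() and then calls the __clear_user()
 * above (selected by the #define further down). It returns the number of
 * bytes left un-zeroed. Hypothetical usage:
 *
 *	static int example_zero_tail(u8 __user *ubuf, size_t copied,
 *				     size_t total)
 *	{
 *		if (clear_user(ubuf + copied, total - copied))
 *			return -EFAULT;
 *		return 0;
 *	}
 */
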
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

#define __clear_user __clear_user

#include <asm-generic/uaccess.h>

#endif