/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm/asm-eva.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR ".word"
#define __UA_LA "la"
#define __UA_ADDU "addu"
#define __UA_t0 "$8"
#define __UA_t1 "$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT __ua_limit

#define __UA_ADDR ".dword"
#define __UA_LA "dla"
#define __UA_ADDU "daddu"
#define __UA_t0 "$12"
#define __UA_t1 "$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS ((mm_segment_t) { 0x80000000UL })
#define USER_DS ((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS ((mm_segment_t) { 0UL })
#define USER_DS ((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

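/*
 * A minimal sketch of the classic addr_limit dance (old_fs and kptr are
 * hypothetical names, not part of this header): temporarily raise the
 * limit so a kernel pointer may be passed through a __user interface,
 * and always restore the old value afterwards.
 *
 *     mm_segment_t old_fs = get_fs();
 *     int err;
 *
 *     set_fs(KERNEL_DS);
 *     err = __put_user(42, (int __user *)kptr);
 *     set_fs(old_fs);
 */
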
/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 * - "addr" doesn't have any high-bits set
 * - AND "size" doesn't have any high-bits set
 * - AND "addr+size" doesn't have any high-bits set
 * - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size) \
 ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 * to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask) \
({ \
 unsigned long __addr = (unsigned long) (addr); \
 unsigned long __size = size; \
 unsigned long __mask = mask; \
 unsigned long __ok; \
 \
 __chk_user_ptr(addr); \
 __ok = (signed long)(__mask & (__addr | (__addr + __size) | \
 __ua_size(__size))); \
 __ok == 0; \
})

#define access_ok(type, addr, size) \
 likely(__access_ok((addr), (size), __access_mask))

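/*
 * Illustrative use (uaddr, kbuf and len are hypothetical): validate the
 * whole block once, then use the unchecked accessors defined below.
 *
 *     if (!access_ok(VERIFY_WRITE, uaddr, len))
 *             return -EFAULT;
 *     return __copy_to_user(uaddr, kbuf, len) ? -EFAULT : 0;
 */
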
/*
 * put_user: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
 __put_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
 __get_user_check((x), (ptr), sizeof(*(ptr)))

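/*
 * A short usage sketch for the checking accessors (uptr is a
 * hypothetical int __user pointer):
 *
 *     int val;
 *
 *     if (get_user(val, uptr))
 *             return -EFAULT;
 *     val++;
 *     if (put_user(val, uptr))
 *             return -EFAULT;
 */
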
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
 __put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

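/*
 * The unchecked variants pay the access_ok() cost only once, e.g. when
 * walking an array (uarr, n, i and tmp are hypothetical):
 *
 *     if (!access_ok(VERIFY_READ, uarr, n * sizeof(*uarr)))
 *             return -EFAULT;
 *     for (i = 0; i < n; i++)
 *             if (__get_user(tmp, uarr + i))
 *                     return -EFAULT;
 */
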
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel specific functions for EVA. We need to use normal load instructions
 * to read data from kernel when operating in EVA mode. We use these macros to
 * avoid redefining __get_user_asm for EVA.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd _loadw
#else
#define _loadd(reg, addr) "ld " reg ", " addr
#endif
#define _loadw(reg, addr) "lw " reg ", " addr
#define _loadh(reg, addr) "lh " reg ", " addr
#define _loadb(reg, addr) "lb " reg ", " addr

#define __get_kernel_common(val, size, ptr) \
do { \
 switch (size) { \
 case 1: __get_data_asm(val, _loadb, ptr); break; \
 case 2: __get_data_asm(val, _loadh, ptr); break; \
 case 4: __get_data_asm(val, _loadw, ptr); break; \
 case 8: __GET_DW(val, _loadd, ptr); break; \
 default: __get_user_unknown(); break; \
 } \
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr) \
do { \
 switch (size) { \
 case 1: __get_data_asm(val, user_lb, ptr); break; \
 case 2: __get_data_asm(val, user_lh, ptr); break; \
 case 4: __get_data_asm(val, user_lw, ptr); break; \
 case 8: __GET_DW(val, user_ld, ptr); break; \
 default: __get_user_unknown(); break; \
 } \
} while (0)

#define __get_user_nocheck(x, ptr, size) \
({ \
 int __gu_err; \
 \
 if (segment_eq(get_fs(), get_ds())) { \
 __get_kernel_common((x), size, ptr); \
 } else { \
 __chk_user_ptr(ptr); \
 __get_user_common((x), size, ptr); \
 } \
 __gu_err; \
})

#define __get_user_check(x, ptr, size) \
({ \
 int __gu_err = -EFAULT; \
 const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
 \
 might_fault(); \
 if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) { \
 if (segment_eq(get_fs(), get_ds())) \
 __get_kernel_common((x), size, __gu_ptr); \
 else \
 __get_user_common((x), size, __gu_ptr); \
 } else \
 (x) = 0; \
 \
 __gu_err; \
})

#define __get_data_asm(val, insn, addr) \
{ \
 long __gu_tmp; \
 \
 __asm__ __volatile__( \
 "1: "insn("%1", "%3")" \n" \
 "2: \n" \
 " .insn \n" \
 " .section .fixup,\"ax\" \n" \
 "3: li %0, %4 \n" \
 " move %1, $0 \n" \
 " j 2b \n" \
 " .previous \n" \
 " .section __ex_table,\"a\" \n" \
 " "__UA_ADDR "\t1b, 3b \n" \
 " .previous \n" \
 : "=r" (__gu_err), "=r" (__gu_tmp) \
 : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
 \
 (val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Get a 64-bit value using 32-bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr) \
{ \
 union { \
 unsigned long long l; \
 __typeof__(*(addr)) t; \
 } __gu_tmp; \
 \
 __asm__ __volatile__( \
 "1: " insn("%1", "(%3)")" \n" \
 "2: " insn("%D1", "4(%3)")" \n" \
 "3: \n" \
 " .insn \n" \
 " .section .fixup,\"ax\" \n" \
 "4: li %0, %4 \n" \
 " move %1, $0 \n" \
 " move %D1, $0 \n" \
 " j 3b \n" \
 " .previous \n" \
 " .section __ex_table,\"a\" \n" \
 " " __UA_ADDR " 1b, 4b \n" \
 " " __UA_ADDR " 2b, 4b \n" \
 " .previous \n" \
 : "=r" (__gu_err), "=&r" (__gu_tmp.l) \
 : "0" (0), "r" (addr), "i" (-EFAULT)); \
 \
 (val) = __gu_tmp.t; \
}

#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel specific functions for EVA. We need to use normal store instructions
 * to write data to kernel memory when operating in EVA mode. We use these
 * macros to avoid redefining __put_data_asm for EVA.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored _storew
#else
#define _stored(reg, addr) "sd " reg ", " addr
#endif

#define _storew(reg, addr) "sw " reg ", " addr
#define _storeh(reg, addr) "sh " reg ", " addr
#define _storeb(reg, addr) "sb " reg ", " addr

#define __put_kernel_common(ptr, size) \
do { \
 switch (size) { \
 case 1: __put_data_asm(_storeb, ptr); break; \
 case 2: __put_data_asm(_storeh, ptr); break; \
 case 4: __put_data_asm(_storew, ptr); break; \
 case 8: __PUT_DW(_stored, ptr); break; \
 default: __put_user_unknown(); break; \
 } \
} while (0)
#endif

/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size) \
do { \
 switch (size) { \
 case 1: __put_data_asm(user_sb, ptr); break; \
 case 2: __put_data_asm(user_sh, ptr); break; \
 case 4: __put_data_asm(user_sw, ptr); break; \
 case 8: __PUT_DW(user_sd, ptr); break; \
 default: __put_user_unknown(); break; \
 } \
} while (0)

#define __put_user_nocheck(x, ptr, size) \
({ \
 __typeof__(*(ptr)) __pu_val; \
 int __pu_err = 0; \
 \
 __pu_val = (x); \
 if (segment_eq(get_fs(), get_ds())) { \
 __put_kernel_common(ptr, size); \
 } else { \
 __chk_user_ptr(ptr); \
 __put_user_common(ptr, size); \
 } \
 __pu_err; \
})

#define __put_user_check(x, ptr, size) \
({ \
 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
 __typeof__(*(ptr)) __pu_val = (x); \
 int __pu_err = -EFAULT; \
 \
 might_fault(); \
 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
 if (segment_eq(get_fs(), get_ds())) \
 __put_kernel_common(__pu_addr, size); \
 else \
 __put_user_common(__pu_addr, size); \
 } \
 \
 __pu_err; \
})

#define __put_data_asm(insn, ptr) \
{ \
 __asm__ __volatile__( \
 "1: "insn("%z2", "%3")" # __put_data_asm \n" \
 "2: \n" \
 " .insn \n" \
 " .section .fixup,\"ax\" \n" \
 "3: li %0, %4 \n" \
 " j 2b \n" \
 " .previous \n" \
 " .section __ex_table,\"a\" \n" \
 " " __UA_ADDR " 1b, 3b \n" \
 " .previous \n" \
 : "=r" (__pu_err) \
 : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
 "i" (-EFAULT)); \
}

#define __put_data_asm_ll32(insn, ptr) \
{ \
 __asm__ __volatile__( \
 "1: "insn("%2", "(%3)")" # __put_data_asm_ll32 \n" \
 "2: "insn("%D2", "4(%3)")" \n" \
 "3: \n" \
 " .insn \n" \
 " .section .fixup,\"ax\" \n" \
 "4: li %0, %4 \n" \
 " j 3b \n" \
 " .previous \n" \
 " .section __ex_table,\"a\" \n" \
 " " __UA_ADDR " 1b, 4b \n" \
 " " __UA_ADDR " 2b, 4b \n" \
 " .previous" \
 : "=r" (__pu_err) \
 : "0" (0), "r" (__pu_val), "r" (ptr), \
 "i" (-EFAULT)); \
}

extern void __put_user_unknown(void);

/*
 * ul{b,h,w} are macros and there are no equivalent macros for EVA.
 * EVA unaligned access is handled in the ADE exception handler.
 */
#ifndef CONFIG_EVA
/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr) \
 __put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr) \
 __get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

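/*
 * Illustrative sketch (the pointer p is hypothetical): the unaligned
 * variants are for user addresses that may not be naturally aligned,
 * e.g. a field of a packed structure in user memory.
 *
 *     struct __attribute__((packed)) rec { char c; int v; } __user *p;
 *     int v;
 *
 *     if (get_user_unaligned(v, &p->v))
 *             return -EFAULT;
 */
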
/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr) \
 __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
 __get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
 __get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
 __get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr) \
do { \
 switch (size) { \
 case 1: __get_user_unaligned_asm(val, "lb", ptr); break; \
 case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \
 case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \
 case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
 default: __get_user_unaligned_unknown(); break; \
 } \
} while (0)

#define __get_user_unaligned_nocheck(x,ptr,size) \
({ \
 int __gu_err; \
 \
 __get_user_unaligned_common((x), size, ptr); \
 __gu_err; \
})

#define __get_user_unaligned_check(x,ptr,size) \
({ \
 int __gu_err = -EFAULT; \
 const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
 \
 if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
 __get_user_unaligned_common((x), size, __gu_ptr); \
 \
 __gu_err; \
})

#define __get_user_unaligned_asm(val, insn, addr) \
{ \
 long __gu_tmp; \
 \
 __asm__ __volatile__( \
 "1: " insn " %1, %3 \n" \
 "2: \n" \
 " .insn \n" \
 " .section .fixup,\"ax\" \n" \
 "3: li %0, %4 \n" \
 " j 2b \n" \
 " .previous \n" \
 " .section __ex_table,\"a\" \n" \
 " "__UA_ADDR "\t1b, 3b \n" \
 " "__UA_ADDR "\t1b + 4, 3b \n" \
 " .previous \n" \
 : "=r" (__gu_err), "=r" (__gu_tmp) \
 : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
 \
 (val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Get a 64-bit value using 32-bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr) \
{ \
 unsigned long long __gu_tmp; \
 \
 __asm__ __volatile__( \
 "1: ulw %1, (%3) \n" \
 "2: ulw %D1, 4(%3) \n" \
 " move %0, $0 \n" \
 "3: \n" \
 " .insn \n" \
 " .section .fixup,\"ax\" \n" \
 "4: li %0, %4 \n" \
 " move %1, $0 \n" \
 " move %D1, $0 \n" \
 " j 3b \n" \
 " .previous \n" \
 " .section __ex_table,\"a\" \n" \
 " " __UA_ADDR " 1b, 4b \n" \
 " " __UA_ADDR " 1b + 4, 4b \n" \
 " " __UA_ADDR " 2b, 4b \n" \
 " " __UA_ADDR " 2b + 4, 4b \n" \
 " .previous \n" \
 : "=r" (__gu_err), "=&r" (__gu_tmp) \
 : "0" (0), "r" (addr), "i" (-EFAULT)); \
 (val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Yuck. We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_common(ptr, size) \
do { \
 switch (size) { \
 case 1: __put_user_unaligned_asm("sb", ptr); break; \
 case 2: __put_user_unaligned_asm("ush", ptr); break; \
 case 4: __put_user_unaligned_asm("usw", ptr); break; \
 case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
 default: __put_user_unaligned_unknown(); break; \
 } \
} while (0)

#define __put_user_unaligned_nocheck(x,ptr,size) \
({ \
 __typeof__(*(ptr)) __pu_val; \
 int __pu_err = 0; \
 \
 __pu_val = (x); \
 __put_user_unaligned_common(ptr, size); \
 __pu_err; \
})

#define __put_user_unaligned_check(x,ptr,size) \
({ \
 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
 __typeof__(*(ptr)) __pu_val = (x); \
 int __pu_err = -EFAULT; \
 \
 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
 __put_user_unaligned_common(__pu_addr, size); \
 \
 __pu_err; \
})

#define __put_user_unaligned_asm(insn, ptr) \
{ \
 __asm__ __volatile__( \
 "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \
 "2: \n" \
 " .insn \n" \
 " .section .fixup,\"ax\" \n" \
 "3: li %0, %4 \n" \
 " j 2b \n" \
 " .previous \n" \
 " .section __ex_table,\"a\" \n" \
 " " __UA_ADDR " 1b, 3b \n" \
 " .previous \n" \
 : "=r" (__pu_err) \
 : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
 "i" (-EFAULT)); \
}

#define __put_user_unaligned_asm_ll32(ptr) \
{ \
 __asm__ __volatile__( \
 "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
 "2: sw %D2, 4(%3) \n" \
 "3: \n" \
 " .insn \n" \
 " .section .fixup,\"ax\" \n" \
 "4: li %0, %4 \n" \
 " j 3b \n" \
 " .previous \n" \
 " .section __ex_table,\"a\" \n" \
 " " __UA_ADDR " 1b, 4b \n" \
 " " __UA_ADDR " 1b + 4, 4b \n" \
 " " __UA_ADDR " 2b, 4b \n" \
 " " __UA_ADDR " 2b + 4, 4b \n" \
 " .previous" \
 : "=r" (__pu_err) \
 : "0" (0), "r" (__pu_val), "r" (ptr), \
 "i" (-EFAULT)); \
}

extern void __put_user_unaligned_unknown(void);
#endif

/*
 * We're generating jumps to subroutines which will be outside the range of
 * jump instructions
 */
#ifdef MODULE
#define __MODULE_JAL(destination) \
 ".set\tnoat\n\t" \
 __UA_LA "\t$1, " #destination "\n\t" \
 "jalr\t$1\n\t" \
 ".set\tat\n\t"
#else
#define __MODULE_JAL(destination) \
 "jal\t" #destination "\n\t"
#endif

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define DADDI_SCRATCH "$0"
#else
#define DADDI_SCRATCH "$3"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#ifndef CONFIG_EVA
#define __invoke_copy_to_user(to, from, n) \
({ \
 register void __user *__cu_to_r __asm__("$4"); \
 register const void *__cu_from_r __asm__("$5"); \
 register long __cu_len_r __asm__("$6"); \
 \
 __cu_to_r = (to); \
 __cu_from_r = (from); \
 __cu_len_r = (n); \
 __asm__ __volatile__( \
 __MODULE_JAL(__copy_user) \
 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
 : \
 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
 DADDI_SCRATCH, "memory"); \
 __cu_len_r; \
})

#define __invoke_copy_to_kernel(to, from, n) \
 __invoke_copy_to_user(to, from, n)

#endif

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n) \
({ \
 void __user *__cu_to; \
 const void *__cu_from; \
 long __cu_len; \
 \
 __cu_to = (to); \
 __cu_from = (from); \
 __cu_len = (n); \
 might_fault(); \
 if (segment_eq(get_fs(), get_ds())) \
 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
 __cu_len); \
 else \
 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
 __cu_len); \
 __cu_len; \
})

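/*
 * Sketch: __copy_to_user() after an explicit access_ok() check (uaddr
 * and info are hypothetical); a nonzero return value is the number of
 * bytes left uncopied.
 *
 *     if (!access_ok(VERIFY_WRITE, uaddr, sizeof(info)))
 *             return -EFAULT;
 *     if (__copy_to_user(uaddr, &info, sizeof(info)))
 *             return -EFAULT;
 */
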
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n) \
({ \
 void __user *__cu_to; \
 const void *__cu_from; \
 long __cu_len; \
 \
 __cu_to = (to); \
 __cu_from = (from); \
 __cu_len = (n); \
 if (segment_eq(get_fs(), get_ds())) \
 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
 __cu_len); \
 else \
 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
 __cu_len); \
 __cu_len; \
})

#define __copy_from_user_inatomic(to, from, n) \
({ \
 void *__cu_to; \
 const void __user *__cu_from; \
 long __cu_len; \
 \
 __cu_to = (to); \
 __cu_from = (from); \
 __cu_len = (n); \
 if (segment_eq(get_fs(), get_ds())) \
 __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
 __cu_from, \
 __cu_len); \
 else \
 __cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
 __cu_from, \
 __cu_len); \
 __cu_len; \
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n) \
({ \
 void __user *__cu_to; \
 const void *__cu_from; \
 long __cu_len; \
 \
 __cu_to = (to); \
 __cu_from = (from); \
 __cu_len = (n); \
 if (segment_eq(get_fs(), get_ds())) { \
 __cu_len = __invoke_copy_to_kernel(__cu_to, \
 __cu_from, \
 __cu_len); \
 } else { \
 if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
 might_fault(); \
 __cu_len = __invoke_copy_to_user(__cu_to, \
 __cu_from, \
 __cu_len); \
 } \
 } \
 __cu_len; \
})

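/*
 * Typical use (ubuf and result are hypothetical); any nonzero
 * remainder is conventionally folded into -EFAULT:
 *
 *     if (copy_to_user(ubuf, &result, sizeof(result)))
 *             return -EFAULT;
 */
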
#ifndef CONFIG_EVA

#define __invoke_copy_from_user(to, from, n) \
({ \
 register void *__cu_to_r __asm__("$4"); \
 register const void __user *__cu_from_r __asm__("$5"); \
 register long __cu_len_r __asm__("$6"); \
 \
 __cu_to_r = (to); \
 __cu_from_r = (from); \
 __cu_len_r = (n); \
 __asm__ __volatile__( \
 ".set\tnoreorder\n\t" \
 __MODULE_JAL(__copy_user) \
 ".set\tnoat\n\t" \
 __UA_ADDU "\t$1, %1, %2\n\t" \
 ".set\tat\n\t" \
 ".set\treorder" \
 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
 : \
 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
 DADDI_SCRATCH, "memory"); \
 __cu_len_r; \
})

#define __invoke_copy_from_kernel(to, from, n) \
 __invoke_copy_from_user(to, from, n)

/* For userland <-> userland operations */
#define ___invoke_copy_in_user(to, from, n) \
 __invoke_copy_from_user(to, from, n)

/* For kernel <-> kernel operations */
#define ___invoke_copy_in_kernel(to, from, n) \
 __invoke_copy_from_user(to, from, n)

#define __invoke_copy_from_user_inatomic(to, from, n) \
({ \
 register void *__cu_to_r __asm__("$4"); \
 register const void __user *__cu_from_r __asm__("$5"); \
 register long __cu_len_r __asm__("$6"); \
 \
 __cu_to_r = (to); \
 __cu_from_r = (from); \
 __cu_len_r = (n); \
 __asm__ __volatile__( \
 ".set\tnoreorder\n\t" \
 __MODULE_JAL(__copy_user_inatomic) \
 ".set\tnoat\n\t" \
 __UA_ADDU "\t$1, %1, %2\n\t" \
 ".set\tat\n\t" \
 ".set\treorder" \
 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
 : \
 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
 DADDI_SCRATCH, "memory"); \
 __cu_len_r; \
})

#define __invoke_copy_from_kernel_inatomic(to, from, n) \
 __invoke_copy_from_user_inatomic(to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
 size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from,
 size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \
({ \
 register void *__cu_to_r __asm__("$4"); \
 register const void __user *__cu_from_r __asm__("$5"); \
 register long __cu_len_r __asm__("$6"); \
 \
 __cu_to_r = (to); \
 __cu_from_r = (from); \
 __cu_len_r = (n); \
 __asm__ __volatile__( \
 ".set\tnoreorder\n\t" \
 __MODULE_JAL(func_ptr) \
 ".set\tnoat\n\t" \
 __UA_ADDU "\t$1, %1, %2\n\t" \
 ".set\tat\n\t" \
 ".set\treorder" \
 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
 : \
 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
 DADDI_SCRATCH, "memory"); \
 __cu_len_r; \
})

#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \
({ \
 register void *__cu_to_r __asm__("$4"); \
 register const void __user *__cu_from_r __asm__("$5"); \
 register long __cu_len_r __asm__("$6"); \
 \
 __cu_to_r = (to); \
 __cu_from_r = (from); \
 __cu_len_r = (n); \
 __asm__ __volatile__( \
 __MODULE_JAL(func_ptr) \
 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
 : \
 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
 DADDI_SCRATCH, "memory"); \
 __cu_len_r; \
})

/*
 * Source or destination address is in userland. We need to go through
 * the TLB
 */
#define __invoke_copy_from_user(to, from, n) \
 __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)

#define __invoke_copy_from_user_inatomic(to, from, n) \
 __invoke_copy_from_user_eva_generic(to, from, n, \
 __copy_user_inatomic_eva)

#define __invoke_copy_to_user(to, from, n) \
 __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)

#define ___invoke_copy_in_user(to, from, n) \
 __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)

/*
 * Source or destination address in the kernel. We are not going through
 * the TLB
 */
#define __invoke_copy_from_kernel(to, from, n) \
 __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#define __invoke_copy_from_kernel_inatomic(to, from, n) \
 __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)

#define __invoke_copy_to_kernel(to, from, n) \
 __invoke_copy_to_user_eva_generic(to, from, n, __copy_user)

#define ___invoke_copy_in_kernel(to, from, n) \
 __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#endif /* CONFIG_EVA */

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n) \
({ \
 void *__cu_to; \
 const void __user *__cu_from; \
 long __cu_len; \
 \
 __cu_to = (to); \
 __cu_from = (from); \
 __cu_len = (n); \
 might_fault(); \
 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
 __cu_len); \
 __cu_len; \
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n) \
({ \
 void *__cu_to; \
 const void __user *__cu_from; \
 long __cu_len; \
 \
 __cu_to = (to); \
 __cu_from = (from); \
 __cu_len = (n); \
 if (segment_eq(get_fs(), get_ds())) { \
 __cu_len = __invoke_copy_from_kernel(__cu_to, \
 __cu_from, \
 __cu_len); \
 } else { \
 if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
 might_fault(); \
 __cu_len = __invoke_copy_from_user(__cu_to, \
 __cu_from, \
 __cu_len); \
 } \
 } \
 __cu_len; \
})

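/*
 * Typical use (ubuf and struct req are hypothetical):
 *
 *     struct req r;
 *
 *     if (copy_from_user(&r, ubuf, sizeof(r)))
 *             return -EFAULT;
 */
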
#define __copy_in_user(to, from, n) \
({ \
 void __user *__cu_to; \
 const void __user *__cu_from; \
 long __cu_len; \
 \
 __cu_to = (to); \
 __cu_from = (from); \
 __cu_len = (n); \
 if (segment_eq(get_fs(), get_ds())) { \
 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
 __cu_len); \
 } else { \
 might_fault(); \
 __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
 __cu_len); \
 } \
 __cu_len; \
})

#define copy_in_user(to, from, n) \
({ \
 void __user *__cu_to; \
 const void __user *__cu_from; \
 long __cu_len; \
 \
 __cu_to = (to); \
 __cu_from = (from); \
 __cu_len = (n); \
 if (segment_eq(get_fs(), get_ds())) { \
 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
 __cu_len); \
 } else { \
 if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
 access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
 might_fault(); \
 __cu_len = ___invoke_copy_in_user(__cu_to, \
 __cu_from, \
 __cu_len); \
 } \
 } \
 __cu_len; \
})

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Zero a block of memory in user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
 __kernel_size_t res;

 might_fault();
 __asm__ __volatile__(
 "move\t$4, %1\n\t"
 "move\t$5, $0\n\t"
 "move\t$6, %2\n\t"
 __MODULE_JAL(__bzero)
 "move\t%0, $6"
 : "=r" (res)
 : "r" (addr), "r" (size)
 : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

 return res;
}

#define clear_user(addr,n) \
({ \
 void __user * __cl_addr = (addr); \
 unsigned long __cl_size = (n); \
 if (__cl_size && access_ok(VERIFY_WRITE, \
 __cl_addr, __cl_size)) \
 __cl_size = __clear_user(__cl_addr, __cl_size); \
 __cl_size; \
})

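/*
 * Sketch (uaddr and len are hypothetical): clear_user() performs its
 * own access_ok() check and returns the number of bytes not cleared.
 *
 *     if (clear_user(uaddr, len))
 *             return -EFAULT;
 */
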
/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst: Destination address, in kernel space. This buffer must be at
 * least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
 long res;

 if (segment_eq(get_fs(), get_ds())) {
 __asm__ __volatile__(
 "move\t$4, %1\n\t"
 "move\t$5, %2\n\t"
 "move\t$6, %3\n\t"
 __MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
 "move\t%0, $2"
 : "=r" (res)
 : "r" (__to), "r" (__from), "r" (__len)
 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
 } else {
 might_fault();
 __asm__ __volatile__(
 "move\t$4, %1\n\t"
 "move\t$5, %2\n\t"
 "move\t$6, %3\n\t"
 __MODULE_JAL(__strncpy_from_user_nocheck_asm)
 "move\t%0, $2"
 : "=r" (res)
 : "r" (__to), "r" (__from), "r" (__len)
 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
 }

 return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst: Destination address, in kernel space. This buffer must be at
 * least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
 long res;

 if (segment_eq(get_fs(), get_ds())) {
 __asm__ __volatile__(
 "move\t$4, %1\n\t"
 "move\t$5, %2\n\t"
 "move\t$6, %3\n\t"
 __MODULE_JAL(__strncpy_from_kernel_asm)
 "move\t%0, $2"
 : "=r" (res)
 : "r" (__to), "r" (__from), "r" (__len)
 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
 } else {
 might_fault();
 __asm__ __volatile__(
 "move\t$4, %1\n\t"
 "move\t$5, %2\n\t"
 "move\t$6, %3\n\t"
 __MODULE_JAL(__strncpy_from_user_asm)
 "move\t%0, $2"
 : "=r" (res)
 : "r" (__to), "r" (__from), "r" (__len)
 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
 }

 return res;
}

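/*
 * Sketch (uname is hypothetical): the result must be checked both for
 * -EFAULT and for truncation (res == sizeof(buf) means no NUL fit).
 *
 *     char buf[32];
 *     long res = strncpy_from_user(buf, uname, sizeof(buf));
 *
 *     if (res < 0)
 *             return res;
 *     if (res == sizeof(buf))
 *             return -ENAMETOOLONG;
 */
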
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
{
 long res;

 if (segment_eq(get_fs(), get_ds())) {
 __asm__ __volatile__(
 "move\t$4, %1\n\t"
 __MODULE_JAL(__strlen_kernel_nocheck_asm)
 "move\t%0, $2"
 : "=r" (res)
 : "r" (s)
 : "$2", "$4", __UA_t0, "$31");
 } else {
 might_fault();
 __asm__ __volatile__(
 "move\t$4, %1\n\t"
 __MODULE_JAL(__strlen_user_nocheck_asm)
 "move\t%0, $2"
 : "=r" (res)
 : "r" (s)
 : "$2", "$4", __UA_t0, "$31");
 }

 return res;
}

/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
 long res;

 if (segment_eq(get_fs(), get_ds())) {
 __asm__ __volatile__(
 "move\t$4, %1\n\t"
 __MODULE_JAL(__strlen_kernel_asm)
 "move\t%0, $2"
 : "=r" (res)
 : "r" (s)
 : "$2", "$4", __UA_t0, "$31");
 } else {
 might_fault();
 __asm__ __volatile__(
 "move\t$4, %1\n\t"
 __MODULE_JAL(__strlen_user_asm)
 "move\t%0, $2"
 : "=r" (res)
 : "r" (s)
 : "$2", "$4", __UA_t0, "$31");
 }

 return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
 long res;

 if (segment_eq(get_fs(), get_ds())) {
 __asm__ __volatile__(
 "move\t$4, %1\n\t"
 "move\t$5, %2\n\t"
 __MODULE_JAL(__strnlen_kernel_nocheck_asm)
 "move\t%0, $2"
 : "=r" (res)
 : "r" (s), "r" (n)
 : "$2", "$4", "$5", __UA_t0, "$31");
 } else {
 might_fault();
 __asm__ __volatile__(
 "move\t$4, %1\n\t"
 "move\t$5, %2\n\t"
 __MODULE_JAL(__strnlen_user_nocheck_asm)
 "move\t%0, $2"
 : "=r" (res)
 : "r" (s), "r" (n)
 : "$2", "$4", "$5", __UA_t0, "$31");
 }

 return res;
}

/*
 * strnlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 * @n: The maximum valid length.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
 long res;

 might_fault();
 if (segment_eq(get_fs(), get_ds())) {
 __asm__ __volatile__(
 "move\t$4, %1\n\t"
 "move\t$5, %2\n\t"
 __MODULE_JAL(__strnlen_kernel_asm)
 "move\t%0, $2"
 : "=r" (res)
 : "r" (s), "r" (n)
 : "$2", "$4", "$5", __UA_t0, "$31");
 } else {
 __asm__ __volatile__(
 "move\t$4, %1\n\t"
 "move\t$5, %2\n\t"
 __MODULE_JAL(__strnlen_user_asm)
 "move\t%0, $2"
 : "=r" (res)
 : "r" (s), "r" (n)
 : "$2", "$4", "$5", __UA_t0, "$31");
 }

 return res;
}

struct exception_table_entry
{
 unsigned long insn;
 unsigned long nextinsn;
};
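
/*
 * Each __ex_table entry emitted above pairs the address of a
 * potentially faulting instruction (insn) with the address of its
 * fixup code (nextinsn); on a fault, fixup_exception() searches the
 * table and, on a match, resumes execution at the fixup.
 */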

extern int fixup_exception(struct pt_regs *regs);

#endif /* _ASM_UACCESS_H */
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2007 Maciej W. Rozycki
9 * Copyright (C) 2014, Imagination Technologies Ltd.
10 */
11#ifndef _ASM_UACCESS_H
12#define _ASM_UACCESS_H
13
14#include <linux/kernel.h>
15#include <linux/errno.h>
16#include <linux/thread_info.h>
17#include <asm/asm-eva.h>
18
19/*
20 * The fs value determines whether argument validity checking should be
21 * performed or not. If get_fs() == USER_DS, checking is performed, with
22 * get_fs() == KERNEL_DS, checking is bypassed.
23 *
24 * For historical reasons, these macros are grossly misnamed.
25 */
26#ifdef CONFIG_32BIT
27
28#ifdef CONFIG_KVM_GUEST
29#define __UA_LIMIT 0x40000000UL
30#else
31#define __UA_LIMIT 0x80000000UL
32#endif
33
34#define __UA_ADDR ".word"
35#define __UA_LA "la"
36#define __UA_ADDU "addu"
37#define __UA_t0 "$8"
38#define __UA_t1 "$9"
39
40#endif /* CONFIG_32BIT */
41
42#ifdef CONFIG_64BIT
43
44extern u64 __ua_limit;
45
46#define __UA_LIMIT __ua_limit
47
48#define __UA_ADDR ".dword"
49#define __UA_LA "dla"
50#define __UA_ADDU "daddu"
51#define __UA_t0 "$12"
52#define __UA_t1 "$13"
53
54#endif /* CONFIG_64BIT */
55
56/*
57 * USER_DS is a bitmask that has the bits set that may not be set in a valid
58 * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but
59 * the arithmetic we're doing only works if the limit is a power of two, so
60 * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid
61 * address in this range it's the process's problem, not ours :-)
62 */
63
64#ifdef CONFIG_KVM_GUEST
65#define KERNEL_DS ((mm_segment_t) { 0x80000000UL })
66#define USER_DS ((mm_segment_t) { 0xC0000000UL })
67#else
68#define KERNEL_DS ((mm_segment_t) { 0UL })
69#define USER_DS ((mm_segment_t) { __UA_LIMIT })
70#endif
71
72#define VERIFY_READ 0
73#define VERIFY_WRITE 1
74
75#define get_ds() (KERNEL_DS)
76#define get_fs() (current_thread_info()->addr_limit)
77#define set_fs(x) (current_thread_info()->addr_limit = (x))
78
79#define segment_eq(a, b) ((a).seg == (b).seg)
80
81/*
82 * eva_kernel_access() - determine whether kernel memory access on an EVA system
83 *
84 * Determines whether memory accesses should be performed to kernel memory
85 * on a system using Extended Virtual Addressing (EVA).
86 *
87 * Return: true if a kernel memory access on an EVA system, else false.
88 */
89static inline bool eva_kernel_access(void)
90{
91 if (!config_enabled(CONFIG_EVA))
92 return false;
93
94 return segment_eq(get_fs(), get_ds());
95}
96
97/*
98 * Is a address valid? This does a straightforward calculation rather
99 * than tests.
100 *
101 * Address valid if:
102 * - "addr" doesn't have any high-bits set
103 * - AND "size" doesn't have any high-bits set
104 * - AND "addr+size" doesn't have any high-bits set
105 * - OR we are in kernel mode.
106 *
107 * __ua_size() is a trick to avoid runtime checking of positive constant
108 * sizes; for those we already know at compile time that the size is ok.
109 */
110#define __ua_size(size) \
111 ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
112
113/*
114 * access_ok: - Checks if a user space pointer is valid
115 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
116 * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
117 * to write to a block, it is always safe to read from it.
118 * @addr: User space pointer to start of block to check
119 * @size: Size of block to check
120 *
121 * Context: User context only. This function may sleep if pagefaults are
122 * enabled.
123 *
124 * Checks if a pointer to a block of memory in user space is valid.
125 *
126 * Returns true (nonzero) if the memory block may be valid, false (zero)
127 * if it is definitely invalid.
128 *
129 * Note that, depending on architecture, this function probably just
130 * checks that the pointer is in the user space range - after calling
131 * this function, memory access functions may still return -EFAULT.
132 */
133
134#define __access_mask get_fs().seg
135
136#define __access_ok(addr, size, mask) \
137({ \
138 unsigned long __addr = (unsigned long) (addr); \
139 unsigned long __size = size; \
140 unsigned long __mask = mask; \
141 unsigned long __ok; \
142 \
143 __chk_user_ptr(addr); \
144 __ok = (signed long)(__mask & (__addr | (__addr + __size) | \
145 __ua_size(__size))); \
146 __ok == 0; \
147})
148
149#define access_ok(type, addr, size) \
150 likely(__access_ok((addr), (size), __access_mask))
151
152/*
153 * put_user: - Write a simple value into user space.
154 * @x: Value to copy to user space.
155 * @ptr: Destination address, in user space.
156 *
157 * Context: User context only. This function may sleep if pagefaults are
158 * enabled.
159 *
160 * This macro copies a single simple value from kernel space to user
161 * space. It supports simple types like char and int, but not larger
162 * data types like structures or arrays.
163 *
164 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
165 * to the result of dereferencing @ptr.
166 *
167 * Returns zero on success, or -EFAULT on error.
168 */
169#define put_user(x,ptr) \
170 __put_user_check((x), (ptr), sizeof(*(ptr)))
171
172/*
173 * get_user: - Get a simple variable from user space.
174 * @x: Variable to store result.
175 * @ptr: Source address, in user space.
176 *
177 * Context: User context only. This function may sleep if pagefaults are
178 * enabled.
179 *
180 * This macro copies a single simple variable from user space to kernel
181 * space. It supports simple types like char and int, but not larger
182 * data types like structures or arrays.
183 *
184 * @ptr must have pointer-to-simple-variable type, and the result of
185 * dereferencing @ptr must be assignable to @x without a cast.
186 *
187 * Returns zero on success, or -EFAULT on error.
188 * On error, the variable @x is set to zero.
189 */
190#define get_user(x,ptr) \
191 __get_user_check((x), (ptr), sizeof(*(ptr)))
192
193/*
194 * __put_user: - Write a simple value into user space, with less checking.
195 * @x: Value to copy to user space.
196 * @ptr: Destination address, in user space.
197 *
198 * Context: User context only. This function may sleep if pagefaults are
199 * enabled.
200 *
201 * This macro copies a single simple value from kernel space to user
202 * space. It supports simple types like char and int, but not larger
203 * data types like structures or arrays.
204 *
205 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
206 * to the result of dereferencing @ptr.
207 *
208 * Caller must check the pointer with access_ok() before calling this
209 * function.
210 *
211 * Returns zero on success, or -EFAULT on error.
212 */
213#define __put_user(x,ptr) \
214 __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
215
216/*
217 * __get_user: - Get a simple variable from user space, with less checking.
218 * @x: Variable to store result.
219 * @ptr: Source address, in user space.
220 *
221 * Context: User context only. This function may sleep if pagefaults are
222 * enabled.
223 *
224 * This macro copies a single simple variable from user space to kernel
225 * space. It supports simple types like char and int, but not larger
226 * data types like structures or arrays.
227 *
228 * @ptr must have pointer-to-simple-variable type, and the result of
229 * dereferencing @ptr must be assignable to @x without a cast.
230 *
231 * Caller must check the pointer with access_ok() before calling this
232 * function.
233 *
234 * Returns zero on success, or -EFAULT on error.
235 * On error, the variable @x is set to zero.
236 */
237#define __get_user(x,ptr) \
238 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
239
240struct __large_struct { unsigned long buf[100]; };
241#define __m(x) (*(struct __large_struct __user *)(x))
242
243/*
244 * Yuck. We need two variants, one for 64bit operation and one
245 * for 32 bit mode and old iron.
246 */
247#ifndef CONFIG_EVA
248#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
249#else
250/*
251 * Kernel specific functions for EVA. We need to use normal load instructions
252 * to read data from kernel when operating in EVA mode. We use these macros to
253 * avoid redefining __get_user_asm for EVA.
254 */
255#undef _loadd
256#undef _loadw
257#undef _loadh
258#undef _loadb
259#ifdef CONFIG_32BIT
260#define _loadd _loadw
261#else
262#define _loadd(reg, addr) "ld " reg ", " addr
263#endif
264#define _loadw(reg, addr) "lw " reg ", " addr
265#define _loadh(reg, addr) "lh " reg ", " addr
266#define _loadb(reg, addr) "lb " reg ", " addr
267
268#define __get_kernel_common(val, size, ptr) \
269do { \
270 switch (size) { \
271 case 1: __get_data_asm(val, _loadb, ptr); break; \
272 case 2: __get_data_asm(val, _loadh, ptr); break; \
273 case 4: __get_data_asm(val, _loadw, ptr); break; \
274 case 8: __GET_DW(val, _loadd, ptr); break; \
275 default: __get_user_unknown(); break; \
276 } \
277} while (0)
278#endif
279
280#ifdef CONFIG_32BIT
281#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
282#endif
283#ifdef CONFIG_64BIT
284#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
285#endif
286
287extern void __get_user_unknown(void);
288
289#define __get_user_common(val, size, ptr) \
290do { \
291 switch (size) { \
292 case 1: __get_data_asm(val, user_lb, ptr); break; \
293 case 2: __get_data_asm(val, user_lh, ptr); break; \
294 case 4: __get_data_asm(val, user_lw, ptr); break; \
295 case 8: __GET_DW(val, user_ld, ptr); break; \
296 default: __get_user_unknown(); break; \
297 } \
298} while (0)
299
300#define __get_user_nocheck(x, ptr, size) \
301({ \
302 int __gu_err; \
303 \
304 if (eva_kernel_access()) { \
305 __get_kernel_common((x), size, ptr); \
306 } else { \
307 __chk_user_ptr(ptr); \
308 __get_user_common((x), size, ptr); \
309 } \
310 __gu_err; \
311})
312
313#define __get_user_check(x, ptr, size) \
314({ \
315 int __gu_err = -EFAULT; \
316 const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
317 \
318 might_fault(); \
319 if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) { \
320 if (eva_kernel_access()) \
321 __get_kernel_common((x), size, __gu_ptr); \
322 else \
323 __get_user_common((x), size, __gu_ptr); \
324 } else \
325 (x) = 0; \
326 \
327 __gu_err; \
328})
329
330#define __get_data_asm(val, insn, addr) \
331{ \
332 long __gu_tmp; \
333 \
334 __asm__ __volatile__( \
335 "1: "insn("%1", "%3")" \n" \
336 "2: \n" \
337 " .insn \n" \
338 " .section .fixup,\"ax\" \n" \
339 "3: li %0, %4 \n" \
340 " move %1, $0 \n" \
341 " j 2b \n" \
342 " .previous \n" \
343 " .section __ex_table,\"a\" \n" \
344 " "__UA_ADDR "\t1b, 3b \n" \
345 " .previous \n" \
346 : "=r" (__gu_err), "=r" (__gu_tmp) \
347 : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
348 \
349 (val) = (__typeof__(*(addr))) __gu_tmp; \
350}
351
352/*
353 * Get a long long 64 using 32 bit registers.
354 */
355#define __get_data_asm_ll32(val, insn, addr) \
356{ \
357 union { \
358 unsigned long long l; \
359 __typeof__(*(addr)) t; \
360 } __gu_tmp; \
361 \
362 __asm__ __volatile__( \
363 "1: " insn("%1", "(%3)")" \n" \
364 "2: " insn("%D1", "4(%3)")" \n" \
365 "3: \n" \
366 " .insn \n" \
367 " .section .fixup,\"ax\" \n" \
368 "4: li %0, %4 \n" \
369 " move %1, $0 \n" \
370 " move %D1, $0 \n" \
371 " j 3b \n" \
372 " .previous \n" \
373 " .section __ex_table,\"a\" \n" \
374 " " __UA_ADDR " 1b, 4b \n" \
375 " " __UA_ADDR " 2b, 4b \n" \
376 " .previous \n" \
377 : "=r" (__gu_err), "=&r" (__gu_tmp.l) \
378 : "0" (0), "r" (addr), "i" (-EFAULT)); \
379 \
380 (val) = __gu_tmp.t; \
381}
382
383#ifndef CONFIG_EVA
384#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
385#else
386/*
387 * Kernel specific functions for EVA. We need to use normal load instructions
388 * to read data from kernel when operating in EVA mode. We use these macros to
389 * avoid redefining __get_data_asm for EVA.
390 */
391#undef _stored
392#undef _storew
393#undef _storeh
394#undef _storeb
395#ifdef CONFIG_32BIT
396#define _stored _storew
397#else
398#define _stored(reg, addr) "ld " reg ", " addr
399#endif
400
401#define _storew(reg, addr) "sw " reg ", " addr
402#define _storeh(reg, addr) "sh " reg ", " addr
403#define _storeb(reg, addr) "sb " reg ", " addr
404
405#define __put_kernel_common(ptr, size) \
406do { \
407 switch (size) { \
408 case 1: __put_data_asm(_storeb, ptr); break; \
409 case 2: __put_data_asm(_storeh, ptr); break; \
410 case 4: __put_data_asm(_storew, ptr); break; \
411 case 8: __PUT_DW(_stored, ptr); break; \
412 default: __put_user_unknown(); break; \
413 } \
414} while(0)
415#endif
416
417/*
418 * Yuck. We need two variants, one for 64bit operation and one
419 * for 32 bit mode and old iron.
420 */
421#ifdef CONFIG_32BIT
422#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
423#endif
424#ifdef CONFIG_64BIT
425#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
426#endif
427
#define __put_user_common(ptr, size) \
do { \
	switch (size) { \
	case 1: __put_data_asm(user_sb, ptr); break; \
	case 2: __put_data_asm(user_sh, ptr); break; \
	case 4: __put_data_asm(user_sw, ptr); break; \
	case 8: __PUT_DW(user_sd, ptr); break; \
	default: __put_user_unknown(); break; \
	} \
} while (0)

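/*
 * Illustrative usage sketch only, not part of this header: stores are
 * normally issued through the put_user()/__put_user() wrappers defined
 * earlier in this file.  The function below is hypothetical.
 */
#if 0
static int example_write_word(u32 __user *uaddr, u32 val)
{
	return put_user(val, uaddr) ? -EFAULT : 0;
}
#endif
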
#define __put_user_nocheck(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __pu_val; \
	int __pu_err = 0; \
 \
	__pu_val = (x); \
	if (eva_kernel_access()) { \
		__put_kernel_common(ptr, size); \
	} else { \
		__chk_user_ptr(ptr); \
		__put_user_common(ptr, size); \
	} \
	__pu_err; \
})

#define __put_user_check(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	int __pu_err = -EFAULT; \
 \
	might_fault(); \
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
		if (eva_kernel_access()) \
			__put_kernel_common(__pu_addr, size); \
		else \
			__put_user_common(__pu_addr, size); \
	} \
 \
	__pu_err; \
})

#define __put_data_asm(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1:	"insn("%z2", "%3")"	# __put_data_asm \n" \
	"2:						\n" \
	"	.insn					\n" \
	"	.section	.fixup,\"ax\"		\n" \
	"3:	li	%0, %4				\n" \
	"	j	2b				\n" \
	"	.previous				\n" \
	"	.section	__ex_table,\"a\"	\n" \
	"	" __UA_ADDR "	1b, 3b			\n" \
	"	.previous				\n" \
	: "=r" (__pu_err) \
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
	  "i" (-EFAULT)); \
}

#define __put_data_asm_ll32(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32 \n" \
	"2:	"insn("%D2", "4(%3)")"			\n" \
	"3:						\n" \
	"	.insn					\n" \
	"	.section	.fixup,\"ax\"		\n" \
	"4:	li	%0, %4				\n" \
	"	j	3b				\n" \
	"	.previous				\n" \
	"	.section	__ex_table,\"a\"	\n" \
	"	" __UA_ADDR "	1b, 4b			\n" \
	"	" __UA_ADDR "	2b, 4b			\n" \
	"	.previous" \
	: "=r" (__pu_err) \
	: "0" (0), "r" (__pu_val), "r" (ptr), \
	  "i" (-EFAULT)); \
}

extern void __put_user_unknown(void);

/*
 * ul{b,h,w} are assembler macros and there are no equivalent macros for
 * EVA. EVA unaligned accesses are instead handled in the ADE (address
 * error) exception handler.
 */
#ifndef CONFIG_EVA
/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x, ptr) \
	__put_user_unaligned_check((x), (ptr), sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x, ptr) \
	__get_user_unaligned_check((x), (ptr), sizeof(*(ptr)))

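/*
 * Illustrative usage sketch only (non-EVA kernels), not part of this
 * header: fetch a 32-bit field through a possibly-unaligned user
 * pointer, e.g. one derived from a packed structure.  The names below
 * are hypothetical.
 */
#if 0
static int example_read_unaligned(const u32 __user *uaddr, u32 *out)
{
	return get_user_unaligned(*out, uaddr) ? -EFAULT : 0;
}
#endif
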
/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x, ptr) \
	__put_user_unaligned_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x, ptr) \
	__get_user_unaligned_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * Yuck. We need two variants, one for 64-bit operation and one
 * for 32-bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
	__get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr) \
	__get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr) \
do { \
	switch (size) { \
	case 1: __get_data_asm(val, "lb", ptr); break; \
	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break; \
	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break; \
	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
	default: __get_user_unaligned_unknown(); break; \
	} \
} while (0)

#define __get_user_unaligned_nocheck(x, ptr, size) \
({ \
	int __gu_err; \
 \
	__get_user_unaligned_common((x), size, ptr); \
	__gu_err; \
})

#define __get_user_unaligned_check(x, ptr, size) \
({ \
	int __gu_err = -EFAULT; \
	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
 \
	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
		__get_user_unaligned_common((x), size, __gu_ptr); \
 \
	__gu_err; \
})

#define __get_data_unaligned_asm(val, insn, addr) \
{ \
	long __gu_tmp; \
 \
	__asm__ __volatile__( \
	"1:	" insn "	%1, %3			\n" \
	"2:						\n" \
	"	.insn					\n" \
	"	.section .fixup,\"ax\"			\n" \
	"3:	li	%0, %4				\n" \
	"	move	%1, $0				\n" \
	"	j	2b				\n" \
	"	.previous				\n" \
	"	.section __ex_table,\"a\"		\n" \
	"	"__UA_ADDR "\t1b, 3b			\n" \
	"	"__UA_ADDR "\t1b + 4, 3b		\n" \
	"	.previous				\n" \
	: "=r" (__gu_err), "=r" (__gu_tmp) \
	: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
 \
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Get a 64-bit quantity using 32-bit registers. The unaligned load
 * macros expand to two instructions each, hence the extra "1b + 4" and
 * "2b + 4" exception table entries.
 */
#define __get_user_unaligned_asm_ll32(val, addr) \
{ \
	unsigned long long __gu_tmp; \
 \
	__asm__ __volatile__( \
	"1:	ulw	%1, (%3)			\n" \
	"2:	ulw	%D1, 4(%3)			\n" \
	"	move	%0, $0				\n" \
	"3:						\n" \
	"	.insn					\n" \
	"	.section	.fixup,\"ax\"		\n" \
	"4:	li	%0, %4				\n" \
	"	move	%1, $0				\n" \
	"	move	%D1, $0				\n" \
	"	j	3b				\n" \
	"	.previous				\n" \
	"	.section	__ex_table,\"a\"	\n" \
	"	" __UA_ADDR "	1b, 4b			\n" \
	"	" __UA_ADDR "	1b + 4, 4b		\n" \
	"	" __UA_ADDR "	2b, 4b			\n" \
	"	" __UA_ADDR "	2b + 4, 4b		\n" \
	"	.previous				\n" \
	: "=r" (__gu_err), "=&r" (__gu_tmp) \
	: "0" (0), "r" (addr), "i" (-EFAULT)); \
	(val) = (__typeof__(*(addr))) __gu_tmp; \
}

/*
 * Yuck. We need two variants, one for 64-bit operation and one
 * for 32-bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_common(ptr, size) \
do { \
	switch (size) { \
	case 1: __put_data_asm("sb", ptr); break; \
	case 2: __put_user_unaligned_asm("ush", ptr); break; \
	case 4: __put_user_unaligned_asm("usw", ptr); break; \
	case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
	default: __put_user_unaligned_unknown(); break; \
	} \
} while (0)

#define __put_user_unaligned_nocheck(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __pu_val; \
	int __pu_err = 0; \
 \
	__pu_val = (x); \
	__put_user_unaligned_common(ptr, size); \
	__pu_err; \
})

#define __put_user_unaligned_check(x, ptr, size) \
({ \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	int __pu_err = -EFAULT; \
 \
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
		__put_user_unaligned_common(__pu_addr, size); \
 \
	__pu_err; \
})

#define __put_user_unaligned_asm(insn, ptr) \
{ \
	__asm__ __volatile__( \
	"1:	" insn "	%z2, %3	# __put_user_unaligned_asm \n" \
	"2:						\n" \
	"	.insn					\n" \
	"	.section	.fixup,\"ax\"		\n" \
	"3:	li	%0, %4				\n" \
	"	j	2b				\n" \
	"	.previous				\n" \
	"	.section	__ex_table,\"a\"	\n" \
	"	" __UA_ADDR "	1b, 3b			\n" \
	"	.previous				\n" \
	: "=r" (__pu_err) \
	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
	  "i" (-EFAULT)); \
}

#define __put_user_unaligned_asm_ll32(ptr) \
{ \
	__asm__ __volatile__( \
	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32 \n" \
	"2:	sw	%D2, 4(%3)			\n" \
	"3:						\n" \
	"	.insn					\n" \
	"	.section	.fixup,\"ax\"		\n" \
	"4:	li	%0, %4				\n" \
	"	j	3b				\n" \
	"	.previous				\n" \
	"	.section	__ex_table,\"a\"	\n" \
	"	" __UA_ADDR "	1b, 4b			\n" \
	"	" __UA_ADDR "	1b + 4, 4b		\n" \
	"	" __UA_ADDR "	2b, 4b			\n" \
	"	" __UA_ADDR "	2b + 4, 4b		\n" \
	"	.previous" \
	: "=r" (__pu_err) \
	: "0" (0), "r" (__pu_val), "r" (ptr), \
	  "i" (-EFAULT)); \
}

extern void __put_user_unaligned_unknown(void);
#endif /* !CONFIG_EVA */

/*
 * We're generating jumps to subroutines which may lie outside the range
 * of the jal instruction, so modules load the target address into a
 * register and use jalr instead.
 */
#ifdef MODULE
#define __MODULE_JAL(destination) \
	".set\tnoat\n\t" \
	__UA_LA "\t$1, " #destination "\n\t" \
	"jalr\t$1\n\t" \
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination) \
	"jal\t" #destination "\n\t"
#endif

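/*
 * Illustrative expansion (sketch): with MODULE defined,
 * __MODULE_JAL(__copy_user) emits roughly
 *
 *	.set	noat
 *	la	$1, __copy_user		# dla on 64-bit kernels
 *	jalr	$1
 *	.set	at
 *
 * while built-in code emits a direct "jal __copy_user".
 */
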
#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) && \
					      defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#ifndef CONFIG_EVA
#define __invoke_copy_to_user(to, from, n) \
({ \
	register void __user *__cu_to_r __asm__("$4"); \
	register const void *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
 \
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	__MODULE_JAL(__copy_user) \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

#define __invoke_copy_to_kernel(to, from, n) \
	__invoke_copy_to_user(to, from, n)

#endif

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
 \
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	might_fault(); \
	if (eva_kernel_access()) \
		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
						   __cu_len); \
	else \
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
						 __cu_len); \
	__cu_len; \
})

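/*
 * Illustrative usage sketch only, not part of this header: with the
 * unchecked variant the caller supplies the access_ok() check itself.
 * The names below are hypothetical.
 */
#if 0
static long example_push_buf(void __user *dst, const void *src, size_t len)
{
	if (!access_ok(VERIFY_WRITE, dst, len))
		return -EFAULT;
	return __copy_to_user(dst, src, len) ? -EFAULT : 0;
}
#endif
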
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
 \
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (eva_kernel_access()) \
		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
						   __cu_len); \
	else \
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
						 __cu_len); \
	__cu_len; \
})

#define __copy_from_user_inatomic(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
 \
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (eva_kernel_access()) \
		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
							      __cu_from, \
							      __cu_len); \
	else \
		__cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
							    __cu_from, \
							    __cu_len); \
	__cu_len; \
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void *__cu_from; \
	long __cu_len; \
 \
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (eva_kernel_access()) { \
		__cu_len = __invoke_copy_to_kernel(__cu_to, \
						   __cu_from, \
						   __cu_len); \
	} else { \
		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
			might_fault(); \
			__cu_len = __invoke_copy_to_user(__cu_to, \
							 __cu_from, \
							 __cu_len); \
		} \
	} \
	__cu_len; \
})

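/*
 * Illustrative usage sketch only, not part of this header: the checked
 * copy_to_user() is the common way to return a structure to userland,
 * e.g. from an ioctl handler.  struct example_state and the function
 * name are hypothetical.
 */
#if 0
static long example_ioctl_get(struct example_state __user *uarg,
			      const struct example_state *state)
{
	if (copy_to_user(uarg, state, sizeof(*state)))
		return -EFAULT;
	return 0;
}
#endif
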
#ifndef CONFIG_EVA

#define __invoke_copy_from_user(to, from, n) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
 \
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

#define __invoke_copy_from_kernel(to, from, n) \
	__invoke_copy_from_user(to, from, n)

/* For userland <-> userland operations */
#define ___invoke_copy_in_user(to, from, n) \
	__invoke_copy_from_user(to, from, n)

/* For kernel <-> kernel operations */
#define ___invoke_copy_in_kernel(to, from, n) \
	__invoke_copy_from_user(to, from, n)

#define __invoke_copy_from_user_inatomic(to, from, n) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
 \
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(__copy_user_inatomic) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

#define __invoke_copy_from_kernel_inatomic(to, from, n) \
	__invoke_copy_from_user_inatomic(to, from, n)

#else

/* EVA specific functions */

extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
				       size_t __n);
extern size_t __copy_from_user_eva(void *__to, const void *__from,
				   size_t __n);
extern size_t __copy_to_user_eva(void *__to, const void *__from,
				 size_t __n);
extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);

#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
 \
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	".set\tnoreorder\n\t" \
	__MODULE_JAL(func_ptr) \
	".set\tnoat\n\t" \
	__UA_ADDU "\t$1, %1, %2\n\t" \
	".set\tat\n\t" \
	".set\treorder" \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \
({ \
	register void *__cu_to_r __asm__("$4"); \
	register const void __user *__cu_from_r __asm__("$5"); \
	register long __cu_len_r __asm__("$6"); \
 \
	__cu_to_r = (to); \
	__cu_from_r = (from); \
	__cu_len_r = (n); \
	__asm__ __volatile__( \
	__MODULE_JAL(func_ptr) \
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
	: \
	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
	  DADDI_SCRATCH, "memory"); \
	__cu_len_r; \
})

/*
 * Source or destination address is in userland; we need to go through
 * the TLB.
 */
#define __invoke_copy_from_user(to, from, n) \
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)

#define __invoke_copy_from_user_inatomic(to, from, n) \
	__invoke_copy_from_user_eva_generic(to, from, n, \
					    __copy_user_inatomic_eva)

#define __invoke_copy_to_user(to, from, n) \
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)

#define ___invoke_copy_in_user(to, from, n) \
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)

/*
 * Source or destination address is in the kernel; we are not going
 * through the TLB.
 */
#define __invoke_copy_from_kernel(to, from, n) \
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#define __invoke_copy_from_kernel_inatomic(to, from, n) \
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)

#define __invoke_copy_to_kernel(to, from, n) \
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)

#define ___invoke_copy_in_kernel(to, from, n) \
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#endif /* CONFIG_EVA */

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
 \
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (eva_kernel_access()) { \
		__cu_len = __invoke_copy_from_kernel(__cu_to, \
						     __cu_from, \
						     __cu_len); \
	} else { \
		might_fault(); \
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
						   __cu_len); \
	} \
	__cu_len; \
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n) \
({ \
	void *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
 \
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (eva_kernel_access()) { \
		__cu_len = __invoke_copy_from_kernel(__cu_to, \
						     __cu_from, \
						     __cu_len); \
	} else { \
		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
			might_fault(); \
			__cu_len = __invoke_copy_from_user(__cu_to, \
							   __cu_from, \
							   __cu_len); \
		} \
	} \
	__cu_len; \
})

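/*
 * Illustrative usage sketch only, not part of this header: pulling a
 * request structure in from userland, e.g. on a write() or ioctl path.
 * struct example_state and the function name are hypothetical.
 */
#if 0
static long example_ioctl_set(struct example_state *state,
			      const struct example_state __user *uarg)
{
	if (copy_from_user(state, uarg, sizeof(*state)))
		return -EFAULT;
	return 0;
}
#endif
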
#define __copy_in_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
 \
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (eva_kernel_access()) { \
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
						    __cu_len); \
	} else { \
		might_fault(); \
		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
						  __cu_len); \
	} \
	__cu_len; \
})

#define copy_in_user(to, from, n) \
({ \
	void __user *__cu_to; \
	const void __user *__cu_from; \
	long __cu_len; \
 \
	__cu_to = (to); \
	__cu_from = (from); \
	__cu_len = (n); \
	if (eva_kernel_access()) { \
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
						    __cu_len); \
	} else { \
		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
			might_fault(); \
			__cu_len = ___invoke_copy_in_user(__cu_to, \
							  __cu_from, \
							  __cu_len); \
		} \
	} \
	__cu_len; \
})

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @addr: Destination address, in user space.
 * @size: Number of bytes to zero.
 *
 * Zero a block of memory in user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero_kernel)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	}

	return res;
}

#define clear_user(addr, n) \
({ \
	void __user *__cl_addr = (addr); \
	unsigned long __cl_size = (n); \
 \
	if (__cl_size && access_ok(VERIFY_WRITE, \
				   __cl_addr, __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; \
})

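/*
 * Illustrative usage sketch only, not part of this header: zero the
 * tail of a user buffer after a short read.  The names are hypothetical.
 */
#if 0
static long example_zero_tail(char __user *buf, size_t filled, size_t len)
{
	if (filled < len && clear_user(buf + filled, len - filled))
		return -EFAULT;
	return 0;
}
#endif
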
/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst: Destination address, in kernel space. This buffer must be at
 *	 least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst: Destination address, in kernel space. This buffer must be at
 *	 least @count bytes long.
 * @src: Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}

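/*
 * Illustrative usage sketch only, not part of this header: copy a short
 * name string in from userland, rejecting overlong input.  The names
 * below are hypothetical.
 */
#if 0
static long example_get_name(char *name, const char __user *uname, long size)
{
	long len = strncpy_from_user(name, uname, size);

	if (len < 0)
		return len;		/* -EFAULT */
	if (len == size)
		return -ENAMETOOLONG;	/* no NUL within @size bytes */
	return 0;
}
#endif
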
/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	}

	return res;
}

/* Returns: 0 if bad, string length + 1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}

/*
 * strnlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 * @n: The maximum number of bytes to search.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *	enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}

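/*
 * Illustrative usage sketch only, not part of this header: bound a user
 * string before allocating a kernel copy.  The names are hypothetical.
 */
#if 0
static long example_check_len(const char __user *ustr, long max)
{
	long len = strnlen_user(ustr, max);	/* includes the NUL */

	if (!len)
		return -EFAULT;
	if (len > max)
		return -ENAMETOOLONG;
	return len;
}
#endif
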
struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

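/*
 * Each entry pairs the address of an instruction that may fault (insn)
 * with the address of the fixup code to branch to if it does (nextinsn).
 * The __ex_table sections emitted by the macros above populate this
 * table; fixup_exception() searches it from the fault handler.
 */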
extern int fixup_exception(struct pt_regs *regs);

#endif /* _ASM_UACCESS_H */