/*
 * arch/arm/include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#include <asm-generic/uaccess-unaligned.h>
#else
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
#endif

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
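
/*
 * Illustrative sketch (added annotation, not part of the original
 * header): what a fixup pair looks like in practice.  The instruction
 * allowed to fault gets a local label, the recovery code lives out of
 * line, and the {insn, fixup} address pair is recorded in __ex_table,
 * mirroring the __get_user_asm()/__put_user_asm() macros below:
 *
 *      1:      ldrt    r2, [r0]                @ may fault
 *      2:      ...                             @ normal path continues
 *      .pushsection .text.fixup, "ax"
 *      3:      mov     r0, #-EFAULT            @ out-of-line recovery
 *              b       2b
 *      .popsection
 *      .pushsection __ex_table, "a"
 *      .long   1b, 3b
 *      .popsection
 */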

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertently
 * perform such accesses (eg, via list poison values) which could then
 * be exploited for privilege escalation.
 */
static inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
        unsigned int old_domain = get_domain();

        /* Set the current domain access to permit user accesses */
        set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
                   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

        return old_domain;
#else
        return 0;
#endif
}

static inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
        /* Restore the user access mask */
        set_domain(flags);
#endif
}

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS 0x00000000
#define get_ds() (KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS TASK_SIZE
#define get_fs() (current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
        current_thread_info()->addr_limit = fs;
        modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a, b) ((a) == (b))

/* We use 33-bit arithmetic here... */
#define __range_ok(addr, size) ({ \
        unsigned long flag, roksum; \
        __chk_user_ptr(addr); \
        __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
                : "=&r" (flag), "=&r" (roksum) \
                : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
                : "cc"); \
        flag; })
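
/*
 * Worked example for the 33-bit check above (added annotation, not part
 * of the original header): "adds" computes addr + size and keeps any
 * overflow in the carry flag as a 33rd bit; "sbcccs" then compares the
 * sum against addr_limit only when no overflow occurred, and "movcc"
 * clears the result on success.  __range_ok() therefore yields 0 (OK)
 * only when addr + size neither wraps past 32 bits nor exceeds
 * addr_limit; e.g. addr = 0xfffffff0 with size = 0x20 wraps and is
 * rejected regardless of addr_limit.
 */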

/*
 * Single-value transfer routines. They automatically use the right
 * size if we just have the right pointer type. Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value. This means zeroing out the destination variable
 * or buffer on error. Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path. When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __GUP_CLOBBER_1 "lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2 "ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4 "lr", "cc"
#define __GUP_CLOBBER_32t_8 "lr", "cc"
#define __GUP_CLOBBER_8 "lr", "cc"

#define __get_user_x(__r2, __p, __e, __l, __s) \
        __asm__ __volatile__ ( \
                __asmeq("%0", "r0") __asmeq("%1", "r2") \
                __asmeq("%3", "r1") \
                "bl __get_user_" #__s \
                : "=&r" (__e), "=r" (__r2) \
                : "0" (__p), "r" (__l) \
                : __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s) \
        __get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s) \
        __asm__ __volatile__ ( \
                __asmeq("%0", "r0") __asmeq("%1", "r2") \
                __asmeq("%3", "r1") \
                "bl __get_user_64t_" #__s \
                : "=&r" (__e), "=r" (__r2) \
                : "0" (__p), "r" (__l) \
                : __GUP_CLOBBER_##__s)
#else
#define __get_user_x_64t __get_user_x
#endif


#define __get_user_check(x, p) \
        ({ \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
                register const typeof(*(p)) __user *__p asm("r0") = (p); \
                register typeof(x) __r2 asm("r2"); \
                register unsigned long __l asm("r1") = __limit; \
                register int __e asm("r0"); \
                unsigned int __ua_flags = uaccess_save_and_enable(); \
                switch (sizeof(*(__p))) { \
                case 1: \
                        if (sizeof((x)) >= 8) \
                                __get_user_x_64t(__r2, __p, __e, __l, 1); \
                        else \
                                __get_user_x(__r2, __p, __e, __l, 1); \
                        break; \
                case 2: \
                        if (sizeof((x)) >= 8) \
                                __get_user_x_64t(__r2, __p, __e, __l, 2); \
                        else \
                                __get_user_x(__r2, __p, __e, __l, 2); \
                        break; \
                case 4: \
                        if (sizeof((x)) >= 8) \
                                __get_user_x_64t(__r2, __p, __e, __l, 4); \
                        else \
                                __get_user_x(__r2, __p, __e, __l, 4); \
                        break; \
                case 8: \
                        if (sizeof((x)) < 8) \
                                __get_user_x_32t(__r2, __p, __e, __l, 4); \
                        else \
                                __get_user_x(__r2, __p, __e, __l, 8); \
                        break; \
                default: __e = __get_user_bad(); break; \
                } \
                uaccess_restore(__ua_flags); \
                x = (typeof(*(p))) __r2; \
                __e; \
        })

#define get_user(x, p) \
        ({ \
                might_fault(); \
                __get_user_check(x, p); \
        })
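
/*
 * Usage sketch (added for illustration; the variable names are
 * hypothetical, not from this header): get_user() validates the user
 * pointer, returns 0 on success or -EFAULT on a faulting access, and
 * zeroes the destination on failure:
 *
 *      int __user *uptr;       // e.g. a syscall argument
 *      int val;
 *
 *      if (get_user(val, uptr))
 *              return -EFAULT;
 */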

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s) \
        ({ \
                unsigned long __limit = current_thread_info()->addr_limit - 1; \
                register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
                register const void __user *__p asm("r0") = __ptr; \
                register unsigned long __l asm("r1") = __limit; \
                register int __e asm("r0"); \
                __asm__ __volatile__ ( \
                        __asmeq("%0", "r0") __asmeq("%2", "r2") \
                        __asmeq("%3", "r1") \
                        "bl __put_user_" #__s \
                        : "=&r" (__e) \
                        : "0" (__p), "r" (__r2), "r" (__l) \
                        : "ip", "lr", "cc"); \
                __err = __e; \
        })

#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS KERNEL_DS

#define segment_eq(a, b) (1)
#define __addr_ok(addr) ((void)(addr), 1)
#define __range_ok(addr, size) ((void)(addr), 0)
#define get_fs() (KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x, p) __get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#define access_ok(type, addr, size) (__range_ok(addr, size) == 0)

#define user_addr_max() \
        (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success. Note that these
 * versions are void (ie, don't return a value as such).
 */
#define __get_user(x, ptr) \
({ \
        long __gu_err = 0; \
        __get_user_err((x), (ptr), __gu_err); \
        __gu_err; \
})

#define __get_user_error(x, ptr, err) \
({ \
        __get_user_err((x), (ptr), err); \
        (void) 0; \
})

#define __get_user_err(x, ptr, err) \
do { \
        unsigned long __gu_addr = (unsigned long)(ptr); \
        unsigned long __gu_val; \
        unsigned int __ua_flags; \
        __chk_user_ptr(ptr); \
        might_fault(); \
        __ua_flags = uaccess_save_and_enable(); \
        switch (sizeof(*(ptr))) { \
        case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
        case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
        case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
        default: (__gu_val) = __get_user_bad(); \
        } \
        uaccess_restore(__ua_flags); \
        (x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)

#define __get_user_asm(x, addr, err, instr) \
        __asm__ __volatile__( \
        "1: " TUSER(instr) " %1, [%2], #0\n" \
        "2:\n" \
        " .pushsection .text.fixup,\"ax\"\n" \
        " .align 2\n" \
        "3: mov %0, %3\n" \
        " mov %1, #0\n" \
        " b 2b\n" \
        " .popsection\n" \
        " .pushsection __ex_table,\"a\"\n" \
        " .align 3\n" \
        " .long 1b, 3b\n" \
        " .popsection" \
        : "+r" (err), "=&r" (x) \
        : "r" (addr), "i" (-EFAULT) \
        : "cc")

#define __get_user_asm_byte(x, addr, err) \
        __get_user_asm(x, addr, err, ldrb)

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err) \
({ \
        unsigned long __b1, __b2; \
        __get_user_asm_byte(__b1, __gu_addr, err); \
        __get_user_asm_byte(__b2, __gu_addr + 1, err); \
        (x) = __b1 | (__b2 << 8); \
})
#else
#define __get_user_asm_half(x, __gu_addr, err) \
({ \
        unsigned long __b1, __b2; \
        __get_user_asm_byte(__b1, __gu_addr, err); \
        __get_user_asm_byte(__b2, __gu_addr + 1, err); \
        (x) = (__b1 << 8) | __b2; \
})
#endif

#define __get_user_asm_word(x, addr, err) \
        __get_user_asm(x, addr, err, ldr)


#define __put_user_switch(x, ptr, __err, __fn) \
        do { \
                const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
                __typeof__(*(ptr)) __pu_val = (x); \
                unsigned int __ua_flags; \
                might_fault(); \
                __ua_flags = uaccess_save_and_enable(); \
                switch (sizeof(*(ptr))) { \
                case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \
                case 2: __fn(__pu_val, __pu_ptr, __err, 2); break; \
                case 4: __fn(__pu_val, __pu_ptr, __err, 4); break; \
                case 8: __fn(__pu_val, __pu_ptr, __err, 8); break; \
                default: __err = __put_user_bad(); break; \
                } \
                uaccess_restore(__ua_flags); \
        } while (0)

#define put_user(x, ptr) \
({ \
        int __pu_err = 0; \
        __put_user_switch((x), (ptr), __pu_err, __put_user_check); \
        __pu_err; \
})
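
/*
 * Usage sketch (added for illustration): put_user() is the write-side
 * counterpart and likewise returns 0 on success or -EFAULT:
 *
 *      if (put_user(val, uptr))
 *              return -EFAULT;
 */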

#define __put_user(x, ptr) \
({ \
        long __pu_err = 0; \
        __put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
        __pu_err; \
})

#define __put_user_error(x, ptr, err) \
({ \
        __put_user_switch((x), (ptr), (err), __put_user_nocheck); \
        (void) 0; \
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
        do { \
                unsigned long __pu_addr = (unsigned long)__pu_ptr; \
                __put_user_nocheck_##__size(x, __pu_addr, __err); \
        } while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#define __put_user_asm(x, __pu_addr, err, instr) \
        __asm__ __volatile__( \
        "1: " TUSER(instr) " %1, [%2], #0\n" \
        "2:\n" \
        " .pushsection .text.fixup,\"ax\"\n" \
        " .align 2\n" \
        "3: mov %0, %3\n" \
        " b 2b\n" \
        " .popsection\n" \
        " .pushsection __ex_table,\"a\"\n" \
        " .align 3\n" \
        " .long 1b, 3b\n" \
        " .popsection" \
        : "+r" (err) \
        : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
        : "cc")

#define __put_user_asm_byte(x, __pu_addr, err) \
        __put_user_asm(x, __pu_addr, err, strb)

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err) \
({ \
        unsigned long __temp = (__force unsigned long)(x); \
        __put_user_asm_byte(__temp, __pu_addr, err); \
        __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
})
#else
#define __put_user_asm_half(x, __pu_addr, err) \
({ \
        unsigned long __temp = (__force unsigned long)(x); \
        __put_user_asm_byte(__temp >> 8, __pu_addr, err); \
        __put_user_asm_byte(__temp, __pu_addr + 1, err); \
})
#endif

#define __put_user_asm_word(x, __pu_addr, err) \
        __put_user_asm(x, __pu_addr, err, str)

#ifndef __ARMEB__
#define __reg_oper0 "%R2"
#define __reg_oper1 "%Q2"
#else
#define __reg_oper0 "%Q2"
#define __reg_oper1 "%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err) \
        __asm__ __volatile__( \
        ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
        ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \
        THUMB( "1: " TUSER(str) " " __reg_oper1 ", [%1]\n" ) \
        THUMB( "2: " TUSER(str) " " __reg_oper0 ", [%1, #4]\n" ) \
        "3:\n" \
        " .pushsection .text.fixup,\"ax\"\n" \
        " .align 2\n" \
        "4: mov %0, %3\n" \
        " b 3b\n" \
        " .popsection\n" \
        " .pushsection __ex_table,\"a\"\n" \
        " .align 3\n" \
        " .long 1b, 4b\n" \
        " .long 2b, 4b\n" \
        " .popsection" \
        : "+r" (err), "+r" (__pu_addr) \
        : "r" (x), "i" (-EFAULT) \
        : "cc")


#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned int __ua_flags;

        __ua_flags = uaccess_save_and_enable();
        n = arm_copy_from_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
        unsigned int __ua_flags;
        __ua_flags = uaccess_save_and_enable();
        n = arm_copy_to_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
#else
        return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
        unsigned int __ua_flags = uaccess_save_and_enable();
        n = arm_clear_user(addr, n);
        uaccess_restore(__ua_flags);
        return n;
}

#else
#define __arch_copy_from_user(to, from, n) \
        (memcpy(to, (void __force *)from, n), 0)
#define __arch_copy_to_user(to, from, n) \
        (memcpy((void __force *)to, from, n), 0)
#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
#endif

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
        check_object_size(to, n, false);
        return __arch_copy_from_user(to, from, n);
}

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned long res = n;

        check_object_size(to, n, false);

        if (likely(access_ok(VERIFY_READ, from, n)))
                res = __arch_copy_from_user(to, from, n);
        if (unlikely(res))
                memset(to + (n - res), 0, res);
        return res;
}
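
/*
 * Usage sketch (added for illustration; 'karg' and 'uarg' are
 * hypothetical names): copy_from_user() returns the number of bytes
 * that could not be copied, and the tail of the kernel buffer is
 * zeroed on a partial fault so a caller that ignores the error cannot
 * see stale kernel data:
 *
 *      struct some_args karg;
 *
 *      if (copy_from_user(&karg, uarg, sizeof(karg)))
 *              return -EFAULT;
 */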

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
        check_object_size(from, n, true);

        return __arch_copy_to_user(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        check_object_size(from, n, true);

        if (access_ok(VERIFY_WRITE, to, n))
                n = __arch_copy_to_user(to, from, n);
        return n;
}

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                n = __clear_user(to, n);
        return n;
}

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/uaccess.h
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/page.h>
#include <asm/domain.h>
#include <asm/unaligned.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#include <asm/extable.h>

/*
 * These two functions allow hooking accesses to userspace to increase
 * system integrity by ensuring that the kernel can not inadvertently
 * perform such accesses (eg, via list poison values) which could then
 * be exploited for privilege escalation.
 */
static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
        unsigned int old_domain = get_domain();

        /* Set the current domain access to permit user accesses */
        set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
                   domain_val(DOMAIN_USER, DOMAIN_CLIENT));

        return old_domain;
#else
        return 0;
#endif
}

static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
        /* Restore the user access mask */
        set_domain(flags);
#endif
}

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

#ifdef CONFIG_MMU

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
        __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
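
/*
 * Example (added annotation, not part of the original header):
 * __inttype(char) and __inttype(int) evaluate to unsigned long, while
 * __inttype(long long) evaluates to unsigned long long, so the "r2"
 * temporary in __get_user_check() below is always wide enough for the
 * value being transferred.
 */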

/*
 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 * is above the current addr_limit.
 */
#define uaccess_mask_range_ptr(ptr, size) \
        ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
                                                    size_t size)
{
        void __user *safe_ptr = (void __user *)ptr;
        unsigned long tmp;

        asm volatile(
        " .syntax unified\n"
        " sub %1, %3, #1\n"
        " subs %1, %1, %0\n"
        " addhs %1, %1, #1\n"
        " subshs %1, %1, %2\n"
        " movlo %0, #0\n"
        : "+r" (safe_ptr), "=&r" (tmp)
        : "r" (size), "r" (TASK_SIZE)
        : "cc");

        csdb();
        return safe_ptr;
}
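
/*
 * Usage sketch (added for illustration): the masked pointer is used in
 * place of the raw user pointer once the range has been checked, so a
 * mispredicted access_ok() branch cannot speculatively dereference a
 * kernel address:
 *
 *      if (!access_ok(uptr, len))
 *              return -EFAULT;
 *      uptr = uaccess_mask_range_ptr(uptr, len);
 *      // ... then copy through uptr ...
 */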

/*
 * Single-value transfer routines. They automatically use the right
 * size if we just have the right pointer type. Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value. This means zeroing out the destination variable
 * or buffer on error. Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path. When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_32t_8(void *);
extern int __get_user_8(void *);
extern int __get_user_64t_1(void *);
extern int __get_user_64t_2(void *);
extern int __get_user_64t_4(void *);

#define __get_user_x(__r2, __p, __e, __l, __s) \
        __asm__ __volatile__ ( \
                __asmeq("%0", "r0") __asmeq("%1", "r2") \
                __asmeq("%3", "r1") \
                "bl __get_user_" #__s \
                : "=&r" (__e), "=r" (__r2) \
                : "0" (__p), "r" (__l) \
                : "ip", "lr", "cc")

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_x_32t(__r2, __p, __e, __l, __s) \
        __get_user_x(__r2, __p, __e, __l, 32t_8)
#else
#define __get_user_x_32t __get_user_x
#endif

/*
 * storing result into proper least significant word of 64bit target var,
 * different only for big endian case where 64 bit __r2 lsw is r3:
 */
#ifdef __ARMEB__
#define __get_user_x_64t(__r2, __p, __e, __l, __s) \
        __asm__ __volatile__ ( \
                __asmeq("%0", "r0") __asmeq("%1", "r2") \
                __asmeq("%3", "r1") \
                "bl __get_user_64t_" #__s \
                : "=&r" (__e), "=r" (__r2) \
                : "0" (__p), "r" (__l) \
                : "ip", "lr", "cc")
#else
#define __get_user_x_64t __get_user_x
#endif


#define __get_user_check(x, p) \
        ({ \
                unsigned long __limit = TASK_SIZE - 1; \
                register typeof(*(p)) __user *__p asm("r0") = (p); \
                register __inttype(x) __r2 asm("r2"); \
                register unsigned long __l asm("r1") = __limit; \
                register int __e asm("r0"); \
                unsigned int __ua_flags = uaccess_save_and_enable(); \
                int __tmp_e; \
                switch (sizeof(*(__p))) { \
                case 1: \
                        if (sizeof((x)) >= 8) \
                                __get_user_x_64t(__r2, __p, __e, __l, 1); \
                        else \
                                __get_user_x(__r2, __p, __e, __l, 1); \
                        break; \
                case 2: \
                        if (sizeof((x)) >= 8) \
                                __get_user_x_64t(__r2, __p, __e, __l, 2); \
                        else \
                                __get_user_x(__r2, __p, __e, __l, 2); \
                        break; \
                case 4: \
                        if (sizeof((x)) >= 8) \
                                __get_user_x_64t(__r2, __p, __e, __l, 4); \
                        else \
                                __get_user_x(__r2, __p, __e, __l, 4); \
                        break; \
                case 8: \
                        if (sizeof((x)) < 8) \
                                __get_user_x_32t(__r2, __p, __e, __l, 4); \
                        else \
                                __get_user_x(__r2, __p, __e, __l, 8); \
                        break; \
                default: __e = __get_user_bad(); break; \
                } \
                __tmp_e = __e; \
                uaccess_restore(__ua_flags); \
                x = (typeof(*(p))) __r2; \
                __tmp_e; \
        })

#define get_user(x, p) \
        ({ \
                might_fault(); \
                __get_user_check(x, p); \
        })

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_check(__pu_val, __ptr, __err, __s) \
        ({ \
                unsigned long __limit = TASK_SIZE - 1; \
                register typeof(__pu_val) __r2 asm("r2") = __pu_val; \
                register const void __user *__p asm("r0") = __ptr; \
                register unsigned long __l asm("r1") = __limit; \
                register int __e asm("r0"); \
                __asm__ __volatile__ ( \
                        __asmeq("%0", "r0") __asmeq("%2", "r2") \
                        __asmeq("%3", "r1") \
                        "bl __put_user_" #__s \
                        : "=&r" (__e) \
                        : "0" (__p), "r" (__r2), "r" (__l) \
                        : "ip", "lr", "cc"); \
                __err = __e; \
        })

#else /* CONFIG_MMU */

#define get_user(x, p) __get_user(x, p)
#define __put_user_check __put_user_nocheck

#endif /* CONFIG_MMU */

#include <asm-generic/access_ok.h>

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1, it is not worth fixing the non-
 * verifying accessors, because we need to add verification of the
 * address space there. Force these to use the standard get_user()
 * version instead.
 */
#define __get_user(x, ptr) get_user(x, ptr)
#else

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success. Note that these
 * versions are void (ie, don't return a value as such).
 */
#define __get_user(x, ptr) \
({ \
        long __gu_err = 0; \
        __get_user_err((x), (ptr), __gu_err, TUSER()); \
        __gu_err; \
})

#define __get_user_err(x, ptr, err, __t) \
do { \
        unsigned long __gu_addr = (unsigned long)(ptr); \
        unsigned long __gu_val; \
        unsigned int __ua_flags; \
        __chk_user_ptr(ptr); \
        might_fault(); \
        __ua_flags = uaccess_save_and_enable(); \
        switch (sizeof(*(ptr))) { \
        case 1: __get_user_asm_byte(__gu_val, __gu_addr, err, __t); break; \
        case 2: __get_user_asm_half(__gu_val, __gu_addr, err, __t); break; \
        case 4: __get_user_asm_word(__gu_val, __gu_addr, err, __t); break; \
        default: (__gu_val) = __get_user_bad(); \
        } \
        uaccess_restore(__ua_flags); \
        (x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
#endif

#define __get_user_asm(x, addr, err, instr) \
        __asm__ __volatile__( \
        "1: " instr " %1, [%2], #0\n" \
        "2:\n" \
        " .pushsection .text.fixup,\"ax\"\n" \
        " .align 2\n" \
        "3: mov %0, %3\n" \
        " mov %1, #0\n" \
        " b 2b\n" \
        " .popsection\n" \
        " .pushsection __ex_table,\"a\"\n" \
        " .align 3\n" \
        " .long 1b, 3b\n" \
        " .popsection" \
        : "+r" (err), "=&r" (x) \
        : "r" (addr), "i" (-EFAULT) \
        : "cc")

#define __get_user_asm_byte(x, addr, err, __t) \
        __get_user_asm(x, addr, err, "ldrb" __t)

#if __LINUX_ARM_ARCH__ >= 6

#define __get_user_asm_half(x, addr, err, __t) \
        __get_user_asm(x, addr, err, "ldrh" __t)

#else

#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err, __t) \
({ \
        unsigned long __b1, __b2; \
        __get_user_asm_byte(__b1, __gu_addr, err, __t); \
        __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
        (x) = __b1 | (__b2 << 8); \
})
#else
#define __get_user_asm_half(x, __gu_addr, err, __t) \
({ \
        unsigned long __b1, __b2; \
        __get_user_asm_byte(__b1, __gu_addr, err, __t); \
        __get_user_asm_byte(__b2, __gu_addr + 1, err, __t); \
        (x) = (__b1 << 8) | __b2; \
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __get_user_asm_word(x, addr, err, __t) \
        __get_user_asm(x, addr, err, "ldr" __t)

#define __put_user_switch(x, ptr, __err, __fn) \
        do { \
                const __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
                __typeof__(*(ptr)) __pu_val = (x); \
                unsigned int __ua_flags; \
                might_fault(); \
                __ua_flags = uaccess_save_and_enable(); \
                switch (sizeof(*(ptr))) { \
                case 1: __fn(__pu_val, __pu_ptr, __err, 1); break; \
                case 2: __fn(__pu_val, __pu_ptr, __err, 2); break; \
                case 4: __fn(__pu_val, __pu_ptr, __err, 4); break; \
                case 8: __fn(__pu_val, __pu_ptr, __err, 8); break; \
                default: __err = __put_user_bad(); break; \
                } \
                uaccess_restore(__ua_flags); \
        } while (0)

#define put_user(x, ptr) \
({ \
        int __pu_err = 0; \
        __put_user_switch((x), (ptr), __pu_err, __put_user_check); \
        __pu_err; \
})

#ifdef CONFIG_CPU_SPECTRE
/*
 * When mitigating Spectre variant 1.1, all accessors need to include
 * verification of the address space.
 */
#define __put_user(x, ptr) put_user(x, ptr)

#else
#define __put_user(x, ptr) \
({ \
        long __pu_err = 0; \
        __put_user_switch((x), (ptr), __pu_err, __put_user_nocheck); \
        __pu_err; \
})

#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
        do { \
                unsigned long __pu_addr = (unsigned long)__pu_ptr; \
                __put_user_nocheck_##__size(x, __pu_addr, __err, TUSER()); \
        } while (0)

#define __put_user_nocheck_1 __put_user_asm_byte
#define __put_user_nocheck_2 __put_user_asm_half
#define __put_user_nocheck_4 __put_user_asm_word
#define __put_user_nocheck_8 __put_user_asm_dword

#endif /* !CONFIG_CPU_SPECTRE */

#define __put_user_asm(x, __pu_addr, err, instr) \
        __asm__ __volatile__( \
        "1: " instr " %1, [%2], #0\n" \
        "2:\n" \
        " .pushsection .text.fixup,\"ax\"\n" \
        " .align 2\n" \
        "3: mov %0, %3\n" \
        " b 2b\n" \
        " .popsection\n" \
        " .pushsection __ex_table,\"a\"\n" \
        " .align 3\n" \
        " .long 1b, 3b\n" \
        " .popsection" \
        : "+r" (err) \
        : "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
        : "cc")

#define __put_user_asm_byte(x, __pu_addr, err, __t) \
        __put_user_asm(x, __pu_addr, err, "strb" __t)

#if __LINUX_ARM_ARCH__ >= 6

#define __put_user_asm_half(x, __pu_addr, err, __t) \
        __put_user_asm(x, __pu_addr, err, "strh" __t)

#else

#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err, __t) \
({ \
        unsigned long __temp = (__force unsigned long)(x); \
        __put_user_asm_byte(__temp, __pu_addr, err, __t); \
        __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t); \
})
#else
#define __put_user_asm_half(x, __pu_addr, err, __t) \
({ \
        unsigned long __temp = (__force unsigned long)(x); \
        __put_user_asm_byte(__temp >> 8, __pu_addr, err, __t); \
        __put_user_asm_byte(__temp, __pu_addr + 1, err, __t); \
})
#endif

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#define __put_user_asm_word(x, __pu_addr, err, __t) \
        __put_user_asm(x, __pu_addr, err, "str" __t)

#ifndef __ARMEB__
#define __reg_oper0 "%R2"
#define __reg_oper1 "%Q2"
#else
#define __reg_oper0 "%Q2"
#define __reg_oper1 "%R2"
#endif

#define __put_user_asm_dword(x, __pu_addr, err, __t) \
        __asm__ __volatile__( \
        ARM( "1: str" __t " " __reg_oper1 ", [%1], #4\n" ) \
        ARM( "2: str" __t " " __reg_oper0 ", [%1]\n" ) \
        THUMB( "1: str" __t " " __reg_oper1 ", [%1]\n" ) \
        THUMB( "2: str" __t " " __reg_oper0 ", [%1, #4]\n" ) \
        "3:\n" \
        " .pushsection .text.fixup,\"ax\"\n" \
        " .align 2\n" \
        "4: mov %0, %3\n" \
        " b 3b\n" \
        " .popsection\n" \
        " .pushsection __ex_table,\"a\"\n" \
        " .align 3\n" \
        " .long 1b, 4b\n" \
        " .long 2b, 4b\n" \
        " .popsection" \
        : "+r" (err), "+r" (__pu_addr) \
        : "r" (x), "i" (-EFAULT) \
        : "cc")

#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
        const type *__pk_ptr = (src); \
        unsigned long __src = (unsigned long)(__pk_ptr); \
        type __val; \
        int __err = 0; \
        switch (sizeof(type)) { \
        case 1: __get_user_asm_byte(__val, __src, __err, ""); break; \
        case 2: __get_user_asm_half(__val, __src, __err, ""); break; \
        case 4: __get_user_asm_word(__val, __src, __err, ""); break; \
        case 8: { \
                u32 *__v32 = (u32*)&__val; \
                __get_user_asm_word(__v32[0], __src, __err, ""); \
                if (__err) \
                        break; \
                __get_user_asm_word(__v32[1], __src+4, __err, ""); \
                break; \
        } \
        default: __err = __get_user_bad(); break; \
        } \
        if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) \
                put_unaligned(__val, (type *)(dst)); \
        else \
                *(type *)(dst) = __val; /* aligned by caller */ \
        if (__err) \
                goto err_label; \
} while (0)
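
/*
 * Usage sketch (added for illustration): these helpers are normally
 * reached through get_kernel_nofault()/copy_from_kernel_nofault()
 * rather than invoked directly:
 *
 *      unsigned long val;
 *
 *      if (get_kernel_nofault(val, (unsigned long *)addr))
 *              return -EFAULT; // addr was not a readable kernel address
 */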

#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
        const type *__pk_ptr = (dst); \
        unsigned long __dst = (unsigned long)__pk_ptr; \
        int __err = 0; \
        type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
                     ? get_unaligned((type *)(src)) \
                     : *(type *)(src); /* aligned by caller */ \
        switch (sizeof(type)) { \
        case 1: __put_user_asm_byte(__val, __dst, __err, ""); break; \
        case 2: __put_user_asm_half(__val, __dst, __err, ""); break; \
        case 4: __put_user_asm_word(__val, __dst, __err, ""); break; \
        case 8: __put_user_asm_dword(__val, __dst, __err, ""); break; \
        default: __err = __put_user_bad(); break; \
        } \
        if (__err) \
                goto err_label; \
} while (0)

#ifdef CONFIG_MMU
extern unsigned long __must_check
arm_copy_from_user(void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        unsigned int __ua_flags;

        __ua_flags = uaccess_save_and_enable();
        n = arm_copy_from_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
}

extern unsigned long __must_check
arm_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check
__copy_to_user_std(void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
        unsigned int __ua_flags;
        __ua_flags = uaccess_save_and_enable();
        n = arm_copy_to_user(to, from, n);
        uaccess_restore(__ua_flags);
        return n;
#else
        return arm_copy_to_user(to, from, n);
#endif
}

extern unsigned long __must_check
arm_clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check
__clear_user_std(void __user *addr, unsigned long n);

static inline unsigned long __must_check
__clear_user(void __user *addr, unsigned long n)
{
        unsigned int __ua_flags = uaccess_save_and_enable();
        n = arm_clear_user(addr, n);
        uaccess_restore(__ua_flags);
        return n;
}

#else
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        memcpy(to, (const void __force *)from, n);
        return 0;
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        memcpy((void __force *)to, from, n);
        return 0;
}
#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
#endif
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
        if (access_ok(to, n))
                n = __clear_user(to, n);
        return n;
}

/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* _ASMARM_UACCESS_H */