v6.2
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 *  arch/arm/include/asm/uaccess.h
  4 */
  5#ifndef _ASMARM_UACCESS_H
  6#define _ASMARM_UACCESS_H
  7
  8/*
  9 * User space memory access functions
 10 */
 11#include <linux/string.h>
 12#include <asm/memory.h>
 13#include <asm/domain.h>
 14#include <asm/unaligned.h>
 15#include <asm/unified.h>
 16#include <asm/compiler.h>
 17
 18#include <asm/extable.h>
 19
 20/*
 21 * These two functions allow hooking accesses to userspace to increase
 22 * system integrity by ensuring that the kernel cannot inadvertently
 23 * perform such accesses (e.g. via list poison values) which could then
 24 * be exploited for privilege escalation.
 25 */
 26static __always_inline unsigned int uaccess_save_and_enable(void)
 27{
 28#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 29	unsigned int old_domain = get_domain();
 30
 31	/* Set the current domain access to permit user accesses */
 32	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
 33		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
 34
 35	return old_domain;
 36#else
 37	return 0;
 38#endif
 39}
 40
 41static __always_inline void uaccess_restore(unsigned int flags)
 42{
 43#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 44	/* Restore the user access mask */
 45	set_domain(flags);
 46#endif
 47}
 48
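/*
 * Example (editor's illustration, not part of the original header): the
 * accessors below bracket every user access with the pair above, so that
 * with CONFIG_CPU_SW_DOMAIN_PAN the user domain is only opened for the
 * duration of the access.  arch_example_access() is a hypothetical stand-in
 * for a real out-of-line copy routine.
 */
#if 0	/* illustrative sketch only */
static inline unsigned long example_copy(void *to, const void __user *from,
					 unsigned long n)
{
	unsigned int ua_flags = uaccess_save_and_enable();

	n = arch_example_access(to, from, n);	/* the actual user access */
	uaccess_restore(ua_flags);
	return n;
}
#endif
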
 49/*
 50 * These two are intentionally not defined anywhere - if the kernel
 51 * code generates any references to them, that's a bug.
 52 */
 53extern int __get_user_bad(void);
 54extern int __put_user_bad(void);
 55
 56#ifdef CONFIG_MMU
 57
 58/*
 59 * This is a type: unsigned long if the argument fits into that type,
 60 * unsigned long long otherwise.
 61 */
 62#define __inttype(x) \
 63	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 64
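/*
 * Example (editor's illustration): on 32-bit ARM, arguments of size 1, 2 or
 * 4 bytes select unsigned long, while 8-byte arguments select unsigned long
 * long.  static_assert() here is only for illustration.
 */
#if 0	/* illustrative sketch only */
static_assert(sizeof(__inttype(char)) == sizeof(unsigned long));
static_assert(sizeof(__inttype(long long)) == sizeof(unsigned long long));
#endif
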
 65/*
 66 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 67 * is above the current addr_limit.
 68 */
 69#define uaccess_mask_range_ptr(ptr, size)			\
 70	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
 71static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
 72						    size_t size)
 73{
 74	void __user *safe_ptr = (void __user *)ptr;
 75	unsigned long tmp;
 76
 77	asm volatile(
 78	"	.syntax unified\n"
 79	"	sub	%1, %3, #1\n"
 80	"	subs	%1, %1, %0\n"
 81	"	addhs	%1, %1, #1\n"
 82	"	subshs	%1, %1, %2\n"
 83	"	movlo	%0, #0\n"
 84	: "+r" (safe_ptr), "=&r" (tmp)
 85	: "r" (size), "r" (TASK_SIZE)
 86	: "cc");
 87
 88	csdb();
 89	return safe_ptr;
 90}
 91
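/*
 * Example (editor's illustration): a caller would mask a user pointer with
 * uaccess_mask_range_ptr() before copying, so that an out-of-range address
 * becomes NULL and cannot be dereferenced under speculation.  struct example
 * and example_read() are hypothetical.
 */
#if 0	/* illustrative sketch only */
static int example_read(struct example *dst, const struct example __user *usrc)
{
	usrc = uaccess_mask_range_ptr(usrc, sizeof(*usrc));
	return copy_from_user(dst, usrc, sizeof(*dst)) ? -EFAULT : 0;
}
#endif
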
 92/*
 93 * Single-value transfer routines.  They automatically use the right
 94 * size if we just have the right pointer type.  Note that the functions
 95 * which read from user space (*get_*) need to take care not to leak
 96 * kernel data even if the calling code is buggy and fails to check
 97 * the return value.  This means zeroing out the destination variable
 98 * or buffer on error.  Normally this is done out of line by the
 99 * fixup code, but there are a few places where it intrudes on the
100 * main code path.  When we only write to user space, there is no
101 * problem.
102 */
103extern int __get_user_1(void *);
104extern int __get_user_2(void *);
105extern int __get_user_4(void *);
106extern int __get_user_32t_8(void *);
107extern int __get_user_8(void *);
108extern int __get_user_64t_1(void *);
109extern int __get_user_64t_2(void *);
110extern int __get_user_64t_4(void *);
111
112#define __GUP_CLOBBER_1	"lr", "cc"
113#ifdef CONFIG_CPU_USE_DOMAINS
114#define __GUP_CLOBBER_2	"ip", "lr", "cc"
115#else
116#define __GUP_CLOBBER_2 "lr", "cc"
117#endif
118#define __GUP_CLOBBER_4	"lr", "cc"
119#define __GUP_CLOBBER_32t_8 "lr", "cc"
120#define __GUP_CLOBBER_8	"lr", "cc"
121
122#define __get_user_x(__r2, __p, __e, __l, __s)				\
123	   __asm__ __volatile__ (					\
124		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
125		__asmeq("%3", "r1")					\
126		"bl	__get_user_" #__s				\
127		: "=&r" (__e), "=r" (__r2)				\
128		: "0" (__p), "r" (__l)					\
129		: __GUP_CLOBBER_##__s)
130
131/* narrowing a double-word get into a single 32-bit word register: */
132#ifdef __ARMEB__
133#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
134	__get_user_x(__r2, __p, __e, __l, 32t_8)
135#else
136#define __get_user_x_32t __get_user_x
137#endif
138
139/*
140 * Store the result into the least significant word of the 64-bit target
141 * variable; only big-endian differs, where the LSW of the 64-bit __r2 pair is r3:
142 */
143#ifdef __ARMEB__
144#define __get_user_x_64t(__r2, __p, __e, __l, __s)		        \
145	   __asm__ __volatile__ (					\
146		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
147		__asmeq("%3", "r1")					\
148		"bl	__get_user_64t_" #__s				\
149		: "=&r" (__e), "=r" (__r2)				\
150		: "0" (__p), "r" (__l)					\
151		: __GUP_CLOBBER_##__s)
152#else
153#define __get_user_x_64t __get_user_x
154#endif
155
156
157#define __get_user_check(x, p)						\
158	({								\
159		unsigned long __limit = TASK_SIZE - 1; \
160		register typeof(*(p)) __user *__p asm("r0") = (p);	\
161		register __inttype(x) __r2 asm("r2");			\
162		register unsigned long __l asm("r1") = __limit;		\
163		register int __e asm("r0");				\
164		unsigned int __ua_flags = uaccess_save_and_enable();	\
165		int __tmp_e;						\
166		switch (sizeof(*(__p))) {				\
167		case 1:							\
168			if (sizeof((x)) >= 8)				\
169				__get_user_x_64t(__r2, __p, __e, __l, 1); \
170			else						\
171				__get_user_x(__r2, __p, __e, __l, 1);	\
172			break;						\
173		case 2:							\
174			if (sizeof((x)) >= 8)				\
175				__get_user_x_64t(__r2, __p, __e, __l, 2); \
176			else						\
177				__get_user_x(__r2, __p, __e, __l, 2);	\
178			break;						\
179		case 4:							\
180			if (sizeof((x)) >= 8)				\
181				__get_user_x_64t(__r2, __p, __e, __l, 4); \
182			else						\
183				__get_user_x(__r2, __p, __e, __l, 4);	\
184			break;						\
185		case 8:							\
186			if (sizeof((x)) < 8)				\
187				__get_user_x_32t(__r2, __p, __e, __l, 4); \
188			else						\
189				__get_user_x(__r2, __p, __e, __l, 8);	\
190			break;						\
191		default: __e = __get_user_bad(); break;			\
192		}							\
193		__tmp_e = __e;						\
194		uaccess_restore(__ua_flags);				\
195		x = (typeof(*(p))) __r2;				\
196		__tmp_e;						\
197	})
198
199#define get_user(x, p)							\
200	({								\
201		might_fault();						\
202		__get_user_check(x, p);					\
203	 })
204
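/*
 * Example (editor's illustration): typical get_user() usage, e.g. in an
 * ioctl handler.  get_user() verifies the address against TASK_SIZE,
 * fetches the value and returns 0 on success or -EFAULT on failure.
 */
#if 0	/* illustrative sketch only */
static int example_get_arg(const unsigned int __user *uptr, unsigned int *val)
{
	return get_user(*val, uptr);
}
#endif
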
205extern int __put_user_1(void *, unsigned int);
206extern int __put_user_2(void *, unsigned int);
207extern int __put_user_4(void *, unsigned int);
208extern int __put_user_8(void *, unsigned long long);
209
210#define __put_user_check(__pu_val, __ptr, __err, __s)			\
211	({								\
212		unsigned long __limit = TASK_SIZE - 1; \
213		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
214		register const void __user *__p asm("r0") = __ptr;	\
215		register unsigned long __l asm("r1") = __limit;		\
216		register int __e asm("r0");				\
217		__asm__ __volatile__ (					\
218			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
219			__asmeq("%3", "r1")				\
220			"bl	__put_user_" #__s			\
221			: "=&r" (__e)					\
222			: "0" (__p), "r" (__r2), "r" (__l)		\
223			: "ip", "lr", "cc");				\
224		__err = __e;						\
225	})
226
227#else /* CONFIG_MMU */
228
229#define get_user(x, p)	__get_user(x, p)
230#define __put_user_check __put_user_nocheck
231
232#endif /* CONFIG_MMU */
233
234#include <asm-generic/access_ok.h>
235
236#ifdef CONFIG_CPU_SPECTRE
237/*
238 * When mitigating Spectre variant 1, it is not worth fixing the non-
239 * verifying accessors, because we need to add verification of the
240 * address space there.  Force these to use the standard get_user()
241 * version instead.
242 */
243#define __get_user(x, ptr) get_user(x, ptr)
244#else
245
246/*
247 * The "__xxx" versions of the user access functions do not verify the
248 * address space - it must have been done previously with a separate
249 * "access_ok()" call.
250 *
251 * The "xxx_error" versions set the third argument to EFAULT if an
252 * error occurs, and leave it unchanged on success.  Note that these
 253 * versions are void (i.e. they don't return a value as such).
254 */
255#define __get_user(x, ptr)						\
256({									\
257	long __gu_err = 0;						\
258	__get_user_err((x), (ptr), __gu_err, TUSER());			\
259	__gu_err;							\
260})
261
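/*
 * Example (editor's illustration): the unchecked accessors are only valid
 * after an explicit access_ok() on the whole range; with CONFIG_CPU_SPECTRE
 * they are redefined to the checking get_user() above anyway.
 */
#if 0	/* illustrative sketch only */
static int example_get_pair(const u32 __user *uptr, u32 *a, u32 *b)
{
	if (!access_ok(uptr, 2 * sizeof(u32)))
		return -EFAULT;
	if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
		return -EFAULT;
	return 0;
}
#endif
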
262#define __get_user_err(x, ptr, err, __t)				\
263do {									\
264	unsigned long __gu_addr = (unsigned long)(ptr);			\
265	unsigned long __gu_val;						\
266	unsigned int __ua_flags;					\
267	__chk_user_ptr(ptr);						\
268	might_fault();							\
269	__ua_flags = uaccess_save_and_enable();				\
270	switch (sizeof(*(ptr))) {					\
271	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err, __t); break;	\
272	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err, __t); break;	\
273	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err, __t); break;	\
274	default: (__gu_val) = __get_user_bad();				\
275	}								\
276	uaccess_restore(__ua_flags);					\
277	(x) = (__typeof__(*(ptr)))__gu_val;				\
278} while (0)
279#endif
280
281#define __get_user_asm(x, addr, err, instr)			\
282	__asm__ __volatile__(					\
283	"1:	" instr " %1, [%2], #0\n"			\
284	"2:\n"							\
285	"	.pushsection .text.fixup,\"ax\"\n"		\
286	"	.align	2\n"					\
287	"3:	mov	%0, %3\n"				\
288	"	mov	%1, #0\n"				\
289	"	b	2b\n"					\
290	"	.popsection\n"					\
291	"	.pushsection __ex_table,\"a\"\n"		\
292	"	.align	3\n"					\
293	"	.long	1b, 3b\n"				\
294	"	.popsection"					\
295	: "+r" (err), "=&r" (x)					\
296	: "r" (addr), "i" (-EFAULT)				\
297	: "cc")
298
299#define __get_user_asm_byte(x, addr, err, __t)			\
300	__get_user_asm(x, addr, err, "ldrb" __t)
301
302#if __LINUX_ARM_ARCH__ >= 6
303
304#define __get_user_asm_half(x, addr, err, __t)			\
305	__get_user_asm(x, addr, err, "ldrh" __t)
306
307#else
308
309#ifndef __ARMEB__
310#define __get_user_asm_half(x, __gu_addr, err, __t)		\
311({								\
312	unsigned long __b1, __b2;				\
313	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
314	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
315	(x) = __b1 | (__b2 << 8);				\
316})
317#else
318#define __get_user_asm_half(x, __gu_addr, err, __t)		\
319({								\
320	unsigned long __b1, __b2;				\
321	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
322	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
323	(x) = (__b1 << 8) | __b2;				\
324})
325#endif
326
327#endif /* __LINUX_ARM_ARCH__ >= 6 */
328
329#define __get_user_asm_word(x, addr, err, __t)			\
330	__get_user_asm(x, addr, err, "ldr" __t)
331
332#define __put_user_switch(x, ptr, __err, __fn)				\
333	do {								\
334		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
335		__typeof__(*(ptr)) __pu_val = (x);			\
336		unsigned int __ua_flags;				\
337		might_fault();						\
338		__ua_flags = uaccess_save_and_enable();			\
339		switch (sizeof(*(ptr))) {				\
340		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
341		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
342		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
343		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
344		default: __err = __put_user_bad(); break;		\
345		}							\
346		uaccess_restore(__ua_flags);				\
347	} while (0)
348
349#define put_user(x, ptr)						\
350({									\
351	int __pu_err = 0;						\
352	__put_user_switch((x), (ptr), __pu_err, __put_user_check);	\
353	__pu_err;							\
354})
355
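/*
 * Example (editor's illustration): typical put_user() usage; the size of
 * *ptr selects __put_user_1/2/4/8 and the macro returns 0 or -EFAULT.
 */
#if 0	/* illustrative sketch only */
static int example_put_status(u64 status, u64 __user *uptr)
{
	return put_user(status, uptr);
}
#endif
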
356#ifdef CONFIG_CPU_SPECTRE
357/*
358 * When mitigating Spectre variant 1.1, all accessors need to include
359 * verification of the address space.
360 */
361#define __put_user(x, ptr) put_user(x, ptr)
362
363#else
364#define __put_user(x, ptr)						\
365({									\
366	long __pu_err = 0;						\
367	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
368	__pu_err;							\
369})
370
371#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
372	do {								\
373		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
374		__put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
375	} while (0)
376
377#define __put_user_nocheck_1 __put_user_asm_byte
378#define __put_user_nocheck_2 __put_user_asm_half
379#define __put_user_nocheck_4 __put_user_asm_word
380#define __put_user_nocheck_8 __put_user_asm_dword
381
382#endif /* !CONFIG_CPU_SPECTRE */
383
384#define __put_user_asm(x, __pu_addr, err, instr)		\
385	__asm__ __volatile__(					\
386	"1:	" instr " %1, [%2], #0\n"		\
387	"2:\n"							\
388	"	.pushsection .text.fixup,\"ax\"\n"		\
389	"	.align	2\n"					\
390	"3:	mov	%0, %3\n"				\
391	"	b	2b\n"					\
392	"	.popsection\n"					\
393	"	.pushsection __ex_table,\"a\"\n"		\
394	"	.align	3\n"					\
395	"	.long	1b, 3b\n"				\
396	"	.popsection"					\
397	: "+r" (err)						\
398	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
399	: "cc")
400
401#define __put_user_asm_byte(x, __pu_addr, err, __t)		\
402	__put_user_asm(x, __pu_addr, err, "strb" __t)
403
404#if __LINUX_ARM_ARCH__ >= 6
405
406#define __put_user_asm_half(x, __pu_addr, err, __t)		\
407	__put_user_asm(x, __pu_addr, err, "strh" __t)
408
409#else
410
411#ifndef __ARMEB__
412#define __put_user_asm_half(x, __pu_addr, err, __t)		\
413({								\
414	unsigned long __temp = (__force unsigned long)(x);	\
415	__put_user_asm_byte(__temp, __pu_addr, err, __t);	\
416	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
417})
418#else
419#define __put_user_asm_half(x, __pu_addr, err, __t)		\
420({								\
421	unsigned long __temp = (__force unsigned long)(x);	\
422	__put_user_asm_byte(__temp >> 8, __pu_addr, err, __t);	\
423	__put_user_asm_byte(__temp, __pu_addr + 1, err, __t);	\
424})
425#endif
426
427#endif /* __LINUX_ARM_ARCH__ >= 6 */
428
429#define __put_user_asm_word(x, __pu_addr, err, __t)		\
430	__put_user_asm(x, __pu_addr, err, "str" __t)
431
432#ifndef __ARMEB__
433#define	__reg_oper0	"%R2"
434#define	__reg_oper1	"%Q2"
435#else
436#define	__reg_oper0	"%Q2"
437#define	__reg_oper1	"%R2"
438#endif
439
440#define __put_user_asm_dword(x, __pu_addr, err, __t)		\
441	__asm__ __volatile__(					\
442 ARM(	"1:	str" __t "	" __reg_oper1 ", [%1], #4\n"  ) \
443 ARM(	"2:	str" __t "	" __reg_oper0 ", [%1]\n"      ) \
444 THUMB(	"1:	str" __t "	" __reg_oper1 ", [%1]\n"      ) \
445 THUMB(	"2:	str" __t "	" __reg_oper0 ", [%1, #4]\n"  ) \
446	"3:\n"							\
447	"	.pushsection .text.fixup,\"ax\"\n"		\
448	"	.align	2\n"					\
449	"4:	mov	%0, %3\n"				\
450	"	b	3b\n"					\
451	"	.popsection\n"					\
452	"	.pushsection __ex_table,\"a\"\n"		\
453	"	.align	3\n"					\
454	"	.long	1b, 4b\n"				\
455	"	.long	2b, 4b\n"				\
456	"	.popsection"					\
457	: "+r" (err), "+r" (__pu_addr)				\
458	: "r" (x), "i" (-EFAULT)				\
459	: "cc")
460
461#define __get_kernel_nofault(dst, src, type, err_label)			\
462do {									\
463	const type *__pk_ptr = (src);					\
464	unsigned long __src = (unsigned long)(__pk_ptr);		\
465	type __val;							\
466	int __err = 0;							\
467	switch (sizeof(type)) {						\
468	case 1:	__get_user_asm_byte(__val, __src, __err, ""); break;	\
469	case 2: __get_user_asm_half(__val, __src, __err, ""); break;	\
470	case 4: __get_user_asm_word(__val, __src, __err, ""); break;	\
471	case 8: {							\
472		u32 *__v32 = (u32*)&__val;				\
473		__get_user_asm_word(__v32[0], __src, __err, "");	\
474		if (__err)						\
475			break;						\
476		__get_user_asm_word(__v32[1], __src+4, __err, "");	\
477		break;							\
478	}								\
479	default: __err = __get_user_bad(); break;			\
480	}								\
481	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))		\
482		put_unaligned(__val, (type *)(dst));			\
483	else								\
484		*(type *)(dst) = __val; /* aligned by caller */		\
485	if (__err)							\
486		goto err_label;						\
487} while (0)
488
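/*
 * Example (editor's illustration): __get_kernel_nofault() reads a possibly
 * faulting *kernel* address and branches to the caller-supplied label on a
 * fault, as copy_from_kernel_nofault() does for whole buffers.
 */
#if 0	/* illustrative sketch only */
static int example_peek_kernel_u32(const u32 *addr, u32 *val)
{
	__get_kernel_nofault(val, addr, u32, fault);
	return 0;
fault:
	return -EFAULT;
}
#endif
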
489#define __put_kernel_nofault(dst, src, type, err_label)			\
490do {									\
491	const type *__pk_ptr = (dst);					\
492	unsigned long __dst = (unsigned long)__pk_ptr;			\
493	int __err = 0;							\
494	type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)	\
495		     ? get_unaligned((type *)(src))			\
496		     : *(type *)(src);	/* aligned by caller */		\
497	switch (sizeof(type)) {						\
498	case 1: __put_user_asm_byte(__val, __dst, __err, ""); break;	\
499	case 2:	__put_user_asm_half(__val, __dst, __err, ""); break;	\
500	case 4:	__put_user_asm_word(__val, __dst, __err, ""); break;	\
501	case 8:	__put_user_asm_dword(__val, __dst, __err, ""); break;	\
502	default: __err = __put_user_bad(); break;			\
503	}								\
504	if (__err)							\
505		goto err_label;						\
506} while (0)
507
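/*
 * Example (editor's illustration): the store-side counterpart; the value is
 * read from a normal kernel buffer and written to the possibly faulting
 * destination.
 */
#if 0	/* illustrative sketch only */
static int example_poke_kernel_u32(u32 *addr, u32 val)
{
	__put_kernel_nofault(addr, &val, u32, fault);
	return 0;
fault:
	return -EFAULT;
}
#endif
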
508#ifdef CONFIG_MMU
509extern unsigned long __must_check
510arm_copy_from_user(void *to, const void __user *from, unsigned long n);
511
512static inline unsigned long __must_check
513raw_copy_from_user(void *to, const void __user *from, unsigned long n)
514{
515	unsigned int __ua_flags;
516
517	__ua_flags = uaccess_save_and_enable();
518	n = arm_copy_from_user(to, from, n);
519	uaccess_restore(__ua_flags);
520	return n;
521}
522
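/*
 * Example (editor's illustration): raw_copy_from_user() is normally reached
 * via the generic copy_from_user(), which roughly does the following (the
 * real helper also zeroes the uncopied tail on a fault).
 */
#if 0	/* illustrative sketch only */
static inline unsigned long example_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	if (access_ok(from, n))
		return raw_copy_from_user(to, from, n);
	return n;	/* nothing copied */
}
#endif
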
523extern unsigned long __must_check
524arm_copy_to_user(void __user *to, const void *from, unsigned long n);
525extern unsigned long __must_check
526__copy_to_user_std(void __user *to, const void *from, unsigned long n);
527
528static inline unsigned long __must_check
529raw_copy_to_user(void __user *to, const void *from, unsigned long n)
530{
531#ifndef CONFIG_UACCESS_WITH_MEMCPY
532	unsigned int __ua_flags;
533	__ua_flags = uaccess_save_and_enable();
534	n = arm_copy_to_user(to, from, n);
535	uaccess_restore(__ua_flags);
536	return n;
537#else
538	return arm_copy_to_user(to, from, n);
539#endif
540}
541
542extern unsigned long __must_check
543arm_clear_user(void __user *addr, unsigned long n);
544extern unsigned long __must_check
545__clear_user_std(void __user *addr, unsigned long n);
546
547static inline unsigned long __must_check
548__clear_user(void __user *addr, unsigned long n)
549{
550	unsigned int __ua_flags = uaccess_save_and_enable();
551	n = arm_clear_user(addr, n);
552	uaccess_restore(__ua_flags);
553	return n;
554}
555
556#else
557static inline unsigned long
558raw_copy_from_user(void *to, const void __user *from, unsigned long n)
559{
560	memcpy(to, (const void __force *)from, n);
561	return 0;
562}
563static inline unsigned long
564raw_copy_to_user(void __user *to, const void *from, unsigned long n)
565{
566	memcpy((void __force *)to, from, n);
567	return 0;
568}
569#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
570#endif
571#define INLINE_COPY_TO_USER
572#define INLINE_COPY_FROM_USER
573
574static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
575{
576	if (access_ok(to, n))
577		n = __clear_user(to, n);
578	return n;
579}
580
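/*
 * Example (editor's illustration): clear_user() returns the number of bytes
 * that could not be zeroed, so callers usually map a non-zero result to
 * -EFAULT.
 */
#if 0	/* illustrative sketch only */
static int example_zero_user(void __user *ubuf, unsigned long len)
{
	return clear_user(ubuf, len) ? -EFAULT : 0;
}
#endif
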
581/* These are from lib/ code, and use __get_user() and friends */
582extern long strncpy_from_user(char *dest, const char __user *src, long count);
583
584extern __must_check long strnlen_user(const char __user *str, long n);
585
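/*
 * Example (editor's illustration): strncpy_from_user() returns the string
 * length on success, -EFAULT on a faulting address, or "count" when the
 * string was not NUL-terminated within the buffer.
 */
#if 0	/* illustrative sketch only */
static long example_get_name(char *buf, const char __user *uname, long buflen)
{
	long len = strncpy_from_user(buf, uname, buflen);

	if (len == buflen)
		return -ENAMETOOLONG;	/* truncated */
	return len;
}
#endif
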
586#endif /* _ASMARM_UACCESS_H */