v6.2
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 *  arch/arm/include/asm/uaccess.h
  4 */
  5#ifndef _ASMARM_UACCESS_H
  6#define _ASMARM_UACCESS_H
  7
  8/*
  9 * User space memory access functions
 10 */
 11#include <linux/string.h>
 12#include <asm/memory.h>
 13#include <asm/domain.h>
 14#include <asm/unaligned.h>
 15#include <asm/unified.h>
 16#include <asm/compiler.h>
 17
 18#include <asm/extable.h>
 19
 20/*
 21 * These two functions allow hooking accesses to userspace to increase
  22 * system integrity by ensuring that the kernel cannot inadvertently
  23 * perform such accesses (e.g., via list poison values) which could then
  24 * be exploited for privilege escalation.
 25 */
 26static __always_inline unsigned int uaccess_save_and_enable(void)
 27{
 28#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 29	unsigned int old_domain = get_domain();
 30
 31	/* Set the current domain access to permit user accesses */
 32	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
 33		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
 34
 35	return old_domain;
 36#else
 37	return 0;
 38#endif
 39}
 40
 41static __always_inline void uaccess_restore(unsigned int flags)
 42{
 43#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 44	/* Restore the user access mask */
 45	set_domain(flags);
 46#endif
 47}
 48
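Every user access in this header is bracketed by these two calls: open the user domain, perform the access, then restore the previous domain value. A minimal sketch of the pattern, mirroring raw_copy_from_user() defined further down (the wrapper name here is hypothetical):

static inline unsigned long example_copy(void *to, const void __user *from,
                                         unsigned long n)
{
        unsigned int ua_flags = uaccess_save_and_enable();

        n = arm_copy_from_user(to, from, n);    /* the user access happens here */
        uaccess_restore(ua_flags);
        return n;
}
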
 49/*
 50 * These two are intentionally not defined anywhere - if the kernel
 51 * code generates any references to them, that's a bug.
 52 */
 53extern int __get_user_bad(void);
 54extern int __put_user_bad(void);
 55
 56#ifdef CONFIG_MMU
 57
 58/*
 59 * This is a type: either unsigned long, if the argument fits into
 60 * that type, or otherwise unsigned long long.
 61 */
 62#define __inttype(x) \
 63	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
 64
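As a compile-time illustration (not part of the header), on 32-bit ARM a 4-byte argument resolves to unsigned long, while an 8-byte argument resolves to unsigned long long:

/* Illustration only: both assertions hold on 32-bit ARM. */
_Static_assert(sizeof(__inttype(int)) == sizeof(unsigned long),
               "32-bit values widen to unsigned long");
_Static_assert(sizeof(__inttype(long long)) == sizeof(unsigned long long),
               "64-bit values widen to unsigned long long");
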
 65/*
 66 * Sanitise a uaccess pointer such that it becomes NULL if addr+size
 67 * is above the current addr_limit.
 68 */
 69#define uaccess_mask_range_ptr(ptr, size)			\
 70	((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
 71static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
 72						    size_t size)
 73{
 74	void __user *safe_ptr = (void __user *)ptr;
 75	unsigned long tmp;
 76
 77	asm volatile(
 78	"	.syntax unified\n"
 79	"	sub	%1, %3, #1\n"
 80	"	subs	%1, %1, %0\n"
 81	"	addhs	%1, %1, #1\n"
 82	"	subshs	%1, %1, %2\n"
 83	"	movlo	%0, #0\n"
 84	: "+r" (safe_ptr), "=&r" (tmp)
 85	: "r" (size), "r" (TASK_SIZE)
 86	: "cc");
 87
 88	csdb();
 89	return safe_ptr;
 90}
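The assembly is a branchless equivalent of roughly the following C, kept in asm so the compiler cannot turn the comparison back into a predictable branch; the csdb() afterwards is the speculation barrier (illustrative sketch only):

        /* NULL the pointer unless the whole [ptr, ptr + size) range fits below TASK_SIZE */
        if ((unsigned long)ptr >= TASK_SIZE ||
            TASK_SIZE - (unsigned long)ptr < size)
                safe_ptr = NULL;
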
 91
 92/*
 93 * Single-value transfer routines.  They automatically use the right
 94 * size if we just have the right pointer type.  Note that the functions
 95 * which read from user space (*get_*) need to take care not to leak
 96 * kernel data even if the calling code is buggy and fails to check
 97 * the return value.  This means zeroing out the destination variable
 98 * or buffer on error.  Normally this is done out of line by the
 99 * fixup code, but there are a few places where it intrudes on the
100 * main code path.  When we only write to user space, there is no
101 * problem.
102 */
103extern int __get_user_1(void *);
104extern int __get_user_2(void *);
105extern int __get_user_4(void *);
106extern int __get_user_32t_8(void *);
107extern int __get_user_8(void *);
108extern int __get_user_64t_1(void *);
109extern int __get_user_64t_2(void *);
110extern int __get_user_64t_4(void *);
111
112#define __GUP_CLOBBER_1	"lr", "cc"
113#ifdef CONFIG_CPU_USE_DOMAINS
114#define __GUP_CLOBBER_2	"ip", "lr", "cc"
115#else
116#define __GUP_CLOBBER_2 "lr", "cc"
117#endif
118#define __GUP_CLOBBER_4	"lr", "cc"
119#define __GUP_CLOBBER_32t_8 "lr", "cc"
120#define __GUP_CLOBBER_8	"lr", "cc"
121
122#define __get_user_x(__r2, __p, __e, __l, __s)				\
123	   __asm__ __volatile__ (					\
124		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
125		__asmeq("%3", "r1")					\
126		"bl	__get_user_" #__s				\
127		: "=&r" (__e), "=r" (__r2)				\
128		: "0" (__p), "r" (__l)					\
129		: __GUP_CLOBBER_##__s)
130
131/* narrowing a double-word get into a single 32bit word register: */
132#ifdef __ARMEB__
133#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
134	__get_user_x(__r2, __p, __e, __l, 32t_8)
135#else
136#define __get_user_x_32t __get_user_x
137#endif
138
139/*
140 * storing result into proper least significant word of 64bit target var,
141 * different only for big endian case where 64 bit __r2 lsw is r3:
142 */
143#ifdef __ARMEB__
144#define __get_user_x_64t(__r2, __p, __e, __l, __s)		        \
145	   __asm__ __volatile__ (					\
146		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
147		__asmeq("%3", "r1")					\
148		"bl	__get_user_64t_" #__s				\
149		: "=&r" (__e), "=r" (__r2)				\
150		: "0" (__p), "r" (__l)					\
151		: __GUP_CLOBBER_##__s)
152#else
153#define __get_user_x_64t __get_user_x
154#endif
155
156
157#define __get_user_check(x, p)						\
158	({								\
159		unsigned long __limit = TASK_SIZE - 1; \
160		register typeof(*(p)) __user *__p asm("r0") = (p);	\
161		register __inttype(x) __r2 asm("r2");			\
162		register unsigned long __l asm("r1") = __limit;		\
163		register int __e asm("r0");				\
164		unsigned int __ua_flags = uaccess_save_and_enable();	\
165		int __tmp_e;						\
166		switch (sizeof(*(__p))) {				\
167		case 1:							\
168			if (sizeof((x)) >= 8)				\
169				__get_user_x_64t(__r2, __p, __e, __l, 1); \
170			else						\
171				__get_user_x(__r2, __p, __e, __l, 1);	\
172			break;						\
173		case 2:							\
174			if (sizeof((x)) >= 8)				\
175				__get_user_x_64t(__r2, __p, __e, __l, 2); \
176			else						\
177				__get_user_x(__r2, __p, __e, __l, 2);	\
178			break;						\
179		case 4:							\
180			if (sizeof((x)) >= 8)				\
181				__get_user_x_64t(__r2, __p, __e, __l, 4); \
182			else						\
183				__get_user_x(__r2, __p, __e, __l, 4);	\
184			break;						\
185		case 8:							\
186			if (sizeof((x)) < 8)				\
187				__get_user_x_32t(__r2, __p, __e, __l, 4); \
188			else						\
189				__get_user_x(__r2, __p, __e, __l, 8);	\
190			break;						\
191		default: __e = __get_user_bad(); break;			\
192		}							\
193		__tmp_e = __e;						\
194		uaccess_restore(__ua_flags);				\
195		x = (typeof(*(p))) __r2;				\
196		__tmp_e;						\
197	})
198
199#define get_user(x, p)							\
200	({								\
201		might_fault();						\
202		__get_user_check(x, p);					\
203	 })
204
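Callers use get_user() directly on a typed user pointer; the right __get_user_N helper is chosen from the pointer size, the result is 0 or -EFAULT, and the destination is zeroed on a fault so no stale kernel data can leak. A usage sketch (the function and variable names are hypothetical):

/* Hypothetical example: fetch a u32 flags word supplied by userspace. */
static int example_fetch_flags(const u32 __user *uarg, u32 *flags)
{
        u32 val;

        if (get_user(val, uarg))        /* -EFAULT on a bad or unmapped pointer */
                return -EFAULT;

        *flags = val;
        return 0;
}
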
205extern int __put_user_1(void *, unsigned int);
206extern int __put_user_2(void *, unsigned int);
207extern int __put_user_4(void *, unsigned int);
208extern int __put_user_8(void *, unsigned long long);
209
210#define __put_user_check(__pu_val, __ptr, __err, __s)			\
211	({								\
212		unsigned long __limit = TASK_SIZE - 1; \
213		register typeof(__pu_val) __r2 asm("r2") = __pu_val;	\
214		register const void __user *__p asm("r0") = __ptr;	\
215		register unsigned long __l asm("r1") = __limit;		\
216		register int __e asm("r0");				\
217		__asm__ __volatile__ (					\
218			__asmeq("%0", "r0") __asmeq("%2", "r2")		\
219			__asmeq("%3", "r1")				\
220			"bl	__put_user_" #__s			\
221			: "=&r" (__e)					\
222			: "0" (__p), "r" (__r2), "r" (__l)		\
223			: "ip", "lr", "cc");				\
224		__err = __e;						\
225	})
226
227#else /* CONFIG_MMU */
228
229#define get_user(x, p)	__get_user(x, p)
230#define __put_user_check __put_user_nocheck
231
232#endif /* CONFIG_MMU */
233
234#include <asm-generic/access_ok.h>
235
236#ifdef CONFIG_CPU_SPECTRE
237/*
238 * When mitigating Spectre variant 1, it is not worth fixing the non-
239 * verifying accessors, because we need to add verification of the
240 * address space there.  Force these to use the standard get_user()
241 * version instead.
242 */
243#define __get_user(x, ptr) get_user(x, ptr)
244#else
245
246/*
247 * The "__xxx" versions of the user access functions do not verify the
248 * address space - it must have been done previously with a separate
249 * "access_ok()" call.
250 *
251 * The "xxx_error" versions set the third argument to EFAULT if an
252 * error occurs, and leave it unchanged on success.  Note that these
253 * versions are void (ie, don't return a value as such).
254 */
255#define __get_user(x, ptr)						\
256({									\
257	long __gu_err = 0;						\
258	__get_user_err((x), (ptr), __gu_err, TUSER());			\
259	__gu_err;							\
260})
261
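In other words, __get_user() is only safe once the caller has validated the range itself; the typical shape is one access_ok() check followed by the unchecked accesses (illustrative sketch, hypothetical function name):

/* Hypothetical example: read an array of user-supplied u32 values. */
static int example_read_array(const u32 __user *uptr, u32 *dst, unsigned int n)
{
        unsigned int i;

        if (!access_ok(uptr, (size_t)n * sizeof(u32)))  /* one range check up front */
                return -EFAULT;

        for (i = 0; i < n; i++)
                if (__get_user(dst[i], &uptr[i]))
                        return -EFAULT;
        return 0;
}
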
262#define __get_user_err(x, ptr, err, __t)				\
263do {									\
264	unsigned long __gu_addr = (unsigned long)(ptr);			\
265	unsigned long __gu_val;						\
266	unsigned int __ua_flags;					\
267	__chk_user_ptr(ptr);						\
268	might_fault();							\
269	__ua_flags = uaccess_save_and_enable();				\
270	switch (sizeof(*(ptr))) {					\
271	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err, __t); break;	\
272	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err, __t); break;	\
273	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err, __t); break;	\
274	default: (__gu_val) = __get_user_bad();				\
275	}								\
276	uaccess_restore(__ua_flags);					\
277	(x) = (__typeof__(*(ptr)))__gu_val;				\
278} while (0)
279#endif
280
281#define __get_user_asm(x, addr, err, instr)			\
282	__asm__ __volatile__(					\
283	"1:	" instr " %1, [%2], #0\n"			\
284	"2:\n"							\
285	"	.pushsection .text.fixup,\"ax\"\n"		\
286	"	.align	2\n"					\
287	"3:	mov	%0, %3\n"				\
288	"	mov	%1, #0\n"				\
289	"	b	2b\n"					\
290	"	.popsection\n"					\
291	"	.pushsection __ex_table,\"a\"\n"		\
292	"	.align	3\n"					\
293	"	.long	1b, 3b\n"				\
294	"	.popsection"					\
295	: "+r" (err), "=&r" (x)					\
296	: "r" (addr), "i" (-EFAULT)				\
297	: "cc")
298
299#define __get_user_asm_byte(x, addr, err, __t)			\
300	__get_user_asm(x, addr, err, "ldrb" __t)
301
302#if __LINUX_ARM_ARCH__ >= 6
303
304#define __get_user_asm_half(x, addr, err, __t)			\
305	__get_user_asm(x, addr, err, "ldrh" __t)
306
307#else
308
309#ifndef __ARMEB__
310#define __get_user_asm_half(x, __gu_addr, err, __t)		\
311({								\
312	unsigned long __b1, __b2;				\
313	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
314	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
315	(x) = __b1 | (__b2 << 8);				\
316})
317#else
318#define __get_user_asm_half(x, __gu_addr, err, __t)		\
319({								\
320	unsigned long __b1, __b2;				\
321	__get_user_asm_byte(__b1, __gu_addr, err, __t);		\
322	__get_user_asm_byte(__b2, __gu_addr + 1, err, __t);	\
323	(x) = (__b1 << 8) | __b2;				\
324})
325#endif
326
327#endif /* __LINUX_ARM_ARCH__ >= 6 */
328
329#define __get_user_asm_word(x, addr, err, __t)			\
330	__get_user_asm(x, addr, err, "ldr" __t)
331
332#define __put_user_switch(x, ptr, __err, __fn)				\
333	do {								\
334		const __typeof__(*(ptr)) __user *__pu_ptr = (ptr);	\
335		__typeof__(*(ptr)) __pu_val = (x);			\
336		unsigned int __ua_flags;				\
337		might_fault();						\
338		__ua_flags = uaccess_save_and_enable();			\
339		switch (sizeof(*(ptr))) {				\
340		case 1: __fn(__pu_val, __pu_ptr, __err, 1); break;	\
341		case 2:	__fn(__pu_val, __pu_ptr, __err, 2); break;	\
342		case 4:	__fn(__pu_val, __pu_ptr, __err, 4); break;	\
343		case 8:	__fn(__pu_val, __pu_ptr, __err, 8); break;	\
344		default: __err = __put_user_bad(); break;		\
345		}							\
346		uaccess_restore(__ua_flags);				\
347	} while (0)
348
349#define put_user(x, ptr)						\
350({									\
351	int __pu_err = 0;						\
352	__put_user_switch((x), (ptr), __pu_err, __put_user_check);	\
353	__pu_err;							\
354})
355
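put_user() is the mirror image for single stores; there is nothing to zero on failure, so callers only have to propagate the error (hypothetical usage sketch):

/* Hypothetical example: report a completion count back to userspace. */
static int example_report_count(u32 __user *ucount, u32 value)
{
        return put_user(value, ucount); /* 0 on success, -EFAULT on a faulting store */
}
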
356#ifdef CONFIG_CPU_SPECTRE
357/*
358 * When mitigating Spectre variant 1.1, all accessors need to include
359 * verification of the address space.
360 */
361#define __put_user(x, ptr) put_user(x, ptr)
362
363#else
364#define __put_user(x, ptr)						\
365({									\
366	long __pu_err = 0;						\
367	__put_user_switch((x), (ptr), __pu_err, __put_user_nocheck);	\
368	__pu_err;							\
369})
370
371#define __put_user_nocheck(x, __pu_ptr, __err, __size)			\
372	do {								\
373		unsigned long __pu_addr = (unsigned long)__pu_ptr;	\
374		__put_user_nocheck_##__size(x, __pu_addr, __err, TUSER());\
375	} while (0)
376
377#define __put_user_nocheck_1 __put_user_asm_byte
378#define __put_user_nocheck_2 __put_user_asm_half
379#define __put_user_nocheck_4 __put_user_asm_word
380#define __put_user_nocheck_8 __put_user_asm_dword
381
382#endif /* !CONFIG_CPU_SPECTRE */
383
384#define __put_user_asm(x, __pu_addr, err, instr)		\
385	__asm__ __volatile__(					\
386	"1:	" instr " %1, [%2], #0\n"		\
387	"2:\n"							\
388	"	.pushsection .text.fixup,\"ax\"\n"		\
389	"	.align	2\n"					\
390	"3:	mov	%0, %3\n"				\
391	"	b	2b\n"					\
392	"	.popsection\n"					\
393	"	.pushsection __ex_table,\"a\"\n"		\
394	"	.align	3\n"					\
395	"	.long	1b, 3b\n"				\
396	"	.popsection"					\
397	: "+r" (err)						\
398	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
399	: "cc")
400
401#define __put_user_asm_byte(x, __pu_addr, err, __t)		\
402	__put_user_asm(x, __pu_addr, err, "strb" __t)
403
404#if __LINUX_ARM_ARCH__ >= 6
405
406#define __put_user_asm_half(x, __pu_addr, err, __t)		\
407	__put_user_asm(x, __pu_addr, err, "strh" __t)
408
409#else
410
411#ifndef __ARMEB__
412#define __put_user_asm_half(x, __pu_addr, err, __t)		\
413({								\
414	unsigned long __temp = (__force unsigned long)(x);	\
415	__put_user_asm_byte(__temp, __pu_addr, err, __t);	\
416	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err, __t);\
417})
418#else
419#define __put_user_asm_half(x, __pu_addr, err, __t)		\
420({								\
421	unsigned long __temp = (__force unsigned long)(x);	\
422	__put_user_asm_byte(__temp >> 8, __pu_addr, err, __t);	\
423	__put_user_asm_byte(__temp, __pu_addr + 1, err, __t);	\
424})
425#endif
426
427#endif /* __LINUX_ARM_ARCH__ >= 6 */
428
429#define __put_user_asm_word(x, __pu_addr, err, __t)		\
430	__put_user_asm(x, __pu_addr, err, "str" __t)
431
432#ifndef __ARMEB__
433#define	__reg_oper0	"%R2"
434#define	__reg_oper1	"%Q2"
435#else
436#define	__reg_oper0	"%Q2"
437#define	__reg_oper1	"%R2"
438#endif
439
440#define __put_user_asm_dword(x, __pu_addr, err, __t)		\
441	__asm__ __volatile__(					\
442 ARM(	"1:	str" __t "	" __reg_oper1 ", [%1], #4\n"  ) \
443 ARM(	"2:	str" __t "	" __reg_oper0 ", [%1]\n"      ) \
444 THUMB(	"1:	str" __t "	" __reg_oper1 ", [%1]\n"      ) \
445 THUMB(	"2:	str" __t "	" __reg_oper0 ", [%1, #4]\n"  ) \
446	"3:\n"							\
447	"	.pushsection .text.fixup,\"ax\"\n"		\
448	"	.align	2\n"					\
449	"4:	mov	%0, %3\n"				\
450	"	b	3b\n"					\
451	"	.popsection\n"					\
452	"	.pushsection __ex_table,\"a\"\n"		\
453	"	.align	3\n"					\
454	"	.long	1b, 4b\n"				\
455	"	.long	2b, 4b\n"				\
456	"	.popsection"					\
457	: "+r" (err), "+r" (__pu_addr)				\
458	: "r" (x), "i" (-EFAULT)				\
459	: "cc")
460
461#define __get_kernel_nofault(dst, src, type, err_label)			\
462do {									\
463	const type *__pk_ptr = (src);					\
464	unsigned long __src = (unsigned long)(__pk_ptr);		\
465	type __val;							\
466	int __err = 0;							\
467	switch (sizeof(type)) {						\
468	case 1:	__get_user_asm_byte(__val, __src, __err, ""); break;	\
469	case 2: __get_user_asm_half(__val, __src, __err, ""); break;	\
470	case 4: __get_user_asm_word(__val, __src, __err, ""); break;	\
471	case 8: {							\
472		u32 *__v32 = (u32*)&__val;				\
473		__get_user_asm_word(__v32[0], __src, __err, "");	\
474		if (__err)						\
475			break;						\
476		__get_user_asm_word(__v32[1], __src+4, __err, "");	\
477		break;							\
478	}								\
479	default: __err = __get_user_bad(); break;			\
480	}								\
481	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))		\
482		put_unaligned(__val, (type *)(dst));			\
483	else								\
484		*(type *)(dst) = __val; /* aligned by caller */		\
485	if (__err)							\
486		goto err_label;						\
487} while (0)
488
489#define __put_kernel_nofault(dst, src, type, err_label)			\
490do {									\
491	const type *__pk_ptr = (dst);					\
492	unsigned long __dst = (unsigned long)__pk_ptr;			\
493	int __err = 0;							\
494	type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)	\
495		     ? get_unaligned((type *)(src))			\
496		     : *(type *)(src);	/* aligned by caller */		\
497	switch (sizeof(type)) {						\
498	case 1: __put_user_asm_byte(__val, __dst, __err, ""); break;	\
499	case 2:	__put_user_asm_half(__val, __dst, __err, ""); break;	\
500	case 4:	__put_user_asm_word(__val, __dst, __err, ""); break;	\
501	case 8:	__put_user_asm_dword(__val, __dst, __err, ""); break;	\
502	default: __err = __put_user_bad(); break;			\
503	}								\
504	if (__err)							\
505		goto err_label;						\
506} while (0)
507
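These two macros back copy_from_kernel_nofault()/copy_to_kernel_nofault(); the error path is a goto to a caller-supplied label rather than a return value. A hedged sketch of direct use (hypothetical function; most code should go through the generic wrappers in linux/uaccess.h instead):

/* Hypothetical example: probe a possibly-unmapped kernel address. */
static int example_peek_kernel_u32(const u32 *addr, u32 *out)
{
        u32 val;

        __get_kernel_nofault(&val, addr, u32, fault);
        *out = val;
        return 0;
fault:
        return -EFAULT;
}
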
508#ifdef CONFIG_MMU
509extern unsigned long __must_check
510arm_copy_from_user(void *to, const void __user *from, unsigned long n);
511
512static inline unsigned long __must_check
513raw_copy_from_user(void *to, const void __user *from, unsigned long n)
514{
515	unsigned int __ua_flags;
516
517	__ua_flags = uaccess_save_and_enable();
518	n = arm_copy_from_user(to, from, n);
519	uaccess_restore(__ua_flags);
520	return n;
521}
522
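raw_copy_from_user() is the arch backend behind the generic copy_from_user() in linux/uaccess.h, which adds the access_ok() range check and zero-fills whatever could not be copied. A simplified sketch of that relationship (illustrative only; the real generic wrapper also handles might_fault() and instrumentation):

/* Simplified sketch of what the generic copy_from_user() does on top of this: */
static inline unsigned long example_copy_from_user(void *to,
                                                   const void __user *from,
                                                   unsigned long n)
{
        unsigned long res = n;

        if (access_ok(from, n))
                res = raw_copy_from_user(to, from, n);
        if (res)                        /* zero the tail that was not copied */
                memset(to + (n - res), 0, res);
        return res;
}
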
523extern unsigned long __must_check
524arm_copy_to_user(void __user *to, const void *from, unsigned long n);
525extern unsigned long __must_check
526__copy_to_user_std(void __user *to, const void *from, unsigned long n);
527
528static inline unsigned long __must_check
529raw_copy_to_user(void __user *to, const void *from, unsigned long n)
530{
531#ifndef CONFIG_UACCESS_WITH_MEMCPY
532	unsigned int __ua_flags;
533	__ua_flags = uaccess_save_and_enable();
534	n = arm_copy_to_user(to, from, n);
535	uaccess_restore(__ua_flags);
536	return n;
537#else
538	return arm_copy_to_user(to, from, n);
539#endif
540}
541
542extern unsigned long __must_check
543arm_clear_user(void __user *addr, unsigned long n);
544extern unsigned long __must_check
545__clear_user_std(void __user *addr, unsigned long n);
546
547static inline unsigned long __must_check
548__clear_user(void __user *addr, unsigned long n)
549{
550	unsigned int __ua_flags = uaccess_save_and_enable();
551	n = arm_clear_user(addr, n);
552	uaccess_restore(__ua_flags);
553	return n;
554}
555
556#else
557static inline unsigned long
558raw_copy_from_user(void *to, const void __user *from, unsigned long n)
559{
560	memcpy(to, (const void __force *)from, n);
561	return 0;
562}
563static inline unsigned long
564raw_copy_to_user(void __user *to, const void *from, unsigned long n)
565{
566	memcpy((void __force *)to, from, n);
567	return 0;
568}
569#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
570#endif
571#define INLINE_COPY_TO_USER
572#define INLINE_COPY_FROM_USER
573
574static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
575{
576	if (access_ok(to, n))
577		n = __clear_user(to, n);
578	return n;
579}
580
581/* These are from lib/ code, and use __get_user() and friends */
582extern long strncpy_from_user(char *dest, const char __user *src, long count);
583
584extern __must_check long strnlen_user(const char __user *str, long n);
585
586#endif /* _ASMARM_UACCESS_H */
v4.6
  1/*
  2 *  arch/arm/include/asm/uaccess.h
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License version 2 as
  6 * published by the Free Software Foundation.
  7 */
  8#ifndef _ASMARM_UACCESS_H
  9#define _ASMARM_UACCESS_H
 10
 11/*
 12 * User space memory access functions
 13 */
 14#include <linux/string.h>
 15#include <linux/thread_info.h>
 16#include <asm/errno.h>
 17#include <asm/memory.h>
 18#include <asm/domain.h>
 19#include <asm/unified.h>
 20#include <asm/compiler.h>
 21
 22#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 23#include <asm-generic/uaccess-unaligned.h>
 24#else
 25#define __get_user_unaligned __get_user
 26#define __put_user_unaligned __put_user
 27#endif
 28
 29#define VERIFY_READ 0
 30#define VERIFY_WRITE 1
 31
 32/*
 33 * The exception table consists of pairs of addresses: the first is the
 34 * address of an instruction that is allowed to fault, and the second is
 35 * the address at which the program should continue.  No registers are
 36 * modified, so it is entirely up to the continuation code to figure out
 37 * what to do.
 38 *
 39 * All the routines below use bits of fixup code that are out of line
 40 * with the main instruction path.  This means when everything is well,
 41 * we don't even have to jump over them.  Further, they do not intrude
 42 * on our cache or tlb entries.
 43 */
 44
 45struct exception_table_entry
 46{
 47	unsigned long insn, fixup;
 48};
 49
 50extern int fixup_exception(struct pt_regs *regs);
 51
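When a user access faults, the trap handler consults this table via fixup_exception(): if the faulting PC has an entry, execution is redirected to the recorded fixup stub instead of oopsing. A rough conceptual sketch (illustrative only; the real lookup is a search over the sorted table in the extable code):

/* Conceptual sketch of the fixup path, not the actual implementation. */
static int example_fixup(struct pt_regs *regs)
{
        const struct exception_table_entry *e;

        e = search_exception_tables(instruction_pointer(regs));
        if (!e)
                return 0;               /* genuine fault: let the oops path run */

        regs->ARM_pc = e->fixup;        /* resume at the out-of-line fixup code */
        return 1;
}
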
 52/*
 53 * These two functions allow hooking accesses to userspace to increase
  54 * system integrity by ensuring that the kernel cannot inadvertently
  55 * perform such accesses (e.g., via list poison values) which could then
  56 * be exploited for privilege escalation.
 57 */
 58static inline unsigned int uaccess_save_and_enable(void)
 59{
 60#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 61	unsigned int old_domain = get_domain();
 62
 63	/* Set the current domain access to permit user accesses */
 64	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
 65		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
 66
 67	return old_domain;
 68#else
 69	return 0;
 70#endif
 71}
 72
 73static inline void uaccess_restore(unsigned int flags)
 74{
 75#ifdef CONFIG_CPU_SW_DOMAIN_PAN
 76	/* Restore the user access mask */
 77	set_domain(flags);
 78#endif
 79}
 80
 81/*
 82 * These two are intentionally not defined anywhere - if the kernel
 83 * code generates any references to them, that's a bug.
 84 */
 85extern int __get_user_bad(void);
 86extern int __put_user_bad(void);
 87
 88/*
 89 * Note that this is actually 0x1,0000,0000
 90 */
 91#define KERNEL_DS	0x00000000
 92#define get_ds()	(KERNEL_DS)
 93
 94#ifdef CONFIG_MMU
 95
 96#define USER_DS		TASK_SIZE
 97#define get_fs()	(current_thread_info()->addr_limit)
 98
 99static inline void set_fs(mm_segment_t fs)
100{
101	current_thread_info()->addr_limit = fs;
102	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
103}
104
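addr_limit is the per-thread boundary that __range_ok() below compares against; kernel code of this era that needed to point the uaccess routines at kernel buffers temporarily widened it with the classic get_fs()/set_fs() dance (historical sketch; this pattern has since been removed from the kernel):

/* Historical pattern, shown for illustration only. */
static void example_kernel_access(void)
{
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);      /* widen addr_limit to cover kernel addresses */
        /* ... uaccess routines may now be used on kernel pointers ... */
        set_fs(old_fs);         /* always restore the caller's limit */
}
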
105#define segment_eq(a, b)	((a) == (b))
106
107#define __addr_ok(addr) ({ \
108	unsigned long flag; \
109	__asm__("cmp %2, %0; movlo %0, #0" \
110		: "=&r" (flag) \
111		: "0" (current_thread_info()->addr_limit), "r" (addr) \
112		: "cc"); \
113	(flag == 0); })
114
115/* We use 33-bit arithmetic here... */
116#define __range_ok(addr, size) ({ \
117	unsigned long flag, roksum; \
118	__chk_user_ptr(addr);	\
119	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
120		: "=&r" (flag), "=&r" (roksum) \
121		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
122		: "cc"); \
123	flag; })
124
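The "33-bit arithmetic" remark means the carry flag acts as a 33rd bit, so addr + size cannot silently wrap past the limit. In plain C the check is roughly the following (illustrative equivalent; per the KERNEL_DS note earlier in this version, a limit of 0 behaves as 0x1,0000,0000):

/* Returns 0 when [addr, addr + size) lies entirely below the limit. */
static inline unsigned long example_range_ok(unsigned long addr,
                                             unsigned long size,
                                             unsigned long limit)
{
        u64 end = (u64)addr + size;                     /* 33-bit sum, cannot wrap */
        u64 lim = limit ? limit : 1ULL << 32;           /* KERNEL_DS (0) means 4 GiB */

        return (end <= lim) ? 0 : 1;
}
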
125/*
126 * Single-value transfer routines.  They automatically use the right
127 * size if we just have the right pointer type.  Note that the functions
128 * which read from user space (*get_*) need to take care not to leak
129 * kernel data even if the calling code is buggy and fails to check
130 * the return value.  This means zeroing out the destination variable
131 * or buffer on error.  Normally this is done out of line by the
132 * fixup code, but there are a few places where it intrudes on the
133 * main code path.  When we only write to user space, there is no
134 * problem.
135 */
136extern int __get_user_1(void *);
137extern int __get_user_2(void *);
138extern int __get_user_4(void *);
139extern int __get_user_32t_8(void *);
140extern int __get_user_8(void *);
141extern int __get_user_64t_1(void *);
142extern int __get_user_64t_2(void *);
143extern int __get_user_64t_4(void *);
144
145#define __GUP_CLOBBER_1	"lr", "cc"
146#ifdef CONFIG_CPU_USE_DOMAINS
147#define __GUP_CLOBBER_2	"ip", "lr", "cc"
148#else
149#define __GUP_CLOBBER_2 "lr", "cc"
150#endif
151#define __GUP_CLOBBER_4	"lr", "cc"
152#define __GUP_CLOBBER_32t_8 "lr", "cc"
153#define __GUP_CLOBBER_8	"lr", "cc"
154
155#define __get_user_x(__r2, __p, __e, __l, __s)				\
156	   __asm__ __volatile__ (					\
157		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
158		__asmeq("%3", "r1")					\
159		"bl	__get_user_" #__s				\
160		: "=&r" (__e), "=r" (__r2)				\
161		: "0" (__p), "r" (__l)					\
162		: __GUP_CLOBBER_##__s)
163
164/* narrowing a double-word get into a single 32bit word register: */
165#ifdef __ARMEB__
166#define __get_user_x_32t(__r2, __p, __e, __l, __s)			\
167	__get_user_x(__r2, __p, __e, __l, 32t_8)
168#else
169#define __get_user_x_32t __get_user_x
170#endif
171
172/*
173 * storing result into proper least significant word of 64bit target var,
174 * different only for big endian case where 64 bit __r2 lsw is r3:
175 */
176#ifdef __ARMEB__
177#define __get_user_x_64t(__r2, __p, __e, __l, __s)		        \
178	   __asm__ __volatile__ (					\
179		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
180		__asmeq("%3", "r1")					\
181		"bl	__get_user_64t_" #__s				\
182		: "=&r" (__e), "=r" (__r2)				\
183		: "0" (__p), "r" (__l)					\
184		: __GUP_CLOBBER_##__s)
185#else
186#define __get_user_x_64t __get_user_x
187#endif
188
189
190#define __get_user_check(x, p)						\
191	({								\
192		unsigned long __limit = current_thread_info()->addr_limit - 1; \
193		register const typeof(*(p)) __user *__p asm("r0") = (p);\
194		register typeof(x) __r2 asm("r2");			\
195		register unsigned long __l asm("r1") = __limit;		\
196		register int __e asm("r0");				\
197		unsigned int __ua_flags = uaccess_save_and_enable();	\
198		switch (sizeof(*(__p))) {				\
199		case 1:							\
200			if (sizeof((x)) >= 8)				\
201				__get_user_x_64t(__r2, __p, __e, __l, 1); \
202			else						\
203				__get_user_x(__r2, __p, __e, __l, 1);	\
204			break;						\
205		case 2:							\
206			if (sizeof((x)) >= 8)				\
207				__get_user_x_64t(__r2, __p, __e, __l, 2); \
208			else						\
209				__get_user_x(__r2, __p, __e, __l, 2);	\
210			break;						\
211		case 4:							\
212			if (sizeof((x)) >= 8)				\
213				__get_user_x_64t(__r2, __p, __e, __l, 4); \
214			else						\
215				__get_user_x(__r2, __p, __e, __l, 4);	\
216			break;						\
217		case 8:							\
218			if (sizeof((x)) < 8)				\
219				__get_user_x_32t(__r2, __p, __e, __l, 4); \
220			else						\
221				__get_user_x(__r2, __p, __e, __l, 8);	\
222			break;						\
223		default: __e = __get_user_bad(); break;			\
224		}							\
225		uaccess_restore(__ua_flags);				\
226		x = (typeof(*(p))) __r2;				\
227		__e;							\
228	})
229
230#define get_user(x, p)							\
231	({								\
232		might_fault();						\
233		__get_user_check(x, p);					\
234	 })
235
236extern int __put_user_1(void *, unsigned int);
237extern int __put_user_2(void *, unsigned int);
238extern int __put_user_4(void *, unsigned int);
239extern int __put_user_8(void *, unsigned long long);
240
241#define __put_user_x(__r2, __p, __e, __l, __s)				\
242	   __asm__ __volatile__ (					\
243		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
244		__asmeq("%3", "r1")					\
245		"bl	__put_user_" #__s				\
246		: "=&r" (__e)						\
247		: "0" (__p), "r" (__r2), "r" (__l)			\
248		: "ip", "lr", "cc")
249
250#define __put_user_check(x, p)						\
251	({								\
252		unsigned long __limit = current_thread_info()->addr_limit - 1; \
253		const typeof(*(p)) __user *__tmp_p = (p);		\
254		register const typeof(*(p)) __r2 asm("r2") = (x);	\
255		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
256		register unsigned long __l asm("r1") = __limit;		\
257		register int __e asm("r0");				\
258		unsigned int __ua_flags = uaccess_save_and_enable();	\
259		switch (sizeof(*(__p))) {				\
260		case 1:							\
261			__put_user_x(__r2, __p, __e, __l, 1);		\
262			break;						\
263		case 2:							\
264			__put_user_x(__r2, __p, __e, __l, 2);		\
265			break;						\
266		case 4:							\
267			__put_user_x(__r2, __p, __e, __l, 4);		\
268			break;						\
269		case 8:							\
270			__put_user_x(__r2, __p, __e, __l, 8);		\
271			break;						\
272		default: __e = __put_user_bad(); break;			\
273		}							\
274		uaccess_restore(__ua_flags);				\
275		__e;							\
276	})
277
278#define put_user(x, p)							\
279	({								\
280		might_fault();						\
281		__put_user_check(x, p);					\
282	 })
283
284#else /* CONFIG_MMU */
285
286/*
287 * uClinux has only one addr space, so has simplified address limits.
288 */
289#define USER_DS			KERNEL_DS
290
291#define segment_eq(a, b)		(1)
292#define __addr_ok(addr)		((void)(addr), 1)
293#define __range_ok(addr, size)	((void)(addr), 0)
294#define get_fs()		(KERNEL_DS)
295
296static inline void set_fs(mm_segment_t fs)
297{
298}
299
300#define get_user(x, p)	__get_user(x, p)
301#define put_user(x, p)	__put_user(x, p)
302
303#endif /* CONFIG_MMU */
304
305#define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)
306
307#define user_addr_max() \
308	(segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
309
310/*
311 * The "__xxx" versions of the user access functions do not verify the
312 * address space - it must have been done previously with a separate
313 * "access_ok()" call.
314 *
315 * The "xxx_error" versions set the third argument to EFAULT if an
316 * error occurs, and leave it unchanged on success.  Note that these
317 * versions are void (ie, don't return a value as such).
318 */
319#define __get_user(x, ptr)						\
320({									\
321	long __gu_err = 0;						\
322	__get_user_err((x), (ptr), __gu_err);				\
323	__gu_err;							\
324})
325
326#define __get_user_error(x, ptr, err)					\
327({									\
328	__get_user_err((x), (ptr), err);				\
329	(void) 0;							\
330})
331
332#define __get_user_err(x, ptr, err)					\
333do {									\
334	unsigned long __gu_addr = (unsigned long)(ptr);			\
335	unsigned long __gu_val;						\
336	unsigned int __ua_flags;					\
337	__chk_user_ptr(ptr);						\
338	might_fault();							\
339	__ua_flags = uaccess_save_and_enable();				\
340	switch (sizeof(*(ptr))) {					\
341	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
342	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
343	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
344	default: (__gu_val) = __get_user_bad();				\
345	}								\
346	uaccess_restore(__ua_flags);					\
347	(x) = (__typeof__(*(ptr)))__gu_val;				\
348} while (0)
349
350#define __get_user_asm(x, addr, err, instr)			\
351	__asm__ __volatile__(					\
352	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
353	"2:\n"							\
354	"	.pushsection .text.fixup,\"ax\"\n"		\
355	"	.align	2\n"					\
356	"3:	mov	%0, %3\n"				\
357	"	mov	%1, #0\n"				\
358	"	b	2b\n"					\
359	"	.popsection\n"					\
360	"	.pushsection __ex_table,\"a\"\n"		\
361	"	.align	3\n"					\
362	"	.long	1b, 3b\n"				\
363	"	.popsection"					\
364	: "+r" (err), "=&r" (x)					\
365	: "r" (addr), "i" (-EFAULT)				\
366	: "cc")
367
368#define __get_user_asm_byte(x, addr, err)			\
369	__get_user_asm(x, addr, err, ldrb)
370
371#ifndef __ARMEB__
372#define __get_user_asm_half(x, __gu_addr, err)			\
373({								\
374	unsigned long __b1, __b2;				\
375	__get_user_asm_byte(__b1, __gu_addr, err);		\
376	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
377	(x) = __b1 | (__b2 << 8);				\
378})
379#else
380#define __get_user_asm_half(x, __gu_addr, err)			\
381({								\
382	unsigned long __b1, __b2;				\
383	__get_user_asm_byte(__b1, __gu_addr, err);		\
384	__get_user_asm_byte(__b2, __gu_addr + 1, err);		\
385	(x) = (__b1 << 8) | __b2;				\
386})
387#endif
388
389#define __get_user_asm_word(x, addr, err)			\
390	__get_user_asm(x, addr, err, ldr)
391
392#define __put_user(x, ptr)						\
393({									\
394	long __pu_err = 0;						\
395	__put_user_err((x), (ptr), __pu_err);				\
396	__pu_err;							\
397})
398
399#define __put_user_error(x, ptr, err)					\
400({									\
401	__put_user_err((x), (ptr), err);				\
402	(void) 0;							\
403})
404
405#define __put_user_err(x, ptr, err)					\
406do {									\
407	unsigned long __pu_addr = (unsigned long)(ptr);			\
408	unsigned int __ua_flags;					\
409	__typeof__(*(ptr)) __pu_val = (x);				\
410	__chk_user_ptr(ptr);						\
411	might_fault();							\
412	__ua_flags = uaccess_save_and_enable();				\
413	switch (sizeof(*(ptr))) {					\
414	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
415	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
416	case 4: __put_user_asm_word(__pu_val, __pu_addr, err);	break;	\
417	case 8:	__put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
418	default: __put_user_bad();					\
419	}								\
420	uaccess_restore(__ua_flags);					\
421} while (0)
422
423#define __put_user_asm(x, __pu_addr, err, instr)		\
424	__asm__ __volatile__(					\
425	"1:	" TUSER(instr) " %1, [%2], #0\n"		\
426	"2:\n"							\
427	"	.pushsection .text.fixup,\"ax\"\n"		\
428	"	.align	2\n"					\
429	"3:	mov	%0, %3\n"				\
430	"	b	2b\n"					\
431	"	.popsection\n"					\
432	"	.pushsection __ex_table,\"a\"\n"		\
433	"	.align	3\n"					\
434	"	.long	1b, 3b\n"				\
435	"	.popsection"					\
436	: "+r" (err)						\
437	: "r" (x), "r" (__pu_addr), "i" (-EFAULT)		\
438	: "cc")
439
440#define __put_user_asm_byte(x, __pu_addr, err)			\
441	__put_user_asm(x, __pu_addr, err, strb)
442
443#ifndef __ARMEB__
444#define __put_user_asm_half(x, __pu_addr, err)			\
445({								\
446	unsigned long __temp = (__force unsigned long)(x);	\
447	__put_user_asm_byte(__temp, __pu_addr, err);		\
448	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err);	\
449})
450#else
451#define __put_user_asm_half(x, __pu_addr, err)			\
452({								\
453	unsigned long __temp = (__force unsigned long)(x);	\
454	__put_user_asm_byte(__temp >> 8, __pu_addr, err);	\
455	__put_user_asm_byte(__temp, __pu_addr + 1, err);	\
456})
457#endif
458
459#define __put_user_asm_word(x, __pu_addr, err)			\
460	__put_user_asm(x, __pu_addr, err, str)
461
462#ifndef __ARMEB__
463#define	__reg_oper0	"%R2"
464#define	__reg_oper1	"%Q2"
465#else
466#define	__reg_oper0	"%Q2"
467#define	__reg_oper1	"%R2"
468#endif
469
470#define __put_user_asm_dword(x, __pu_addr, err)			\
471	__asm__ __volatile__(					\
472 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
473 ARM(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1]\n"	) \
474 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
475 THUMB(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1, #4]\n"	) \
476	"3:\n"							\
477	"	.pushsection .text.fixup,\"ax\"\n"		\
478	"	.align	2\n"					\
479	"4:	mov	%0, %3\n"				\
480	"	b	3b\n"					\
481	"	.popsection\n"					\
482	"	.pushsection __ex_table,\"a\"\n"		\
483	"	.align	3\n"					\
484	"	.long	1b, 4b\n"				\
485	"	.long	2b, 4b\n"				\
486	"	.popsection"					\
487	: "+r" (err), "+r" (__pu_addr)				\
488	: "r" (x), "i" (-EFAULT)				\
489	: "cc")
490
491
492#ifdef CONFIG_MMU
493extern unsigned long __must_check
494arm_copy_from_user(void *to, const void __user *from, unsigned long n);
495
496static inline unsigned long __must_check
497__copy_from_user(void *to, const void __user *from, unsigned long n)
498{
499	unsigned int __ua_flags = uaccess_save_and_enable();
500	n = arm_copy_from_user(to, from, n);
501	uaccess_restore(__ua_flags);
502	return n;
503}
504
505extern unsigned long __must_check
506arm_copy_to_user(void __user *to, const void *from, unsigned long n);
507extern unsigned long __must_check
508__copy_to_user_std(void __user *to, const void *from, unsigned long n);
509
510static inline unsigned long __must_check
511__copy_to_user(void __user *to, const void *from, unsigned long n)
512{
513#ifndef CONFIG_UACCESS_WITH_MEMCPY
514	unsigned int __ua_flags = uaccess_save_and_enable();
515	n = arm_copy_to_user(to, from, n);
516	uaccess_restore(__ua_flags);
517	return n;
518#else
519	return arm_copy_to_user(to, from, n);
520#endif
521}
522
523extern unsigned long __must_check
524arm_clear_user(void __user *addr, unsigned long n);
525extern unsigned long __must_check
526__clear_user_std(void __user *addr, unsigned long n);
527
528static inline unsigned long __must_check
529__clear_user(void __user *addr, unsigned long n)
530{
531	unsigned int __ua_flags = uaccess_save_and_enable();
532	n = arm_clear_user(addr, n);
533	uaccess_restore(__ua_flags);
534	return n;
535}
536
537#else
538#define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
539#define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
540#define __clear_user(addr, n)		(memset((void __force *)addr, 0, n), 0)
541#endif
542
543static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
544{
545	if (access_ok(VERIFY_READ, from, n))
546		n = __copy_from_user(to, from, n);
547	else /* security hole - plug it */
548		memset(to, 0, n);
549	return n;
550}
551
552static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
553{
554	if (access_ok(VERIFY_WRITE, to, n))
555		n = __copy_to_user(to, from, n);
556	return n;
557}
558
559#define __copy_to_user_inatomic __copy_to_user
560#define __copy_from_user_inatomic __copy_from_user
561
562static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
563{
564	if (access_ok(VERIFY_WRITE, to, n))
565		n = __clear_user(to, n);
566	return n;
567}
568
569/* These are from lib/ code, and use __get_user() and friends */
570extern long strncpy_from_user(char *dest, const char __user *src, long count);
571
572extern __must_check long strlen_user(const char __user *str);
573extern __must_check long strnlen_user(const char __user *str, long n);
574
575#endif /* _ASMARM_UACCESS_H */