v6.8
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * OpenRISC implementation:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 * et al.
 */

#ifndef __ASM_OPENRISC_UACCESS_H
#define __ASM_OPENRISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/prefetch.h>
#include <linux/string.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm-generic/access_ok.h>

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on
 * OpenRISC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
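/*
 * Illustrative usage sketch (not part of the original header; the
 * function and variable names below are hypothetical).  It shows the
 * checked accessors, which embed the access_ok() test and return 0 on
 * success or -EFAULT on a faulting or out-of-range user pointer:
 */
#if 0	/* example only -- never compiled */
static int example_bump_word(u32 __user *uptr)
{
	u32 val;

	if (get_user(val, uptr))	/* checked read of *uptr */
		return -EFAULT;

	return put_user(val + 1, uptr);	/* checked write back */
}
#endif
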
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern long __put_user_bad(void);

#define __put_user_nocheck(x, ptr, size)		\
({							\
	long __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	if (access_ok(__pu_addr, size))					\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#define __put_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	switch (size) {							\
	case 1: __put_user_asm(x, ptr, retval, "l.sb"); break;		\
	case 2: __put_user_asm(x, ptr, retval, "l.sh"); break;		\
	case 4: __put_user_asm(x, ptr, retval, "l.sw"); break;		\
	case 8: __put_user_asm2(x, ptr, retval); break;			\
	default: __put_user_bad();					\
	}								\
} while (0)

struct __large_struct {
	unsigned long buf[100];
};
#define __m(x) (*(struct __large_struct *)(x))

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
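/*
 * Note on the fault handling below (descriptive comment, added for
 * clarity): label 1 (and 2 in the 64-bit variants) marks a user access
 * that may fault.  Each "__ex_table" entry pairs such an instruction
 * with its recovery code in ".fixup"; when the access faults, the
 * page-fault handler looks up the entry and resumes at the fixup code,
 * which sets the error operand to -EFAULT (and, for loads, zeroes the
 * destination register) before jumping back past the access.
 */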
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	"op" 0(%2),%1\n"			\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	l.addi %0,r0,%3\n"			\
		"	l.j 2b\n"				\
		"	l.nop\n"				\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align 2\n"				\
		"	.long 1b,3b\n"				\
		".previous"					\
		: "=r"(err)					\
		: "r"(x), "r"(addr), "i"(-EFAULT), "0"(err))

#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	l.sw 0(%2),%1\n"			\
		"2:	l.sw 4(%2),%H1\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	l.addi %0,r0,%3\n"			\
		"	l.j 3b\n"				\
		"	l.nop\n"				\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align 2\n"				\
		"	.long 1b,4b\n"				\
		"	.long 2b,4b\n"				\
		".previous"					\
		: "=r"(err)					\
		: "r"(x), "r"(addr), "i"(-EFAULT), "0"(err))

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	__get_user_size((x), (ptr), (size), __gu_err);		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	if (access_ok(__gu_addr, size))					\
		__get_user_size((x), __gu_addr, (size), __gu_err);	\
	else								\
		(x) = (__typeof__(*(ptr))) 0;				\
	__gu_err;							\
})

extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	switch (size) {							\
	case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break;		\
	case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break;		\
	case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break;		\
	case 8: __get_user_asm2(x, ptr, retval); break;			\
	default: (x) = (__typeof__(*(ptr)))__get_user_bad();		\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, op)		\
{							\
	unsigned long __gu_tmp;				\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)\n"		\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	l.addi %0,r0,%3\n"		\
		"	l.addi %1,r0,0\n"		\
		"	l.j 2b\n"			\
		"	l.nop\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.align 2\n"			\
		"	.long 1b,3b\n"			\
		".previous"				\
		: "=r"(err), "=r"(__gu_tmp)		\
		: "r"(addr), "i"(-EFAULT), "0"(err));	\
	(x) = (__typeof__(*(addr)))__gu_tmp;		\
}

#define __get_user_asm2(x, addr, err)			\
{							\
	unsigned long long __gu_tmp;			\
	__asm__ __volatile__(				\
		"1:	l.lwz %1,0(%2)\n"		\
		"2:	l.lwz %H1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	l.addi %0,r0,%3\n"		\
		"	l.addi %1,r0,0\n"		\
		"	l.addi %H1,r0,0\n"		\
		"	l.j 3b\n"			\
		"	l.nop\n"			\
		".previous\n"				\
		".section __ex_table,\"a\"\n"		\
		"	.align 2\n"			\
		"	.long 1b,4b\n"			\
		"	.long 2b,4b\n"			\
		".previous"				\
		: "=r"(err), "=&r"(__gu_tmp)		\
		: "r"(addr), "i"(-EFAULT), "0"(err));	\
	(x) = (__typeof__(*(addr)))(			\
		(__typeof__((x)-(x)))__gu_tmp);		\
}

/* more complex routines */

extern unsigned long __must_check
__copy_tofrom_user(void *to, const void *from, unsigned long size);
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long size)
{
	return __copy_tofrom_user(to, (__force const void *)from, size);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long size)
{
	return __copy_tofrom_user((__force void *)to, from, size);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

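/*
 * Descriptive note (added): raw_copy_{from,to}_user() return the number
 * of bytes that could NOT be transferred, so 0 means complete success.
 * The generic copy_from_user()/copy_to_user() wrappers in
 * <linux/uaccess.h> add the access_ok() check and, on the read side,
 * zero the uncopied tail of the kernel buffer; callers are expected to
 * use those wrappers rather than these raw helpers.
 */
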
extern unsigned long __clear_user(void __user *addr, unsigned long size);

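/*
 * Descriptive note (added): clear_user() zeroes @size bytes of user
 * memory at @addr after range-checking it and returns the number of
 * bytes that could not be cleared (0 on success).  __clear_user() is
 * the unchecked variant, implemented out of line in the architecture's
 * library code.
 */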
static inline __must_check unsigned long
clear_user(void __user *addr, unsigned long size)
{
	if (likely(access_ok(addr, size)))
		size = __clear_user(addr, size);
	return size;
}

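/*
 * Descriptive note (added, based on the usual kernel semantics of these
 * helpers): strncpy_from_user() copies a NUL-terminated string from
 * user space and returns the length copied (excluding the NUL), @count
 * if the string was truncated, or -EFAULT on a fault.  strnlen_user()
 * returns the string length including the terminating NUL (a value
 * greater than @n if the string is longer than @n), or 0 if the user
 * address cannot be read.
 */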
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#endif /* __ASM_OPENRISC_UACCESS_H */