/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions. These should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/string.h>
#include <asm-generic/access_ok.h>

#ifdef CONFIG_UACCESS_MEMCPY
#include <linux/unaligned.h>

static __always_inline int
__get_user_fn(size_t size, const void __user *from, void *to)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 *)to = *((u8 __force *)from);
		return 0;
	case 2:
		*(u16 *)to = get_unaligned((u16 __force *)from);
		return 0;
	case 4:
		*(u32 *)to = get_unaligned((u32 __force *)from);
		return 0;
	case 8:
		*(u64 *)to = get_unaligned((u64 __force *)from);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
/* Mark __get_user_fn() as provided so the generic fallback below is not used. */
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)

static __always_inline int
__put_user_fn(size_t size, void __user *to, void *from)
{
	BUILD_BUG_ON(!__builtin_constant_p(size));

	switch (size) {
	case 1:
		*(u8 __force *)to = *(u8 *)from;
		return 0;
	case 2:
		put_unaligned(*(u16 *)from, (u16 __force *)to);
		return 0;
	case 4:
		put_unaligned(*(u32 *)from, (u32 __force *)to);
		return 0;
	case 8:
		put_unaligned(*(u64 *)from, (u64 __force *)to);
		return 0;
	default:
		BUILD_BUG();
		return 0;
	}
}
/* Mark __put_user_fn() as provided so the generic fallback below is not used. */
#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	*((type *)dst) = get_unaligned((type *)(src));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	put_unaligned(*((type *)src), (type *)(dst));			\
	if (0) /* make sure the label looks used to the compiler */	\
		goto err_label;						\
} while (0)
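
/*
 * Illustrative sketch, not part of the original header: the
 * get_kernel_nofault()/copy_from_kernel_nofault() wrappers in
 * linux/uaccess.h expand __get_kernel_nofault() roughly like the
 * hypothetical helper below ("read_kernel_word", "kptr", "val" and
 * "Efault" are made-up names used only for this example):
 *
 *	static int read_kernel_word(const void *kptr, unsigned long *val)
 *	{
 *		__get_kernel_nofault(val, kptr, unsigned long, Efault);
 *		return 0;
 *	Efault:
 *		return -EFAULT;
 *	}
 */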

static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, (const void __force *)from, n);
	return 0;
}

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy((void __force *)to, from, n);
	return 0;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */
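
/*
 * Illustrative sketch, not part of the original header: linux/uaccess.h
 * layers copy_from_user()/copy_to_user() on top of the raw_copy_*()
 * helpers above, and both return the number of bytes that could not be
 * copied.  A typical caller (with a hypothetical "struct foo_args" and
 * "uptr") looks roughly like:
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, uptr, sizeof(args)))
 *		return -EFAULT;
 */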

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to raw_copy_{from,to}_user, which should
 * provide a fast-path for small values.
 */
#define __put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __x = (x);					\
	int __pu_err = -EFAULT;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
	case 2:								\
	case 4:								\
	case 8:								\
		__pu_err = __put_user_fn(sizeof(*(ptr)),		\
					 ptr, &__x);			\
		break;							\
	default:							\
		__put_user_bad();					\
		break;							\
	}								\
	__pu_err;							\
})

#define put_user(x, ptr)						\
({									\
	void __user *__p = (ptr);					\
	might_fault();							\
	access_ok(__p, sizeof(*(ptr))) ?				\
		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) :	\
		-EFAULT;						\
})
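
/*
 * Illustrative sketch, not part of the original header: put_user()
 * stores a single scalar to user space and returns 0 on success or
 * -EFAULT on failure.  "status" (a u32) and "uptr" (a u32 __user
 * pointer) are hypothetical names, e.g. in an ioctl handler:
 *
 *	return put_user(status, uptr);
 */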

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}

#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)						\
({									\
	int __gu_err = -EFAULT;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		unsigned char __x = 0;					\
		__gu_err = __get_user_fn(sizeof(*(ptr)),		\
					 ptr, &__x);			\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	}								\
	case 2: {							\
		unsigned short __x = 0;					\
		__gu_err = __get_user_fn(sizeof(*(ptr)),		\
					 ptr, &__x);			\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	}								\
	case 4: {							\
		unsigned int __x = 0;					\
		__gu_err = __get_user_fn(sizeof(*(ptr)),		\
					 ptr, &__x);			\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	}								\
	case 8: {							\
		unsigned long long __x = 0;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),		\
					 ptr, &__x);			\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;		\
		break;							\
	}								\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	__gu_err;							\
})

#define get_user(x, ptr)						\
({									\
	const void __user *__p = (ptr);					\
	might_fault();							\
	access_ok(__p, sizeof(*(ptr))) ?				\
		__get_user((x), (__typeof__(*(ptr)) __user *)__p) :	\
		((x) = (__typeof__(*(ptr)))0, -EFAULT);			\
})
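
/*
 * Illustrative sketch, not part of the original header: get_user()
 * fetches a single scalar from user space; on failure the destination
 * is zeroed and -EFAULT is returned.  "flags" and "uptr" are made-up
 * names:
 *
 *	unsigned int flags;
 *
 *	if (get_user(flags, uptr))
 *		return -EFAULT;
 */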

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}

#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(to, n))
		return n;

	return __clear_user(to, n);
}
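
/*
 * Illustrative sketch, not part of the original header: clear_user()
 * returns the number of bytes it could not zero, so 0 means success.
 * "ubuf" and "len" are hypothetical names:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */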

#include <asm/extable.h>

__must_check long strncpy_from_user(char *dst, const char __user *src,
				    long count);
__must_check long strnlen_user(const char __user *src, long n);
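
/*
 * Illustrative sketch, not part of the original header: the generic
 * strncpy_from_user() (typically provided by lib/strncpy_from_user.c)
 * returns the length of the copied string excluding the trailing NUL,
 * the full count if no NUL was found within the buffer, or -EFAULT on
 * an access failure.  "name" and "uname" are made-up names:
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	(source too long, dst unterminated)
 */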

#endif /* __ASM_GENERIC_UACCESS_H */