#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs()	(current_thread_info()->addr_limit)
#define get_ds()	(KERNEL_DS)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr,size,segment) \
	(((segment).seg & (addr | size | (addr+size))) == 0)
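/*
 * Worked example (illustrative, not from the original source): USER_DS
 * is -0x40000000000UL, i.e. a mask with bits 42..63 set, so a user
 * access must lie entirely below 1UL << 42.  For addr = 0x120000000
 * and size = 0x10, addr | size | (addr+size) == 0x120000010, which has
 * no bits above bit 41 set, so the check passes.  A KSEG pointer such
 * as 0xfffffc0000000000 has high bits set and fails.  With KERNEL_DS
 * the mask is 0, so every address passes.
 */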

#define access_ok(type,addr,size)				\
({								\
	__chk_user_ptr(addr);					\
	__access_ok(((unsigned long)(addr)),(size),get_fs());	\
})

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) \
  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
#define get_user(x,ptr) \
  __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())
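
/*
 * Hypothetical usage sketch (not part of the original header): a
 * driver reads one scalar from user space and writes one back,
 * propagating -EFAULT on failure.
 *
 *	int __user *uptr = ...;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */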

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
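
/*
 * Hypothetical sketch of the check-once pattern described above
 * (the names uarr, tmp and n are illustrative):
 *
 *	if (!access_ok(VERIFY_READ, uarr, n * sizeof(int)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__get_user(tmp[i], uarr + i))
 *			return -EFAULT;
 */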

/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */

extern void __get_user_unknown(void);

#define __get_user_nocheck(x,ptr,size)			\
({							\
	long __gu_err = 0;				\
	unsigned long __gu_val;				\
	__chk_user_ptr(ptr);				\
	switch (size) {					\
	  case 1: __get_user_8(ptr); break;		\
	  case 2: __get_user_16(ptr); break;		\
	  case 4: __get_user_32(ptr); break;		\
	  case 8: __get_user_64(ptr); break;		\
	  default: __get_user_unknown(); break;		\
	}						\
	(x) = (__typeof__(*(ptr))) __gu_val;		\
	__gu_err;					\
})

#define __get_user_check(x,ptr,size,segment)				\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	if (__access_ok((unsigned long)__gu_addr,size,segment)) {	\
		__gu_err = 0;						\
		switch (size) {						\
		  case 1: __get_user_8(__gu_addr); break;		\
		  case 2: __get_user_16(__gu_addr); break;		\
		  case 4: __get_user_32(__gu_addr); break;		\
		  case 8: __get_user_64(__gu_addr); break;		\
		  default: __get_user_unknown(); break;			\
		}							\
	}								\
	(x) = (__typeof__(*(ptr))) __gu_val;				\
	__gu_err;							\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */

#define __get_user_16(addr)						\
{									\
	long __gu_tmp;							\
	__asm__("1: ldq_u %0,0(%3)\n"					\
	"2: ldq_u %1,1(%3)\n"						\
	"	extwl %0,%3,%0\n"					\
	"	extwh %1,%3,%1\n"					\
	"	or %0,%1,%0\n"						\
	"3:\n"								\
	".section __ex_table,\"a\"\n"					\
	"	.long 1b - .\n"						\
	"	lda %0, 3b-1b(%2)\n"					\
	"	.long 2b - .\n"						\
	"	lda %0, 3b-2b(%2)\n"					\
	".previous"							\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
		: "r"(addr), "2"(__gu_err));				\
}

#define __get_user_8(addr)				\
	__asm__("1: ldq_u %0,0(%2)\n"			\
	"	extbl %0,%2,%0\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=&r"(__gu_val), "=r"(__gu_err)	\
		: "r"(addr), "1"(__gu_err))
#endif

extern void __put_user_unknown(void);

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err = 0;				\
	__chk_user_ptr(ptr);				\
	switch (size) {					\
	  case 1: __put_user_8(x,ptr); break;		\
	  case 2: __put_user_16(x,ptr); break;		\
	  case 4: __put_user_32(x,ptr); break;		\
	  case 8: __put_user_64(x,ptr); break;		\
	  default: __put_user_unknown(); break;		\
	}						\
	__pu_err;					\
})

#define __put_user_check(x,ptr,size,segment)				\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	if (__access_ok((unsigned long)__pu_addr,size,segment)) {	\
		__pu_err = 0;						\
		switch (size) {						\
		  case 1: __put_user_8(x,__pu_addr); break;		\
		  case 2: __put_user_16(x,__pu_addr); break;		\
		  case 4: __put_user_32(x,__pu_addr); break;		\
		  case 8: __put_user_64(x,__pu_addr); break;		\
		  default: __put_user_unknown(); break;			\
		}							\
	}								\
	__pu_err;							\
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues
 */
#define __put_user_64(x,addr)					\
__asm__ __volatile__("1: stq %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x,addr)					\
__asm__ __volatile__("1: stl %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x,addr)					\
__asm__ __volatile__("1: stw %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x,addr)					\
__asm__ __volatile__("1: stb %r2,%1\n"				\
	"2:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31,2b-1b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */

#define __put_user_16(x,addr)					\
{								\
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
	__asm__ __volatile__(					\
	"1: ldq_u %2,1(%5)\n"					\
	"2: ldq_u %1,0(%5)\n"					\
	"	inswh %6,%5,%4\n"				\
	"	inswl %6,%5,%3\n"				\
	"	mskwh %2,%5,%2\n"				\
	"	mskwl %1,%5,%1\n"				\
	"	or %2,%4,%2\n"					\
	"	or %1,%3,%1\n"					\
	"3: stq_u %2,1(%5)\n"					\
	"4: stq_u %1,0(%5)\n"					\
	"5:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31, 5b-1b(%0)\n"				\
	"	.long 2b - .\n"					\
	"	lda $31, 5b-2b(%0)\n"				\
	"	.long 3b - .\n"					\
	"	lda $31, 5b-3b(%0)\n"				\
	"	.long 4b - .\n"					\
	"	lda $31, 5b-4b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
		  "=&r"(__pu_tmp4)				\
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x,addr)					\
{								\
	long __pu_tmp1, __pu_tmp2;				\
	__asm__ __volatile__(					\
	"1: ldq_u %1,0(%4)\n"					\
	"	insbl %3,%4,%2\n"				\
	"	mskbl %1,%4,%1\n"				\
	"	or %1,%2,%1\n"					\
	"2: stq_u %1,0(%4)\n"					\
	"3:\n"							\
	".section __ex_table,\"a\"\n"				\
	"	.long 1b - .\n"					\
	"	lda $31, 3b-1b(%0)\n"				\
	"	.long 2b - .\n"					\
	"	lda $31, 3b-2b(%0)\n"				\
	".previous"						\
		: "=r"(__pu_err),				\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif


/*
 * Complex access routines
 */

/* This little bit of silliness is to get the GP loaded for a function
   that ordinarily wouldn't.  Otherwise we could have it done by the macro
   directly, which can be optimized away by the linker.  */
#ifdef MODULE
#define __module_address(sym)		"r"(sym),
#define __module_call(ra, arg, sym)	"jsr $" #ra ",(%" #arg ")," #sym
#else
#define __module_address(sym)
#define __module_call(ra, arg, sym)	"bsr $" #ra "," #sym " !samegp"
#endif
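
/*
 * Illustrative expansions for the __copy_user case below: inside a
 * module, __module_call(28, 3, __copy_user) becomes
 * "jsr $28,(%3),__copy_user", with the routine's address supplied as
 * input operand %3 by __module_address(); built into the kernel it
 * becomes "bsr $28,__copy_user !samegp", where !samegp tells the
 * linker that the callee shares our GP and needs no GP reload.
 */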

extern void __copy_user(void);

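/*
 * __copy_user is an out-of-line assembly routine with a private
 * calling convention: the length, source and destination are pinned
 * to $0, $7 and $6 by the register-asm declarations below, $0 carries
 * the residual count back, and $1-$5 and $28 are clobbered.
 */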
extern inline long
__copy_tofrom_user_nocheck(void *to, const void *from, long len)
{
	register void * __cu_to __asm__("$6") = to;
	register const void * __cu_from __asm__("$7") = from;
	register long __cu_len __asm__("$0") = len;

	__asm__ __volatile__(
		__module_call(28, 3, __copy_user)
		: "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
		: __module_address(__copy_user)
		  "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
		: "$1","$2","$3","$4","$5","$28","memory");

	return __cu_len;
}

extern inline long
__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
{
	if (__access_ok((unsigned long)validate, len, get_fs()))
		len = __copy_tofrom_user_nocheck(to, from, len);
	return len;
}

#define __copy_to_user(to,from,n)					\
({									\
	__chk_user_ptr(to);						\
	__copy_tofrom_user_nocheck((__force void *)(to),(from),(n));	\
})
#define __copy_from_user(to,from,n)					\
({									\
	__chk_user_ptr(from);						\
	__copy_tofrom_user_nocheck((to),(__force void *)(from),(n));	\
})

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user


extern inline long
copy_to_user(void __user *to, const void *from, long n)
{
	return __copy_tofrom_user((__force void *)to, from, n, to);
}

extern inline long
copy_from_user(void *to, const void __user *from, long n)
{
	return __copy_tofrom_user(to, (__force void *)from, n, from);
}

extern void __do_clear_user(void);

extern inline long
__clear_user(void __user *to, long len)
{
	register void __user * __cl_to __asm__("$6") = to;
	register long __cl_len __asm__("$0") = len;
	__asm__ __volatile__(
		__module_call(28, 2, __do_clear_user)
		: "=r"(__cl_len), "=r"(__cl_to)
		: __module_address(__do_clear_user)
		  "0"(__cl_len), "1"(__cl_to)
		: "$1","$2","$3","$4","$5","$28","memory");
	return __cl_len;
}

extern inline long
clear_user(void __user *to, long len)
{
	if (__access_ok((unsigned long)to, len, get_fs()))
		len = __clear_user(to, len);
	return len;
}

#undef __module_address
#undef __module_call

/* Returns: -EFAULT if exception before terminator, N if the entire
   buffer filled, else strlen.  */

extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len);

extern inline long
strncpy_from_user(char *to, const char __user *from, long n)
{
	long ret = -EFAULT;
	if (__access_ok((unsigned long)from, 0, get_fs()))
		ret = __strncpy_from_user(to, from, n);
	return ret;
}
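
/*
 * Hypothetical caller sketch, following the return contract above
 * (-EFAULT on fault, N if the buffer filled, else strlen):
 *
 *	long ret = strncpy_from_user(buf, ustr, sizeof(buf));
 *	if (ret < 0)
 *		return ret;
 *	if (ret == sizeof(buf))
 *		return -ENAMETOOLONG;
 */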

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern long __strlen_user(const char __user *);

extern inline long strlen_user(const char __user *str)
{
	return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0;
}

/* Returns: 0 if exception before NUL or reaching the supplied limit (N),
 * a value greater than N if the limit would be exceeded, else strlen.  */
extern long __strnlen_user(const char __user *, long);

extern inline long strnlen_user(const char __user *str, long n)
{
	return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0;
}

/*
 * About the exception table:
 *
 * - insn is a 32-bit pc-relative offset from the faulting insn.
 * - nextinsn is a 16-bit offset off of the faulting instruction
 *   (not off of the *next* instruction as branches are).
 * - errreg is the register in which to place -EFAULT.
 * - valreg is the final target register for the load sequence
 *   and will be zeroed.
 *
 * Either errreg or valreg may be $31, in which case nothing happens.
 *
 * The exception fixup information "just so happens" to be arranged
 * as in a MEM format instruction.  This lets us emit our three
 * values like so:
 *
 *      lda valreg, nextinsn(errreg)
 *
 */

struct exception_table_entry
{
	signed int insn;
	union exception_fixup {
		unsigned unit;
		struct {
			signed int nextinsn : 16;
			unsigned int errreg : 5;
			unsigned int valreg : 5;
		} bits;
	} fixup;
};
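
/*
 * Worked example (illustrative): for a fixup written as
 * "lda $1, 2b-1b($2)", the assembled MEM-format word carries
 * disp = 2b-1b in bits 0..15, Rb = 2 in bits 16..20 and Ra = 1 in
 * bits 21..25, which line up exactly with the nextinsn, errreg and
 * valreg fields of union exception_fixup above.
 */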

/* Returns the new pc */
#define fixup_exception(map_reg, _fixup, pc)			\
({								\
	if ((_fixup)->fixup.bits.valreg != 31)			\
		map_reg((_fixup)->fixup.bits.valreg) = 0;	\
	if ((_fixup)->fixup.bits.errreg != 31)			\
		map_reg((_fixup)->fixup.bits.errreg) = -EFAULT;	\
	(pc) + (_fixup)->fixup.bits.nextinsn;			\
})

#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE

#endif /* __ALPHA_UACCESS_H */
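
/*
 * A later revision of the same header follows, for comparison: it
 * drops the VERIFY_READ/VERIFY_WRITE flavour of access_ok(), folds
 * the exception-table boilerplate into an EXC() helper macro, and
 * exposes the raw_copy_from_user()/raw_copy_to_user() interface.
 */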
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define get_fs()  (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

/*
 * Is an address valid?  This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size-(size != 0)" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr, size) ({				\
	unsigned long __ao_a = (addr), __ao_b = (size);		\
	unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b;	\
	(get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; })

#define access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__access_ok(((unsigned long)(addr)), (size));	\
})
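
/*
 * Illustrative note: the "- !!__ao_b" term makes the range check
 * end-inclusive and keeps a zero-length access from failing at the
 * very top of the user range.  E.g. addr = (1UL << 42) - 8 with
 * size = 8 yields __ao_end = (1UL << 42) - 1, which stays below the
 * USER_DS mask bits (42..63), so the access is allowed; the older
 * addr+size form above would have rejected it.
 */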

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */
#define EXC(label,cont,res,err)				\
	".section __ex_table,\"a\"\n"			\
	"	.long "#label"-.\n"			\
	"	lda "#res","#cont"-"#label"("#err")\n"	\
	".previous\n"

extern void __get_user_unknown(void);

#define __get_user_nocheck(x, ptr, size)		\
({							\
	long __gu_err = 0;				\
	unsigned long __gu_val;				\
	__chk_user_ptr(ptr);				\
	switch (size) {					\
	case 1: __get_user_8(ptr); break;		\
	case 2: __get_user_16(ptr); break;		\
	case 4: __get_user_32(ptr); break;		\
	case 8: __get_user_64(ptr); break;		\
	default: __get_user_unknown(); break;		\
	}						\
	(x) = (__force __typeof__(*(ptr))) __gu_val;	\
	__gu_err;					\
})

#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	if (__access_ok((unsigned long)__gu_addr, size)) {	\
		__gu_err = 0;					\
		switch (size) {					\
		case 1: __get_user_8(__gu_addr); break;		\
		case 2: __get_user_16(__gu_addr); break;	\
		case 4: __get_user_32(__gu_addr); break;	\
		case 8: __get_user_64(__gu_addr); break;	\
		default: __get_user_unknown(); break;		\
		}						\
	}							\
	(x) = (__force __typeof__(*(ptr))) __gu_val;		\
	__gu_err;						\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */

#define __get_user_16(addr)						\
{									\
	long __gu_tmp;							\
	__asm__("1: ldq_u %0,0(%3)\n"					\
	"2: ldq_u %1,1(%3)\n"						\
	"	extwl %0,%3,%0\n"					\
	"	extwh %1,%3,%1\n"					\
	"	or %0,%1,%0\n"						\
	"3:\n"								\
	EXC(1b,3b,%0,%2)						\
	EXC(2b,3b,%0,%2)						\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
		: "r"(addr), "2"(__gu_err));				\
}

#define __get_user_8(addr)				\
	__asm__("1: ldq_u %0,0(%2)\n"			\
	"	extbl %0,%2,%0\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=&r"(__gu_val), "=r"(__gu_err)	\
		: "r"(addr), "1"(__gu_err))
#endif

extern void __put_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)		\
({							\
	long __pu_err = 0;				\
	__chk_user_ptr(ptr);				\
	switch (size) {					\
	case 1: __put_user_8(x, ptr); break;		\
	case 2: __put_user_16(x, ptr); break;		\
	case 4: __put_user_32(x, ptr); break;		\
	case 8: __put_user_64(x, ptr); break;		\
	default: __put_user_unknown(); break;		\
	}						\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (__access_ok((unsigned long)__pu_addr, size)) {	\
		__pu_err = 0;					\
		switch (size) {					\
		case 1: __put_user_8(x, __pu_addr); break;	\
		case 2: __put_user_16(x, __pu_addr); break;	\
		case 4: __put_user_32(x, __pu_addr); break;	\
		case 8: __put_user_64(x, __pu_addr); break;	\
		default: __put_user_unknown(); break;		\
		}						\
	}							\
	__pu_err;						\
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues
 */
#define __put_user_64(x, addr)				\
__asm__ __volatile__("1: stq %r2,%1\n"			\
	"2:\n"						\
	EXC(1b,2b,$31,%0)				\
		: "=r"(__pu_err)			\
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

#define __put_user_32(x, addr)				\
__asm__ __volatile__("1: stl %r2,%1\n"			\
	"2:\n"						\
	EXC(1b,2b,$31,%0)				\
		: "=r"(__pu_err)			\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __put_user_16(x, addr)				\
__asm__ __volatile__("1: stw %r2,%1\n"			\
	"2:\n"						\
	EXC(1b,2b,$31,%0)				\
		: "=r"(__pu_err)			\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

#define __put_user_8(x, addr)				\
__asm__ __volatile__("1: stb %r2,%1\n"			\
	"2:\n"						\
	EXC(1b,2b,$31,%0)				\
		: "=r"(__pu_err)			\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation.  */

#define __put_user_16(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
	__asm__ __volatile__(					\
	"1: ldq_u %2,1(%5)\n"					\
	"2: ldq_u %1,0(%5)\n"					\
	"	inswh %6,%5,%4\n"				\
	"	inswl %6,%5,%3\n"				\
	"	mskwh %2,%5,%2\n"				\
	"	mskwl %1,%5,%1\n"				\
	"	or %2,%4,%2\n"					\
	"	or %1,%3,%1\n"					\
	"3: stq_u %2,1(%5)\n"					\
	"4: stq_u %1,0(%5)\n"					\
	"5:\n"							\
	EXC(1b,5b,$31,%0)					\
	EXC(2b,5b,$31,%0)					\
	EXC(3b,5b,$31,%0)					\
	EXC(4b,5b,$31,%0)					\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
		  "=&r"(__pu_tmp4)				\
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

#define __put_user_8(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2;				\
	__asm__ __volatile__(					\
	"1: ldq_u %1,0(%4)\n"					\
	"	insbl %3,%4,%2\n"				\
	"	mskbl %1,%4,%1\n"				\
	"	or %1,%2,%1\n"					\
	"2: stq_u %1,0(%4)\n"					\
	"3:\n"							\
	EXC(1b,3b,$31,%0)					\
	EXC(2b,3b,$31,%0)					\
		: "=r"(__pu_err),				\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif


/*
 * Complex access routines
 */

extern long __copy_user(void *to, const void *from, long len);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long len)
{
	return __copy_user(to, (__force const void *)from, len);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long len)
{
	return __copy_user((__force void *)to, from, len);
}

extern long __clear_user(void __user *to, long len);

extern inline long
clear_user(void __user *to, long len)
{
	if (__access_ok((unsigned long)to, len))
		len = __clear_user(to, len);
	return len;
}
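
/*
 * Hypothetical usage sketch: like raw_copy_from_user() and
 * raw_copy_to_user() above, __clear_user()/clear_user() return the
 * number of bytes left unprocessed, so 0 means complete success:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */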

#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : TASK_SIZE)

extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

#include <asm/extable.h>

#endif /* __ALPHA_UACCESS_H */