Loading...
1#ifndef _ASM_IA64_UACCESS_H
2#define _ASM_IA64_UACCESS_H
3
4/*
5 * This file defines various macros to transfer memory areas across
6 * the user/kernel boundary. This needs to be done carefully because
7 * this code is executed in kernel mode and uses user-specified
8 * addresses. Thus, we need to be careful not to let the user to
9 * trick us into accessing kernel memory that would normally be
10 * inaccessible. This code is also fairly performance sensitive,
11 * so we want to spend as little time doing safety checks as
12 * possible.
13 *
14 * To make matters a bit more interesting, these macros sometimes also
15 * called from within the kernel itself, in which case the address
16 * validity check must be skipped. The get_fs() macro tells us what
17 * to do: if get_fs()==USER_DS, checking is performed, if
18 * get_fs()==KERNEL_DS, checking is bypassed.
19 *
20 * Note that even if the memory area specified by the user is in a
21 * valid address range, it is still possible that we'll get a page
22 * fault while accessing it. This is handled by filling out an
23 * exception handler fixup entry for each instruction that has the
24 * potential to fault. When such a fault occurs, the page fault
25 * handler checks to see whether the faulting instruction has a fixup
26 * associated and, if so, sets r8 to -EFAULT and clears r9 to 0 and
27 * then resumes execution at the continuation point.
28 *
29 * Based on <asm-alpha/uaccess.h>.
30 *
31 * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
32 * David Mosberger-Tang <davidm@hpl.hp.com>
33 */
34
35#include <linux/compiler.h>
36#include <linux/errno.h>
37#include <linux/sched.h>
38#include <linux/page-flags.h>
39#include <linux/mm.h>
40
41#include <asm/intrinsics.h>
42#include <asm/pgtable.h>
43#include <asm/io.h>
44
45/*
46 * For historical reasons, the following macros are grossly misnamed:
47 */
48#define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */
49#define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */
50
51#define VERIFY_READ 0
52#define VERIFY_WRITE 1
53
54#define get_ds() (KERNEL_DS)
55#define get_fs() (current_thread_info()->addr_limit)
56#define set_fs(x) (current_thread_info()->addr_limit = (x))
57
58#define segment_eq(a, b) ((a).seg == (b).seg)
59
60/*
61 * When accessing user memory, we need to make sure the entire area really is in
62 * user-level space. In order to do this efficiently, we make sure that the page at
63 * address TASK_SIZE is never valid. We also need to make sure that the address doesn't
64 * point inside the virtually mapped linear page table.
65 */
66#define __access_ok(addr, size, segment) \
67({ \
68 __chk_user_ptr(addr); \
69 (likely((unsigned long) (addr) <= (segment).seg) \
70 && ((segment).seg == KERNEL_DS.seg \
71 || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
72})
73#define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
74
75/*
76 * These are the main single-value transfer routines. They automatically
77 * use the right size if we just have the right pointer type.
78 *
79 * Careful to not
80 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
81 * (b) require any knowledge of processes at this stage
82 */
83#define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
84#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
85
86/*
87 * The "__xxx" versions do not do address space checking, useful when
88 * doing multiple accesses to the same area (the programmer has to do the
89 * checks by hand with "access_ok()")
90 */
91#define __put_user(x, ptr) __put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
92#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
93
94extern long __put_user_unaligned_unknown (void);
95
96#define __put_user_unaligned(x, ptr) \
97({ \
98 long __ret; \
99 switch (sizeof(*(ptr))) { \
100 case 1: __ret = __put_user((x), (ptr)); break; \
101 case 2: __ret = (__put_user((x), (u8 __user *)(ptr))) \
102 | (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
103 case 4: __ret = (__put_user((x), (u16 __user *)(ptr))) \
104 | (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
105 case 8: __ret = (__put_user((x), (u32 __user *)(ptr))) \
106 | (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
107 default: __ret = __put_user_unaligned_unknown(); \
108 } \
109 __ret; \
110})
111
112extern long __get_user_unaligned_unknown (void);
113
114#define __get_user_unaligned(x, ptr) \
115({ \
116 long __ret; \
117 switch (sizeof(*(ptr))) { \
118 case 1: __ret = __get_user((x), (ptr)); break; \
119 case 2: __ret = (__get_user((x), (u8 __user *)(ptr))) \
120 | (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
121 case 4: __ret = (__get_user((x), (u16 __user *)(ptr))) \
122 | (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
123 case 8: __ret = (__get_user((x), (u32 __user *)(ptr))) \
124 | (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
125 default: __ret = __get_user_unaligned_unknown(); \
126 } \
127 __ret; \
128})
129
130#ifdef ASM_SUPPORTED
131 struct __large_struct { unsigned long buf[100]; };
132# define __m(x) (*(struct __large_struct __user *)(x))
133
134/* We need to declare the __ex_table section before we can use it in .xdata. */
135asm (".section \"__ex_table\", \"a\"\n\t.previous");
136
137# define __get_user_size(val, addr, n, err) \
138do { \
139 register long __gu_r8 asm ("r8") = 0; \
140 register long __gu_r9 asm ("r9"); \
141 asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
142 "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n" \
143 "[1:]" \
144 : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8)); \
145 (err) = __gu_r8; \
146 (val) = __gu_r9; \
147} while (0)
148
149/*
150 * The "__put_user_size()" macro tells gcc it reads from memory instead of writing it. This
151 * is because they do not write to any memory gcc knows about, so there are no aliasing
152 * issues.
153 */
154# define __put_user_size(val, addr, n, err) \
155do { \
156 register long __pu_r8 asm ("r8") = 0; \
157 asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
158 "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n" \
159 "[1:]" \
160 : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8)); \
161 (err) = __pu_r8; \
162} while (0)
163
164#else /* !ASM_SUPPORTED */
165# define RELOC_TYPE 2 /* ip-rel */
166# define __get_user_size(val, addr, n, err) \
167do { \
168 __ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE); \
169 (err) = ia64_getreg(_IA64_REG_R8); \
170 (val) = ia64_getreg(_IA64_REG_R9); \
171} while (0)
172# define __put_user_size(val, addr, n, err) \
173do { \
174 __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val)); \
175 (err) = ia64_getreg(_IA64_REG_R8); \
176} while (0)
177#endif /* !ASM_SUPPORTED */
178
179extern void __get_user_unknown (void);
180
181/*
182 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
183 * could clobber r8 and r9 (among others). Thus, be careful not to evaluate it while
184 * using r8/r9.
185 */
186#define __do_get_user(check, x, ptr, size, segment) \
187({ \
188 const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
189 __typeof__ (size) __gu_size = (size); \
190 long __gu_err = -EFAULT; \
191 unsigned long __gu_val = 0; \
192 if (!check || __access_ok(__gu_ptr, size, segment)) \
193 switch (__gu_size) { \
194 case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \
195 case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \
196 case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break; \
197 case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \
198 default: __get_user_unknown(); break; \
199 } \
200 (x) = (__typeof__(*(__gu_ptr))) __gu_val; \
201 __gu_err; \
202})
203
204#define __get_user_nocheck(x, ptr, size) __do_get_user(0, x, ptr, size, KERNEL_DS)
205#define __get_user_check(x, ptr, size, segment) __do_get_user(1, x, ptr, size, segment)
206
207extern void __put_user_unknown (void);
208
209/*
210 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
211 * could clobber r8 (among others). Thus, be careful not to evaluate them while using r8.
212 */
213#define __do_put_user(check, x, ptr, size, segment) \
214({ \
215 __typeof__ (x) __pu_x = (x); \
216 __typeof__ (*(ptr)) __user *__pu_ptr = (ptr); \
217 __typeof__ (size) __pu_size = (size); \
218 long __pu_err = -EFAULT; \
219 \
220 if (!check || __access_ok(__pu_ptr, __pu_size, segment)) \
221 switch (__pu_size) { \
222 case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \
223 case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \
224 case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break; \
225 case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break; \
226 default: __put_user_unknown(); break; \
227 } \
228 __pu_err; \
229})
230
231#define __put_user_nocheck(x, ptr, size) __do_put_user(0, x, ptr, size, KERNEL_DS)
232#define __put_user_check(x, ptr, size, segment) __do_put_user(1, x, ptr, size, segment)
233
234/*
235 * Complex access routines
236 */
237extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
238 unsigned long count);
239
240static inline unsigned long
241__copy_to_user (void __user *to, const void *from, unsigned long count)
242{
243 return __copy_user(to, (__force void __user *) from, count);
244}
245
246static inline unsigned long
247__copy_from_user (void *to, const void __user *from, unsigned long count)
248{
249 return __copy_user((__force void __user *) to, from, count);
250}
251
252#define __copy_to_user_inatomic __copy_to_user
253#define __copy_from_user_inatomic __copy_from_user
254#define copy_to_user(to, from, n) \
255({ \
256 void __user *__cu_to = (to); \
257 const void *__cu_from = (from); \
258 long __cu_len = (n); \
259 \
260 if (__access_ok(__cu_to, __cu_len, get_fs())) \
261 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
262 __cu_len; \
263})
264
265#define copy_from_user(to, from, n) \
266({ \
267 void *__cu_to = (to); \
268 const void __user *__cu_from = (from); \
269 long __cu_len = (n); \
270 \
271 __chk_user_ptr(__cu_from); \
272 if (__access_ok(__cu_from, __cu_len, get_fs())) \
273 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
274 __cu_len; \
275})
276
277#define __copy_in_user(to, from, size) __copy_user((to), (from), (size))
278
279static inline unsigned long
280copy_in_user (void __user *to, const void __user *from, unsigned long n)
281{
282 if (likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)))
283 n = __copy_user(to, from, n);
284 return n;
285}
286
287extern unsigned long __do_clear_user (void __user *, unsigned long);
288
289#define __clear_user(to, n) __do_clear_user(to, n)
290
291#define clear_user(to, n) \
292({ \
293 unsigned long __cu_len = (n); \
294 if (__access_ok(to, __cu_len, get_fs())) \
295 __cu_len = __do_clear_user(to, __cu_len); \
296 __cu_len; \
297})
298
299
300/*
301 * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
302 * strlen.
303 */
304extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);
305
306#define strncpy_from_user(to, from, n) \
307({ \
308 const char __user * __sfu_from = (from); \
309 long __sfu_ret = -EFAULT; \
310 if (__access_ok(__sfu_from, 0, get_fs())) \
311 __sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \
312 __sfu_ret; \
313})
314
315/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
316extern unsigned long __strlen_user (const char __user *);
317
318#define strlen_user(str) \
319({ \
320 const char __user *__su_str = (str); \
321 unsigned long __su_ret = 0; \
322 if (__access_ok(__su_str, 0, get_fs())) \
323 __su_ret = __strlen_user(__su_str); \
324 __su_ret; \
325})
326
327/*
328 * Returns: 0 if exception before NUL or reaching the supplied limit
329 * (N), a value greater than N if the limit would be exceeded, else
330 * strlen.
331 */
332extern unsigned long __strnlen_user (const char __user *, long);
333
334#define strnlen_user(str, len) \
335({ \
336 const char __user *__su_str = (str); \
337 unsigned long __su_ret = 0; \
338 if (__access_ok(__su_str, 0, get_fs())) \
339 __su_ret = __strnlen_user(__su_str, len); \
340 __su_ret; \
341})
342
343/* Generic code can't deal with the location-relative format that we use for compactness. */
344#define ARCH_HAS_SORT_EXTABLE
345#define ARCH_HAS_SEARCH_EXTABLE
346
347struct exception_table_entry {
348 int addr; /* location-relative address of insn this fixup is for */
349 int cont; /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */
350};
351
352extern void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e);
353extern const struct exception_table_entry *search_exception_tables (unsigned long addr);
354
355static inline int
356ia64_done_with_exception (struct pt_regs *regs)
357{
358 const struct exception_table_entry *e;
359 e = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
360 if (e) {
361 ia64_handle_exception(regs, e);
362 return 1;
363 }
364 return 0;
365}
366
367#define ARCH_HAS_TRANSLATE_MEM_PTR 1
368static __inline__ char *
369xlate_dev_mem_ptr (unsigned long p)
370{
371 struct page *page;
372 char * ptr;
373
374 page = pfn_to_page(p >> PAGE_SHIFT);
375 if (PageUncached(page))
376 ptr = (char *)p + __IA64_UNCACHED_OFFSET;
377 else
378 ptr = __va(p);
379
380 return ptr;
381}
382
383/*
384 * Convert a virtual cached kernel memory pointer to an uncached pointer
385 */
386static __inline__ char *
387xlate_dev_kmem_ptr (char * p)
388{
389 struct page *page;
390 char * ptr;
391
392 page = virt_to_page((unsigned long)p);
393 if (PageUncached(page))
394 ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
395 else
396 ptr = p;
397
398 return ptr;
399}
400
401#endif /* _ASM_IA64_UACCESS_H */
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_IA64_UACCESS_H
3#define _ASM_IA64_UACCESS_H
4
5/*
6 * This file defines various macros to transfer memory areas across
7 * the user/kernel boundary. This needs to be done carefully because
8 * this code is executed in kernel mode and uses user-specified
9 * addresses. Thus, we need to be careful not to let the user to
10 * trick us into accessing kernel memory that would normally be
11 * inaccessible. This code is also fairly performance sensitive,
12 * so we want to spend as little time doing safety checks as
13 * possible.
14 *
15 * To make matters a bit more interesting, these macros sometimes also
16 * called from within the kernel itself, in which case the address
17 * validity check must be skipped. The get_fs() macro tells us what
18 * to do: if get_fs()==USER_DS, checking is performed, if
19 * get_fs()==KERNEL_DS, checking is bypassed.
20 *
21 * Note that even if the memory area specified by the user is in a
22 * valid address range, it is still possible that we'll get a page
23 * fault while accessing it. This is handled by filling out an
24 * exception handler fixup entry for each instruction that has the
25 * potential to fault. When such a fault occurs, the page fault
26 * handler checks to see whether the faulting instruction has a fixup
27 * associated and, if so, sets r8 to -EFAULT and clears r9 to 0 and
28 * then resumes execution at the continuation point.
29 *
30 * Based on <asm-alpha/uaccess.h>.
31 *
32 * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
33 * David Mosberger-Tang <davidm@hpl.hp.com>
34 */
35
36#include <linux/compiler.h>
37#include <linux/page-flags.h>
38#include <linux/mm.h>
39
40#include <asm/intrinsics.h>
41#include <asm/pgtable.h>
42#include <asm/io.h>
43#include <asm/extable.h>
44
45/*
46 * For historical reasons, the following macros are grossly misnamed:
47 */
48#define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */
49#define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */
50
51#define get_ds() (KERNEL_DS)
52#define get_fs() (current_thread_info()->addr_limit)
53#define set_fs(x) (current_thread_info()->addr_limit = (x))
54
55#define segment_eq(a, b) ((a).seg == (b).seg)
56
57/*
58 * When accessing user memory, we need to make sure the entire area really is in
59 * user-level space. In order to do this efficiently, we make sure that the page at
60 * address TASK_SIZE is never valid. We also need to make sure that the address doesn't
61 * point inside the virtually mapped linear page table.
62 */
63static inline int __access_ok(const void __user *p, unsigned long size)
64{
65 unsigned long addr = (unsigned long)p;
66 unsigned long seg = get_fs().seg;
67 return likely(addr <= seg) &&
68 (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
69}
70#define access_ok(type, addr, size) __access_ok((addr), (size))
71
72/*
73 * These are the main single-value transfer routines. They automatically
74 * use the right size if we just have the right pointer type.
75 *
76 * Careful to not
77 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
78 * (b) require any knowledge of processes at this stage
79 */
80#define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
81#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
82
83/*
84 * The "__xxx" versions do not do address space checking, useful when
85 * doing multiple accesses to the same area (the programmer has to do the
86 * checks by hand with "access_ok()")
87 */
88#define __put_user(x, ptr) __put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
89#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
90
91#ifdef ASM_SUPPORTED
92 struct __large_struct { unsigned long buf[100]; };
93# define __m(x) (*(struct __large_struct __user *)(x))
94
95/* We need to declare the __ex_table section before we can use it in .xdata. */
96asm (".section \"__ex_table\", \"a\"\n\t.previous");
97
98# define __get_user_size(val, addr, n, err) \
99do { \
100 register long __gu_r8 asm ("r8") = 0; \
101 register long __gu_r9 asm ("r9"); \
102 asm ("\n[1:]\tld"#n" %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n" \
103 "\t.xdata4 \"__ex_table\", 1b-., 1f-.+4\n" \
104 "[1:]" \
105 : "=r"(__gu_r9), "=r"(__gu_r8) : "m"(__m(addr)), "1"(__gu_r8)); \
106 (err) = __gu_r8; \
107 (val) = __gu_r9; \
108} while (0)
109
110/*
111 * The "__put_user_size()" macro tells gcc it reads from memory instead of writing it. This
112 * is because they do not write to any memory gcc knows about, so there are no aliasing
113 * issues.
114 */
115# define __put_user_size(val, addr, n, err) \
116do { \
117 register long __pu_r8 asm ("r8") = 0; \
118 asm volatile ("\n[1:]\tst"#n" %1=%r2%P1\t// %0 gets overwritten by exception handler\n" \
119 "\t.xdata4 \"__ex_table\", 1b-., 1f-.\n" \
120 "[1:]" \
121 : "=r"(__pu_r8) : "m"(__m(addr)), "rO"(val), "0"(__pu_r8)); \
122 (err) = __pu_r8; \
123} while (0)
124
125#else /* !ASM_SUPPORTED */
126# define RELOC_TYPE 2 /* ip-rel */
127# define __get_user_size(val, addr, n, err) \
128do { \
129 __ld_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE); \
130 (err) = ia64_getreg(_IA64_REG_R8); \
131 (val) = ia64_getreg(_IA64_REG_R9); \
132} while (0)
133# define __put_user_size(val, addr, n, err) \
134do { \
135 __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, \
136 (__force unsigned long) (val)); \
137 (err) = ia64_getreg(_IA64_REG_R8); \
138} while (0)
139#endif /* !ASM_SUPPORTED */
140
141extern void __get_user_unknown (void);
142
143/*
144 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
145 * could clobber r8 and r9 (among others). Thus, be careful not to evaluate it while
146 * using r8/r9.
147 */
148#define __do_get_user(check, x, ptr, size) \
149({ \
150 const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
151 __typeof__ (size) __gu_size = (size); \
152 long __gu_err = -EFAULT; \
153 unsigned long __gu_val = 0; \
154 if (!check || __access_ok(__gu_ptr, size)) \
155 switch (__gu_size) { \
156 case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \
157 case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \
158 case 4: __get_user_size(__gu_val, __gu_ptr, 4, __gu_err); break; \
159 case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \
160 default: __get_user_unknown(); break; \
161 } \
162 (x) = (__force __typeof__(*(__gu_ptr))) __gu_val; \
163 __gu_err; \
164})
165
166#define __get_user_nocheck(x, ptr, size) __do_get_user(0, x, ptr, size)
167#define __get_user_check(x, ptr, size) __do_get_user(1, x, ptr, size)
168
169extern void __put_user_unknown (void);
170
171/*
172 * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which
173 * could clobber r8 (among others). Thus, be careful not to evaluate them while using r8.
174 */
175#define __do_put_user(check, x, ptr, size) \
176({ \
177 __typeof__ (x) __pu_x = (x); \
178 __typeof__ (*(ptr)) __user *__pu_ptr = (ptr); \
179 __typeof__ (size) __pu_size = (size); \
180 long __pu_err = -EFAULT; \
181 \
182 if (!check || __access_ok(__pu_ptr, __pu_size)) \
183 switch (__pu_size) { \
184 case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \
185 case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \
186 case 4: __put_user_size(__pu_x, __pu_ptr, 4, __pu_err); break; \
187 case 8: __put_user_size(__pu_x, __pu_ptr, 8, __pu_err); break; \
188 default: __put_user_unknown(); break; \
189 } \
190 __pu_err; \
191})
192
193#define __put_user_nocheck(x, ptr, size) __do_put_user(0, x, ptr, size)
194#define __put_user_check(x, ptr, size) __do_put_user(1, x, ptr, size)
195
196/*
197 * Complex access routines
198 */
199extern unsigned long __must_check __copy_user (void __user *to, const void __user *from,
200 unsigned long count);
201
202static inline unsigned long
203raw_copy_to_user(void __user *to, const void *from, unsigned long count)
204{
205 return __copy_user(to, (__force void __user *) from, count);
206}
207
208static inline unsigned long
209raw_copy_from_user(void *to, const void __user *from, unsigned long count)
210{
211 return __copy_user((__force void __user *) to, from, count);
212}
213
214#define INLINE_COPY_FROM_USER
215#define INLINE_COPY_TO_USER
216
217extern unsigned long __do_clear_user (void __user *, unsigned long);
218
219#define __clear_user(to, n) __do_clear_user(to, n)
220
221#define clear_user(to, n) \
222({ \
223 unsigned long __cu_len = (n); \
224 if (__access_ok(to, __cu_len)) \
225 __cu_len = __do_clear_user(to, __cu_len); \
226 __cu_len; \
227})
228
229
230/*
231 * Returns: -EFAULT if exception before terminator, N if the entire buffer filled, else
232 * strlen.
233 */
234extern long __must_check __strncpy_from_user (char *to, const char __user *from, long to_len);
235
236#define strncpy_from_user(to, from, n) \
237({ \
238 const char __user * __sfu_from = (from); \
239 long __sfu_ret = -EFAULT; \
240 if (__access_ok(__sfu_from, 0)) \
241 __sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \
242 __sfu_ret; \
243})
244
245/*
246 * Returns: 0 if exception before NUL or reaching the supplied limit
247 * (N), a value greater than N if the limit would be exceeded, else
248 * strlen.
249 */
250extern unsigned long __strnlen_user (const char __user *, long);
251
252#define strnlen_user(str, len) \
253({ \
254 const char __user *__su_str = (str); \
255 unsigned long __su_ret = 0; \
256 if (__access_ok(__su_str, 0)) \
257 __su_ret = __strnlen_user(__su_str, len); \
258 __su_ret; \
259})
260
261#define ARCH_HAS_TRANSLATE_MEM_PTR 1
262static __inline__ void *
263xlate_dev_mem_ptr(phys_addr_t p)
264{
265 struct page *page;
266 void *ptr;
267
268 page = pfn_to_page(p >> PAGE_SHIFT);
269 if (PageUncached(page))
270 ptr = (void *)p + __IA64_UNCACHED_OFFSET;
271 else
272 ptr = __va(p);
273
274 return ptr;
275}
276
277/*
278 * Convert a virtual cached kernel memory pointer to an uncached pointer
279 */
280static __inline__ void *
281xlate_dev_kmem_ptr(void *p)
282{
283 struct page *page;
284 void *ptr;
285
286 page = virt_to_page((unsigned long)p);
287 if (PageUncached(page))
288 ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET;
289 else
290 ptr = p;
291
292 return ptr;
293}
294
295#endif /* _ASM_IA64_UACCESS_H */