// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <linux/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
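
/*
 * For example, a ptrace write to cr_ipsr is merged through this mask
 * (see the ELF_CR_IPSR_OFFSET case in access_elf_areg() below), so
 * only the bits listed above can actually change:
 *
 *	pt->cr_ipsr = (*data & IPSR_MASK) | (pt->cr_ipsr & ~IPSR_MASK);
 */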

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)
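
/*
 * E.g., MASK(3) == 0x7.  PFM_MASK covers the 38 bits of the current
 * frame marker (sof, sol, sor and the rrb fields) within cr.ifs.
 */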

#define PTRACE_DEBUG 0

#if PTRACE_DEBUG
# define dprintk(format...) printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call. */

static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}
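
/*
 * (Worked example: bit 63 of cr.ifs is the valid bit, which
 * interruption frames leave set and the syscall path leaves clear, so
 * a value such as 0x8000000000000005 tests negative above while a
 * syscall frame like 0x5 -- sof == 5 -- tests non-negative.)
 */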

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}

/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}
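
/*
 * Worked example (the actual bit positions come from ia64_unat_pos()
 * and the layout of struct pt_regs; the numbers below are only
 * illustrative): if the NaT bit of r8 lived at bit 20 of scratch_unat,
 * GET_BITS(8, 11, unat) would compute dist = 20 - 8 = 12 and rotate
 * unat right by 12 so that bit 20 lands at bit 8, then mask off bits
 * 8-11.  PUT_BITS applies the inverse rotation, so putting and then
 * getting the scratch NaT bits returns the original bitset.
 */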

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}

/*
 * This routine is used to read the RNaT bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel are different, this is not completely trivial.  In
 * essence, we need to construct the user RNAT based on up to two
 * kernel RNAT values and/or the RNAT value saved in the child's
 * pt_regs.
 *
 *  user rbs
 *
 *  +--------+ <-- lowest address
 *  | slot62 |
 *  +--------+
 *  |  rnat  | 0x....1f8
 *  +--------+
 *  | slot00 |                               \
 *  +--------+                               |
 *  | slot01 |                               > child_regs->ar_rnat
 *  +--------+                               |
 *  | slot02 |                               /       kernel rbs
 *  +--------+                             +--------+
 *             <- child_regs->ar_bspstore  | slot61 | <-- krbs
 *  +- - - - +                             +--------+
 *                                         | slot62 |
 *  +- - - - +                             +--------+
 *                                         |  rnat  |
 *  +- - - - +                             +--------+
 *    vrnat                                | slot00 |
 *  +- - - - +                             +--------+
 *                                         =        =
 *                                         +--------+
 *                                         | slot00 | \
 *                                         +--------+ |
 *                                         | slot01 | > child_stack->ar_rnat
 *                                         +--------+ |
 *                                         | slot02 | /
 *                                         +--------+
 *                                           <--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 */
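/*
 * For reference: the RSE stores one RNaT collection word after every
 * 63 register slots, at the addresses whose bits 3-8 are all ones
 * (the "0x....1f8" slots in the diagram above, i.e.
 * (addr & 0x1f8) == 0x1f8).  ia64_rse_rnat_addr(), ia64_rse_slot_num()
 * and ia64_rse_is_rnat_slot() encapsulate this arithmetic.
 */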
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}

/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_BSP gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}

long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		   != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame mask" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
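
/*
 * Note on the loadrs arithmetic above: pt->loadrs is an ar.rsc image
 * whose loadrs field (bits 16-29, assuming the usual RSC layout)
 * gives the size of the dirty partition in bytes, so loadrs >> 19
 * (>> 16 for bytes, >> 3 more for 8-byte slots) is the number of
 * dirty slots on the kernel RBS; ia64_rse_num_regs() then discounts
 * the interleaved RNaT collection slots to yield ndirty.
 */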

/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}

static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE)
				!= sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}

/*
 * When a thread is stopped (ptraced), the debugger might change the
 * thread's user stack (by modifying its memory directly), and we must
 * avoid having the RSE state stored in the kernel override the user
 * stack (user space's RSE is newer than the kernel's in that case).
 * To work around the issue, we copy the kernel RSE to the user RSE
 * before the task is stopped, so the user RSE has up-to-date data; we
 * then copy the user RSE back to the kernel after the task is resumed
 * from the traced stop, so the kernel uses the newer RSE to return to
 * user mode.  TIF_RESTORE_RSE is the flag indicating that we need to
 * synchronize the user RSE back to the kernel.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}

/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}

/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			child->state = TASK_TRACED;
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			child->state = TASK_STOPPED;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}

/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}

static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}

static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
	    || access_uarea(child, PT_CFM, &cfm, 0)
	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}

static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long ret, retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
	retval |= access_uarea(child, PT_CFM, &cfm, 1);
	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);

	ret = retval ? -EIO : 0;
	return ret;
}

void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}

long
arch_ptrace (struct task_struct *child, long request,
	     unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);

	default:
		return ptrace_request(child, request, addr, data);
	}
}
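
/*
 * A minimal user-level sketch of the requests above (illustrative
 * only; error handling mostly omitted, offsets taken from
 * <asm/ptrace_offsets.h>):
 *
 *	errno = 0;
 *	long ip = ptrace(PTRACE_PEEKUSR, pid, PT_CR_IIP, 0);
 *	if (ip == -1 && errno != 0)
 *		...;	// a real error, not a register that happens to be -1
 *	ptrace(PTRACE_POKEUSR, pid, PT_CR_IIP, ip);
 *
 * The force_successful_syscall_return() calls above are what make
 * the "errno = 0" convention work for values that look negative.
 */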


/* "asmlinkage" so the input arguments are preserved... */

asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		if (tracehook_report_syscall_entry(&regs))
			return -ENOSYS;

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();


	audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3);

	return 0;
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	int step;

	audit_syscall_exit(&regs);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(&regs, step);

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();
}

/* Utrace implementation starts here */
struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};

static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;
	int ret;
	char nat = 0;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_GR_OFFSET(1):
		ptr = &pt->r1;
		break;
	case ELF_GR_OFFSET(2):
	case ELF_GR_OFFSET(3):
		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
		break;
	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
		if (write_access) {
			/* read NaT bit first: */
			unsigned long dummy;

			ret = unw_get_gr(info, addr/8, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, addr/8, data, &nat, write_access);
	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
		break;
	case ELF_GR_OFFSET(12):
	case ELF_GR_OFFSET(13):
		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
		break;
	case ELF_GR_OFFSET(14):
		ptr = &pt->r14;
		break;
	case ELF_GR_OFFSET(15):
		ptr = &pt->r15;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) while the task is
			 * blocked in a system call, convert the state
			 * so that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:
			ptr = &pt->ar_rnat;
			break;
		case ELF_AR_CCV_OFFSET:
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access) {
				unsigned long tmp = *data;
				/* psr.ri==3 is a reserved value: SDM 2:25 */
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;

	return 0;
}

static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
		return access_elf_gpreg(target, info, addr, data, write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data, write_access);
	else
		return access_elf_areg(target, info, addr, data, write_access);
}

void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index, min_copy;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * coredump format:
	 *      r0-r31
	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *      predicate registers (p0-p63)
	 *      b0-b7
	 *      ip cfm user-mask
	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */


	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1 - gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* r16-r31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
				index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 0) < 0) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
	}
}

void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1-gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		i = dst->pos;
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* gr16-gr31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		i = dst->pos;
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret)
			return;
		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		i = dst->pos;
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
						&tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}

#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))
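
/*
 * elf_fpreg_t is the 16-byte struct ia64_fpreg, so, e.g.,
 * ELF_FP_OFFSET(2) == 32 and ELF_FP_OFFSET(32) == 512: f2 lives at
 * byte offset 32 and the fph partition (f32-f127) starts at byte
 * offset 512 of the FP regset.
 */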

void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	elf_fpreg_t tmp[30];
	int index, min_copy, i;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);

		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
				dst->pos + dst->count);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
				index++)
			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
					 &tmp[index])) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fph */
	if (dst->count > 0) {
		ia64_flush_fph(dst->target);
		if (task->thread.flags & IA64_THREAD_FPH_VALID)
			dst->ret = user_regset_copyout(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				&dst->target->thread.fph,
				ELF_FP_OFFSET(32), -1);
		else
			/* Zero fill instead. */
			dst->ret = user_regset_copyout_zero(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				ELF_FP_OFFSET(32), -1);
	}
}

void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			 dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

		if (start & 0xF) {	/* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
					 &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) {	/* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
					&fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
						&dst->u.set.kbuf,
						&dst->u.set.ubuf,
						&dst->target->thread.fph,
						ELF_FP_OFFSET(32), -1);
	}
}

static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
				      .pos = pos, .count = count,
				      .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				      .ret = 0 };

	if (target == current)
		unw_init_running(call, &info);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, target);
		(*call)(&ufi, &info);
	}

	return info.ret;
}

static int
gpregs_get(struct task_struct *target,
	   const struct user_regset *regset,
	   unsigned int pos, unsigned int count,
	   void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_gpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int gpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_gpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
	do_sync_rbs(info, ia64_sync_user_rbs);
}

/*
 * This is called to write back the register backing store.
 * ptrace does this before it stops, so that a tracer reading the user
 * memory after the thread stops will get the current register data.
 */
static int
gpregs_writeback(struct task_struct *target,
		 const struct user_regset *regset,
		 int now)
{
	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
		return 0;
	set_notify_resume(target);
	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
		NULL, NULL);
}

static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int fpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}
1911
static int
access_uarea(struct task_struct *child, unsigned long addr,
	     unsigned long *data, int write_access)
{
	unsigned int pos = -1; /* an invalid value */
	int ret;
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
	    (addr >= PT_R7 + 8 && addr < PT_B1) ||
	    (addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
	    (addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}

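	/* floating-point registers: map onto the fpregs regset */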
	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ... (PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = fpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = fpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

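	/*
	 * General, branch, application, and control registers: map
	 * onto the gpregs regset.
	 */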
	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = gpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = gpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	/* access debug registers */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}
#ifdef CONFIG_PERFMON
	/*
	 * Check if debug registers are used by perfmon. This
	 * test must be done once we know that we can do the
	 * operation, i.e. the arguments are all valid, but
	 * before we start modifying the state.
	 *
	 * Perfmon needs to keep a count of how many processes
	 * are trying to modify the debug registers for system
	 * wide monitoring sessions.
	 *
	 * We also include read accesses here, because they may
	 * cause the PMU-installed debug register state
	 * (dbr[], ibr[]) to be reset. The two arrays are also
	 * used by perfmon, but we do not use
	 * IA64_THREAD_DBG_VALID. The registers are restored
	 * by the PMU context switch code.
	 */
	if (pfm_use_debug_registers(child))
		return -1;
#endif

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
		       sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
		       sizeof(child->thread.ibr));
	}

	ptr += regnum;

	if ((regnum & 1) && write_access) {
		/* don't let the user set kernel-level breakpoints: */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

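/*
 * The native regsets exported for ptrace and core dumps: one for the
 * general registers (NT_PRSTATUS) and one for the floating-point
 * registers (NT_PRFPREG).
 */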
static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

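/* There is only one view: every task uses the native ia64 regsets. */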
const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
	return &user_ia64_view;
}

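/*
 * Helper for reading or writing syscall arguments: the arguments live
 * in the stacked registers on the kernel register backing store, so
 * they have to be located via the unwinder and the dirty-partition
 * size encoded in pt->loadrs.
 */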
struct syscall_get_set_args {
	unsigned int i;
	unsigned int n;
	unsigned long *args;
	struct pt_regs *regs;
	int rw;
};

static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
	struct syscall_get_set_args *args = data;
	struct pt_regs *pt = args->regs;
	unsigned long *krbs, cfm, ndirty;
	int i, count;

	if (unw_unwind_to_user(info) < 0)
		return;

	cfm = pt->cr_ifs;
	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	count = 0;
	if (in_syscall(pt))
		count = min_t(int, args->n, cfm & 0x7f);

	for (i = 0; i < count; i++) {
		if (args->rw)
			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
				args->args[i];
		else
			args->args[i] = *ia64_rse_skip_regs(krbs,
				ndirty + i + args->i);
	}

	if (!args->rw) {
		while (i < args->n) {
			args->args[i] = 0;
			i++;
		}
	}
}

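/*
 * Entry point used by the syscall tracing code: get (rw == 0) or set
 * (rw != 0) syscall arguments i..i+n-1 of TASK, unwinding in place if
 * TASK is current and from the blocked task's state otherwise.
 */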
void ia64_syscall_get_set_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned int i, unsigned int n,
	unsigned long *args, int rw)
{
	struct syscall_get_set_args data = {
		.i = i,
		.n = n,
		.args = args,
		.regs = regs,
		.rw = rw,
	};

	if (task == current)
		unw_init_running(syscall_get_set_args_cb, &data);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, task);
		syscall_get_set_args_cb(&ufi, &data);
	}
}
1/*
2 * Kernel support for the ptrace() and syscall tracing interfaces.
3 *
4 * Copyright (C) 1999-2005 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 * Copyright (C) 2006 Intel Co
7 * 2006-08-12 - IA64 Native Utrace implementation support added by
8 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
9 *
10 * Derived from the x86 and Alpha versions.
11 */
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/mm.h>
15#include <linux/errno.h>
16#include <linux/ptrace.h>
17#include <linux/user.h>
18#include <linux/security.h>
19#include <linux/audit.h>
20#include <linux/signal.h>
21#include <linux/regset.h>
22#include <linux/elf.h>
23#include <linux/tracehook.h>
24
25#include <asm/pgtable.h>
26#include <asm/processor.h>
27#include <asm/ptrace_offsets.h>
28#include <asm/rse.h>
29#include <asm/uaccess.h>
30#include <asm/unwind.h>
31#ifdef CONFIG_PERFMON
32#include <asm/perfmon.h>
33#endif
34
35#include "entry.h"
36
37/*
38 * Bits in the PSR that we allow ptrace() to change:
39 * be, up, ac, mfl, mfh (the user mask; five bits total)
40 * db (debug breakpoint fault; one bit)
41 * id (instruction debug fault disable; one bit)
42 * dd (data debug fault disable; one bit)
43 * ri (restart instruction; two bits)
44 * is (instruction set; one bit)
45 */
46#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
47 | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
48
49#define MASK(nbits) ((1UL << (nbits)) - 1) /* mask with NBITS bits set */
50#define PFM_MASK MASK(38)
51
52#define PTRACE_DEBUG 0
53
54#if PTRACE_DEBUG
55# define dprintk(format...) printk(format)
56# define inline
57#else
58# define dprintk(format...)
59#endif
60
61/* Return TRUE if PT was created due to kernel-entry via a system-call. */
62
63static inline int
64in_syscall (struct pt_regs *pt)
65{
66 return (long) pt->cr_ifs >= 0;
67}
68
69/*
70 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
71 * bitset where bit i is set iff the NaT bit of register i is set.
72 */
73unsigned long
74ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
75{
76# define GET_BITS(first, last, unat) \
77 ({ \
78 unsigned long bit = ia64_unat_pos(&pt->r##first); \
79 unsigned long nbits = (last - first + 1); \
80 unsigned long mask = MASK(nbits) << first; \
81 unsigned long dist; \
82 if (bit < first) \
83 dist = 64 + bit - first; \
84 else \
85 dist = bit - first; \
86 ia64_rotr(unat, dist) & mask; \
87 })
88 unsigned long val;
89
90 /*
91 * Registers that are stored consecutively in struct pt_regs
92 * can be handled in parallel. If the register order in
93 * struct_pt_regs changes, this code MUST be updated.
94 */
95 val = GET_BITS( 1, 1, scratch_unat);
96 val |= GET_BITS( 2, 3, scratch_unat);
97 val |= GET_BITS(12, 13, scratch_unat);
98 val |= GET_BITS(14, 14, scratch_unat);
99 val |= GET_BITS(15, 15, scratch_unat);
100 val |= GET_BITS( 8, 11, scratch_unat);
101 val |= GET_BITS(16, 31, scratch_unat);
102 return val;
103
104# undef GET_BITS
105}
106
107/*
108 * Set the NaT bits for the scratch registers according to NAT and
109 * return the resulting unat (assuming the scratch registers are
110 * stored in PT).
111 */
112unsigned long
113ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
114{
115# define PUT_BITS(first, last, nat) \
116 ({ \
117 unsigned long bit = ia64_unat_pos(&pt->r##first); \
118 unsigned long nbits = (last - first + 1); \
119 unsigned long mask = MASK(nbits) << first; \
120 long dist; \
121 if (bit < first) \
122 dist = 64 + bit - first; \
123 else \
124 dist = bit - first; \
125 ia64_rotl(nat & mask, dist); \
126 })
127 unsigned long scratch_unat;
128
129 /*
130 * Registers that are stored consecutively in struct pt_regs
131 * can be handled in parallel. If the register order in
132 * struct_pt_regs changes, this code MUST be updated.
133 */
134 scratch_unat = PUT_BITS( 1, 1, nat);
135 scratch_unat |= PUT_BITS( 2, 3, nat);
136 scratch_unat |= PUT_BITS(12, 13, nat);
137 scratch_unat |= PUT_BITS(14, 14, nat);
138 scratch_unat |= PUT_BITS(15, 15, nat);
139 scratch_unat |= PUT_BITS( 8, 11, nat);
140 scratch_unat |= PUT_BITS(16, 31, nat);
141
142 return scratch_unat;
143
144# undef PUT_BITS
145}
146
147#define IA64_MLX_TEMPLATE 0x2
148#define IA64_MOVL_OPCODE 6
149
150void
151ia64_increment_ip (struct pt_regs *regs)
152{
153 unsigned long w0, ri = ia64_psr(regs)->ri + 1;
154
155 if (ri > 2) {
156 ri = 0;
157 regs->cr_iip += 16;
158 } else if (ri == 2) {
159 get_user(w0, (char __user *) regs->cr_iip + 0);
160 if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
161 /*
162 * rfi'ing to slot 2 of an MLX bundle causes
163 * an illegal operation fault. We don't want
164 * that to happen...
165 */
166 ri = 0;
167 regs->cr_iip += 16;
168 }
169 }
170 ia64_psr(regs)->ri = ri;
171}
172
173void
174ia64_decrement_ip (struct pt_regs *regs)
175{
176 unsigned long w0, ri = ia64_psr(regs)->ri - 1;
177
178 if (ia64_psr(regs)->ri == 0) {
179 regs->cr_iip -= 16;
180 ri = 2;
181 get_user(w0, (char __user *) regs->cr_iip + 0);
182 if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
183 /*
184 * rfi'ing to slot 2 of an MLX bundle causes
185 * an illegal operation fault. We don't want
186 * that to happen...
187 */
188 ri = 1;
189 }
190 }
191 ia64_psr(regs)->ri = ri;
192}
193
194/*
195 * This routine is used to read an rnat bits that are stored on the
196 * kernel backing store. Since, in general, the alignment of the user
197 * and kernel are different, this is not completely trivial. In
198 * essence, we need to construct the user RNAT based on up to two
199 * kernel RNAT values and/or the RNAT value saved in the child's
200 * pt_regs.
201 *
202 * user rbs
203 *
204 * +--------+ <-- lowest address
205 * | slot62 |
206 * +--------+
207 * | rnat | 0x....1f8
208 * +--------+
209 * | slot00 | \
210 * +--------+ |
211 * | slot01 | > child_regs->ar_rnat
212 * +--------+ |
213 * | slot02 | / kernel rbs
214 * +--------+ +--------+
215 * <- child_regs->ar_bspstore | slot61 | <-- krbs
216 * +- - - - + +--------+
217 * | slot62 |
218 * +- - - - + +--------+
219 * | rnat |
220 * +- - - - + +--------+
221 * vrnat | slot00 |
222 * +- - - - + +--------+
223 * = =
224 * +--------+
225 * | slot00 | \
226 * +--------+ |
227 * | slot01 | > child_stack->ar_rnat
228 * +--------+ |
229 * | slot02 | /
230 * +--------+
231 * <--- child_stack->ar_bspstore
232 *
233 * The way to think of this code is as follows: bit 0 in the user rnat
234 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
235 * value. The kernel rnat value holding this bit is stored in
236 * variable rnat0. rnat1 is loaded with the kernel rnat value that
237 * form the upper bits of the user rnat value.
238 *
239 * Boundary cases:
240 *
241 * o when reading the rnat "below" the first rnat slot on the kernel
242 * backing store, rnat0/rnat1 are set to 0 and the low order bits are
243 * merged in from pt->ar_rnat.
244 *
245 * o when reading the rnat "above" the last rnat slot on the kernel
246 * backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
247 */
248static unsigned long
249get_rnat (struct task_struct *task, struct switch_stack *sw,
250 unsigned long *krbs, unsigned long *urnat_addr,
251 unsigned long *urbs_end)
252{
253 unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
254 unsigned long umask = 0, mask, m;
255 unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
256 long num_regs, nbits;
257 struct pt_regs *pt;
258
259 pt = task_pt_regs(task);
260 kbsp = (unsigned long *) sw->ar_bspstore;
261 ubspstore = (unsigned long *) pt->ar_bspstore;
262
263 if (urbs_end < urnat_addr)
264 nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
265 else
266 nbits = 63;
267 mask = MASK(nbits);
268 /*
269 * First, figure out which bit number slot 0 in user-land maps
270 * to in the kernel rnat. Do this by figuring out how many
271 * register slots we're beyond the user's backingstore and
272 * then computing the equivalent address in kernel space.
273 */
274 num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
275 slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
276 shift = ia64_rse_slot_num(slot0_kaddr);
277 rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
278 rnat0_kaddr = rnat1_kaddr - 64;
279
280 if (ubspstore + 63 > urnat_addr) {
281 /* some bits need to be merged in from pt->ar_rnat */
282 umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
283 urnat = (pt->ar_rnat & umask);
284 mask &= ~umask;
285 if (!mask)
286 return urnat;
287 }
288
289 m = mask << shift;
290 if (rnat0_kaddr >= kbsp)
291 rnat0 = sw->ar_rnat;
292 else if (rnat0_kaddr > krbs)
293 rnat0 = *rnat0_kaddr;
294 urnat |= (rnat0 & m) >> shift;
295
296 m = mask >> (63 - shift);
297 if (rnat1_kaddr >= kbsp)
298 rnat1 = sw->ar_rnat;
299 else if (rnat1_kaddr > krbs)
300 rnat1 = *rnat1_kaddr;
301 urnat |= (rnat1 & m) << (63 - shift);
302 return urnat;
303}
304
305/*
306 * The reverse of get_rnat.
307 */
308static void
309put_rnat (struct task_struct *task, struct switch_stack *sw,
310 unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
311 unsigned long *urbs_end)
312{
313 unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
314 unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
315 long num_regs, nbits;
316 struct pt_regs *pt;
317 unsigned long cfm, *urbs_kargs;
318
319 pt = task_pt_regs(task);
320 kbsp = (unsigned long *) sw->ar_bspstore;
321 ubspstore = (unsigned long *) pt->ar_bspstore;
322
323 urbs_kargs = urbs_end;
324 if (in_syscall(pt)) {
325 /*
326 * If entered via syscall, don't allow user to set rnat bits
327 * for syscall args.
328 */
329 cfm = pt->cr_ifs;
330 urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
331 }
332
333 if (urbs_kargs >= urnat_addr)
334 nbits = 63;
335 else {
336 if ((urnat_addr - 63) >= urbs_kargs)
337 return;
338 nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
339 }
340 mask = MASK(nbits);
341
342 /*
343 * First, figure out which bit number slot 0 in user-land maps
344 * to in the kernel rnat. Do this by figuring out how many
345 * register slots we're beyond the user's backingstore and
346 * then computing the equivalent address in kernel space.
347 */
348 num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
349 slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
350 shift = ia64_rse_slot_num(slot0_kaddr);
351 rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
352 rnat0_kaddr = rnat1_kaddr - 64;
353
354 if (ubspstore + 63 > urnat_addr) {
355 /* some bits need to be place in pt->ar_rnat: */
356 umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
357 pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
358 mask &= ~umask;
359 if (!mask)
360 return;
361 }
362 /*
363 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
364 * rnat slot is ignored. so we don't have to clear it here.
365 */
366 rnat0 = (urnat << shift);
367 m = mask << shift;
368 if (rnat0_kaddr >= kbsp)
369 sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
370 else if (rnat0_kaddr > krbs)
371 *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
372
373 rnat1 = (urnat >> (63 - shift));
374 m = mask >> (63 - shift);
375 if (rnat1_kaddr >= kbsp)
376 sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
377 else if (rnat1_kaddr > krbs)
378 *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
379}
380
381static inline int
382on_kernel_rbs (unsigned long addr, unsigned long bspstore,
383 unsigned long urbs_end)
384{
385 unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
386 urbs_end);
387 return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
388}
389
390/*
391 * Read a word from the user-level backing store of task CHILD. ADDR
392 * is the user-level address to read the word from, VAL a pointer to
393 * the return value, and USER_BSP gives the end of the user-level
394 * backing store (i.e., it's the address that would be in ar.bsp after
395 * the user executed a "cover" instruction).
396 *
397 * This routine takes care of accessing the kernel register backing
398 * store for those registers that got spilled there. It also takes
399 * care of calculating the appropriate RNaT collection words.
400 */
401long
402ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
403 unsigned long user_rbs_end, unsigned long addr, long *val)
404{
405 unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
406 struct pt_regs *child_regs;
407 size_t copied;
408 long ret;
409
410 urbs_end = (long *) user_rbs_end;
411 laddr = (unsigned long *) addr;
412 child_regs = task_pt_regs(child);
413 bspstore = (unsigned long *) child_regs->ar_bspstore;
414 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
415 if (on_kernel_rbs(addr, (unsigned long) bspstore,
416 (unsigned long) urbs_end))
417 {
418 /*
419 * Attempt to read the RBS in an area that's actually
420 * on the kernel RBS => read the corresponding bits in
421 * the kernel RBS.
422 */
423 rnat_addr = ia64_rse_rnat_addr(laddr);
424 ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
425
426 if (laddr == rnat_addr) {
427 /* return NaT collection word itself */
428 *val = ret;
429 return 0;
430 }
431
432 if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
433 /*
434 * It is implementation dependent whether the
435 * data portion of a NaT value gets saved on a
436 * st8.spill or RSE spill (e.g., see EAS 2.6,
437 * 4.4.4.6 Register Spill and Fill). To get
438 * consistent behavior across all possible
439 * IA-64 implementations, we return zero in
440 * this case.
441 */
442 *val = 0;
443 return 0;
444 }
445
446 if (laddr < urbs_end) {
447 /*
448 * The desired word is on the kernel RBS and
449 * is not a NaT.
450 */
451 regnum = ia64_rse_num_regs(bspstore, laddr);
452 *val = *ia64_rse_skip_regs(krbs, regnum);
453 return 0;
454 }
455 }
456 copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
457 if (copied != sizeof(ret))
458 return -EIO;
459 *val = ret;
460 return 0;
461}
462
463long
464ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
465 unsigned long user_rbs_end, unsigned long addr, long val)
466{
467 unsigned long *bspstore, *krbs, regnum, *laddr;
468 unsigned long *urbs_end = (long *) user_rbs_end;
469 struct pt_regs *child_regs;
470
471 laddr = (unsigned long *) addr;
472 child_regs = task_pt_regs(child);
473 bspstore = (unsigned long *) child_regs->ar_bspstore;
474 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
475 if (on_kernel_rbs(addr, (unsigned long) bspstore,
476 (unsigned long) urbs_end))
477 {
478 /*
479 * Attempt to write the RBS in an area that's actually
480 * on the kernel RBS => write the corresponding bits
481 * in the kernel RBS.
482 */
483 if (ia64_rse_is_rnat_slot(laddr))
484 put_rnat(child, child_stack, krbs, laddr, val,
485 urbs_end);
486 else {
487 if (laddr < urbs_end) {
488 regnum = ia64_rse_num_regs(bspstore, laddr);
489 *ia64_rse_skip_regs(krbs, regnum) = val;
490 }
491 }
492 } else if (access_process_vm(child, addr, &val, sizeof(val), 1)
493 != sizeof(val))
494 return -EIO;
495 return 0;
496}
497
498/*
499 * Calculate the address of the end of the user-level register backing
500 * store. This is the address that would have been stored in ar.bsp
501 * if the user had executed a "cover" instruction right before
502 * entering the kernel. If CFMP is not NULL, it is used to return the
503 * "current frame mask" that was active at the time the kernel was
504 * entered.
505 */
506unsigned long
507ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
508 unsigned long *cfmp)
509{
510 unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
511 long ndirty;
512
513 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
514 bspstore = (unsigned long *) pt->ar_bspstore;
515 ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
516
517 if (in_syscall(pt))
518 ndirty += (cfm & 0x7f);
519 else
520 cfm &= ~(1UL << 63); /* clear valid bit */
521
522 if (cfmp)
523 *cfmp = cfm;
524 return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
525}
526
527/*
528 * Synchronize (i.e, write) the RSE backing store living in kernel
529 * space to the VM of the CHILD task. SW and PT are the pointers to
530 * the switch_stack and pt_regs structures, respectively.
531 * USER_RBS_END is the user-level address at which the backing store
532 * ends.
533 */
534long
535ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
536 unsigned long user_rbs_start, unsigned long user_rbs_end)
537{
538 unsigned long addr, val;
539 long ret;
540
541 /* now copy word for word from kernel rbs to user rbs: */
542 for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
543 ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
544 if (ret < 0)
545 return ret;
546 if (access_process_vm(child, addr, &val, sizeof(val), 1)
547 != sizeof(val))
548 return -EIO;
549 }
550 return 0;
551}
552
553static long
554ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
555 unsigned long user_rbs_start, unsigned long user_rbs_end)
556{
557 unsigned long addr, val;
558 long ret;
559
560 /* now copy word for word from user rbs to kernel rbs: */
561 for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
562 if (access_process_vm(child, addr, &val, sizeof(val), 0)
563 != sizeof(val))
564 return -EIO;
565
566 ret = ia64_poke(child, sw, user_rbs_end, addr, val);
567 if (ret < 0)
568 return ret;
569 }
570 return 0;
571}
572
573typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
574 unsigned long, unsigned long);
575
576static void do_sync_rbs(struct unw_frame_info *info, void *arg)
577{
578 struct pt_regs *pt;
579 unsigned long urbs_end;
580 syncfunc_t fn = arg;
581
582 if (unw_unwind_to_user(info) < 0)
583 return;
584 pt = task_pt_regs(info->task);
585 urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);
586
587 fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
588}
589
590/*
591 * when a thread is stopped (ptraced), debugger might change thread's user
592 * stack (change memory directly), and we must avoid the RSE stored in kernel
593 * to override user stack (user space's RSE is newer than kernel's in the
594 * case). To workaround the issue, we copy kernel RSE to user RSE before the
595 * task is stopped, so user RSE has updated data. we then copy user RSE to
596 * kernel after the task is resummed from traced stop and kernel will use the
597 * newer RSE to return to user. TIF_RESTORE_RSE is the flag to indicate we need
598 * synchronize user RSE to kernel.
599 */
600void ia64_ptrace_stop(void)
601{
602 if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
603 return;
604 set_notify_resume(current);
605 unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
606}
607
608/*
609 * This is called to read back the register backing store.
610 */
611void ia64_sync_krbs(void)
612{
613 clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
614
615 unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
616}
617
618/*
619 * After PTRACE_ATTACH, a thread's register backing store area in user
620 * space is assumed to contain correct data whenever the thread is
621 * stopped. arch_ptrace_stop takes care of this on tracing stops.
622 * But if the child was already stopped for job control when we attach
623 * to it, then it might not ever get into ptrace_stop by the time we
624 * want to examine the user memory containing the RBS.
625 */
626void
627ptrace_attach_sync_user_rbs (struct task_struct *child)
628{
629 int stopped = 0;
630 struct unw_frame_info info;
631
632 /*
633 * If the child is in TASK_STOPPED, we need to change that to
634 * TASK_TRACED momentarily while we operate on it. This ensures
635 * that the child won't be woken up and return to user mode while
636 * we are doing the sync. (It can only be woken up for SIGKILL.)
637 */
638
639 read_lock(&tasklist_lock);
640 if (child->sighand) {
641 spin_lock_irq(&child->sighand->siglock);
642 if (child->state == TASK_STOPPED &&
643 !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
644 set_notify_resume(child);
645
646 child->state = TASK_TRACED;
647 stopped = 1;
648 }
649 spin_unlock_irq(&child->sighand->siglock);
650 }
651 read_unlock(&tasklist_lock);
652
653 if (!stopped)
654 return;
655
656 unw_init_from_blocked_task(&info, child);
657 do_sync_rbs(&info, ia64_sync_user_rbs);
658
659 /*
660 * Now move the child back into TASK_STOPPED if it should be in a
661 * job control stop, so that SIGCONT can be used to wake it up.
662 */
663 read_lock(&tasklist_lock);
664 if (child->sighand) {
665 spin_lock_irq(&child->sighand->siglock);
666 if (child->state == TASK_TRACED &&
667 (child->signal->flags & SIGNAL_STOP_STOPPED)) {
668 child->state = TASK_STOPPED;
669 }
670 spin_unlock_irq(&child->sighand->siglock);
671 }
672 read_unlock(&tasklist_lock);
673}
674
675static inline int
676thread_matches (struct task_struct *thread, unsigned long addr)
677{
678 unsigned long thread_rbs_end;
679 struct pt_regs *thread_regs;
680
681 if (ptrace_check_attach(thread, 0) < 0)
682 /*
683 * If the thread is not in an attachable state, we'll
684 * ignore it. The net effect is that if ADDR happens
685 * to overlap with the portion of the thread's
686 * register backing store that is currently residing
687 * on the thread's kernel stack, then ptrace() may end
688 * up accessing a stale value. But if the thread
689 * isn't stopped, that's a problem anyhow, so we're
690 * doing as well as we can...
691 */
692 return 0;
693
694 thread_regs = task_pt_regs(thread);
695 thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
696 if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
697 return 0;
698
699 return 1; /* looks like we've got a winner */
700}
701
702/*
703 * Write f32-f127 back to task->thread.fph if it has been modified.
704 */
705inline void
706ia64_flush_fph (struct task_struct *task)
707{
708 struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
709
710 /*
711 * Prevent migrating this task while
712 * we're fiddling with the FPU state
713 */
714 preempt_disable();
715 if (ia64_is_local_fpu_owner(task) && psr->mfh) {
716 psr->mfh = 0;
717 task->thread.flags |= IA64_THREAD_FPH_VALID;
718 ia64_save_fpu(&task->thread.fph[0]);
719 }
720 preempt_enable();
721}
722
723/*
724 * Sync the fph state of the task so that it can be manipulated
725 * through thread.fph. If necessary, f32-f127 are written back to
726 * thread.fph or, if the fph state hasn't been used before, thread.fph
727 * is cleared to zeroes. Also, access to f32-f127 is disabled to
728 * ensure that the task picks up the state from thread.fph when it
729 * executes again.
730 */
731void
732ia64_sync_fph (struct task_struct *task)
733{
734 struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
735
736 ia64_flush_fph(task);
737 if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
738 task->thread.flags |= IA64_THREAD_FPH_VALID;
739 memset(&task->thread.fph, 0, sizeof(task->thread.fph));
740 }
741 ia64_drop_fpu(task);
742 psr->dfh = 1;
743}
744
745/*
746 * Change the machine-state of CHILD such that it will return via the normal
747 * kernel exit-path, rather than the syscall-exit path.
748 */
749static void
750convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
751 unsigned long cfm)
752{
753 struct unw_frame_info info, prev_info;
754 unsigned long ip, sp, pr;
755
756 unw_init_from_blocked_task(&info, child);
757 while (1) {
758 prev_info = info;
759 if (unw_unwind(&info) < 0)
760 return;
761
762 unw_get_sp(&info, &sp);
763 if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
764 < IA64_PT_REGS_SIZE) {
765 dprintk("ptrace.%s: ran off the top of the kernel "
766 "stack\n", __func__);
767 return;
768 }
769 if (unw_get_pr (&prev_info, &pr) < 0) {
770 unw_get_rp(&prev_info, &ip);
771 dprintk("ptrace.%s: failed to read "
772 "predicate register (ip=0x%lx)\n",
773 __func__, ip);
774 return;
775 }
776 if (unw_is_intr_frame(&info)
777 && (pr & (1UL << PRED_USER_STACK)))
778 break;
779 }
780
781 /*
782 * Note: at the time of this call, the target task is blocked
783 * in notify_resume_user() and by clearling PRED_LEAVE_SYSCALL
784 * (aka, "pLvSys") we redirect execution from
785 * .work_pending_syscall_end to .work_processed_kernel.
786 */
787 unw_get_pr(&prev_info, &pr);
788 pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
789 pr |= (1UL << PRED_NON_SYSCALL);
790 unw_set_pr(&prev_info, pr);
791
792 pt->cr_ifs = (1UL << 63) | cfm;
793 /*
794 * Clear the memory that is NOT written on syscall-entry to
795 * ensure we do not leak kernel-state to user when execution
796 * resumes.
797 */
798 pt->r2 = 0;
799 pt->r3 = 0;
800 pt->r14 = 0;
801 memset(&pt->r16, 0, 16*8); /* clear r16-r31 */
802 memset(&pt->f6, 0, 6*16); /* clear f6-f11 */
803 pt->b7 = 0;
804 pt->ar_ccv = 0;
805 pt->ar_csd = 0;
806 pt->ar_ssd = 0;
807}
808
809static int
810access_nat_bits (struct task_struct *child, struct pt_regs *pt,
811 struct unw_frame_info *info,
812 unsigned long *data, int write_access)
813{
814 unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
815 char nat = 0;
816
817 if (write_access) {
818 nat_bits = *data;
819 scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
820 if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
821 dprintk("ptrace: failed to set ar.unat\n");
822 return -1;
823 }
824 for (regnum = 4; regnum <= 7; ++regnum) {
825 unw_get_gr(info, regnum, &dummy, &nat);
826 unw_set_gr(info, regnum, dummy,
827 (nat_bits >> regnum) & 1);
828 }
829 } else {
830 if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
831 dprintk("ptrace: failed to read ar.unat\n");
832 return -1;
833 }
834 nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
835 for (regnum = 4; regnum <= 7; ++regnum) {
836 unw_get_gr(info, regnum, &dummy, &nat);
837 nat_bits |= (nat != 0) << regnum;
838 }
839 *data = nat_bits;
840 }
841 return 0;
842}
843
844static int
845access_uarea (struct task_struct *child, unsigned long addr,
846 unsigned long *data, int write_access);
847
848static long
849ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
850{
851 unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
852 struct unw_frame_info info;
853 struct ia64_fpreg fpval;
854 struct switch_stack *sw;
855 struct pt_regs *pt;
856 long ret, retval = 0;
857 char nat = 0;
858 int i;
859
860 if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
861 return -EIO;
862
863 pt = task_pt_regs(child);
864 sw = (struct switch_stack *) (child->thread.ksp + 16);
865 unw_init_from_blocked_task(&info, child);
866 if (unw_unwind_to_user(&info) < 0) {
867 return -EIO;
868 }
869
870 if (((unsigned long) ppr & 0x7) != 0) {
871 dprintk("ptrace:unaligned register address %p\n", ppr);
872 return -EIO;
873 }
874
875 if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
876 || access_uarea(child, PT_AR_EC, &ec, 0) < 0
877 || access_uarea(child, PT_AR_LC, &lc, 0) < 0
878 || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
879 || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
880 || access_uarea(child, PT_CFM, &cfm, 0)
881 || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
882 return -EIO;
883
884 /* control regs */
885
886 retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
887 retval |= __put_user(psr, &ppr->cr_ipsr);
888
889 /* app regs */
890
891 retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
892 retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
893 retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
894 retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
895 retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
896 retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
897
898 retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
899 retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
900 retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
901 retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
902 retval |= __put_user(cfm, &ppr->cfm);
903
904 /* gr1-gr3 */
905
906 retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
907 retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
908
909 /* gr4-gr7 */
910
911 for (i = 4; i < 8; i++) {
912 if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
913 return -EIO;
914 retval |= __put_user(val, &ppr->gr[i]);
915 }
916
917 /* gr8-gr11 */
918
919 retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
920
921 /* gr12-gr15 */
922
923 retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
924 retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
925 retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
926
927 /* gr16-gr31 */
928
929 retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
930
931 /* b0 */
932
933 retval |= __put_user(pt->b0, &ppr->br[0]);
934
935 /* b1-b5 */
936
937 for (i = 1; i < 6; i++) {
938 if (unw_access_br(&info, i, &val, 0) < 0)
939 return -EIO;
940 __put_user(val, &ppr->br[i]);
941 }
942
943 /* b6-b7 */
944
945 retval |= __put_user(pt->b6, &ppr->br[6]);
946 retval |= __put_user(pt->b7, &ppr->br[7]);
947
948 /* fr2-fr5 */
949
950 for (i = 2; i < 6; i++) {
951 if (unw_get_fr(&info, i, &fpval) < 0)
952 return -EIO;
953 retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
954 }
955
956 /* fr6-fr11 */
957
958 retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
959 sizeof(struct ia64_fpreg) * 6);
960
961 /* fp scratch regs(12-15) */
962
963 retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
964 sizeof(struct ia64_fpreg) * 4);
965
966 /* fr16-fr31 */
967
968 for (i = 16; i < 32; i++) {
969 if (unw_get_fr(&info, i, &fpval) < 0)
970 return -EIO;
971 retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
972 }
973
974 /* fph */
975
976 ia64_flush_fph(child);
977 retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
978 sizeof(ppr->fr[32]) * 96);
979
980 /* preds */
981
982 retval |= __put_user(pt->pr, &ppr->pr);
983
984 /* nat bits */
985
986 retval |= __put_user(nat_bits, &ppr->nat);
987
988 ret = retval ? -EIO : 0;
989 return ret;
990}
991
992static long
993ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
994{
995 unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
996 struct unw_frame_info info;
997 struct switch_stack *sw;
998 struct ia64_fpreg fpval;
999 struct pt_regs *pt;
1000 long ret, retval = 0;
1001 int i;
1002
1003 memset(&fpval, 0, sizeof(fpval));
1004
1005 if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
1006 return -EIO;
1007
1008 pt = task_pt_regs(child);
1009 sw = (struct switch_stack *) (child->thread.ksp + 16);
1010 unw_init_from_blocked_task(&info, child);
1011 if (unw_unwind_to_user(&info) < 0) {
1012 return -EIO;
1013 }
1014
1015 if (((unsigned long) ppr & 0x7) != 0) {
1016 dprintk("ptrace:unaligned register address %p\n", ppr);
1017 return -EIO;
1018 }
1019
1020 /* control regs */
1021
1022 retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
1023 retval |= __get_user(psr, &ppr->cr_ipsr);
1024
1025 /* app regs */
1026
1027 retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
1028 retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
1029 retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1030 retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1031 retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1032 retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1033
1034 retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
1035 retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
1036 retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
1037 retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
1038 retval |= __get_user(cfm, &ppr->cfm);
1039
1040 /* gr1-gr3 */
1041
1042 retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
1043 retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
1044
1045 /* gr4-gr7 */
1046
1047 for (i = 4; i < 8; i++) {
1048 retval |= __get_user(val, &ppr->gr[i]);
1049 /* NaT bit will be set via PT_NAT_BITS: */
1050 if (unw_set_gr(&info, i, val, 0) < 0)
1051 return -EIO;
1052 }
1053
1054 /* gr8-gr11 */
1055
1056 retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
1057
1058 /* gr12-gr15 */
1059
1060 retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
1061 retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
1062 retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
1063
1064 /* gr16-gr31 */
1065
1066 retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
1067
1068 /* b0 */
1069
1070 retval |= __get_user(pt->b0, &ppr->br[0]);
1071
1072 /* b1-b5 */
1073
1074 for (i = 1; i < 6; i++) {
1075 retval |= __get_user(val, &ppr->br[i]);
1076 unw_set_br(&info, i, val);
1077 }
1078
1079 /* b6-b7 */
1080
1081 retval |= __get_user(pt->b6, &ppr->br[6]);
1082 retval |= __get_user(pt->b7, &ppr->br[7]);
1083
1084 /* fr2-fr5 */
1085
1086 for (i = 2; i < 6; i++) {
1087 retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
1088 if (unw_set_fr(&info, i, fpval) < 0)
1089 return -EIO;
1090 }
1091
1092 /* fr6-fr11 */
1093
1094 retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
1095 sizeof(ppr->fr[6]) * 6);
1096
1097 /* fp scratch regs(12-15) */
1098
1099 retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
1100 sizeof(ppr->fr[12]) * 4);
1101
1102 /* fr16-fr31 */
1103
1104 for (i = 16; i < 32; i++) {
1105 retval |= __copy_from_user(&fpval, &ppr->fr[i],
1106 sizeof(fpval));
1107 if (unw_set_fr(&info, i, fpval) < 0)
1108 return -EIO;
1109 }
1110
1111 /* fph */
1112
1113 ia64_sync_fph(child);
1114 retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
1115 sizeof(ppr->fr[32]) * 96);
1116
1117 /* preds */
1118
1119 retval |= __get_user(pt->pr, &ppr->pr);
1120
1121 /* nat bits */
1122
1123 retval |= __get_user(nat_bits, &ppr->nat);
1124
1125 retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
1126 retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
1127 retval |= access_uarea(child, PT_AR_EC, &ec, 1);
1128 retval |= access_uarea(child, PT_AR_LC, &lc, 1);
1129 retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
1130 retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
1131 retval |= access_uarea(child, PT_CFM, &cfm, 1);
1132 retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
1133
1134 ret = retval ? -EIO : 0;
1135 return ret;
1136}
1137
1138void
1139user_enable_single_step (struct task_struct *child)
1140{
1141 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1142
1143 set_tsk_thread_flag(child, TIF_SINGLESTEP);
1144 child_psr->ss = 1;
1145}
1146
1147void
1148user_enable_block_step (struct task_struct *child)
1149{
1150 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1151
1152 set_tsk_thread_flag(child, TIF_SINGLESTEP);
1153 child_psr->tb = 1;
1154}
1155
1156void
1157user_disable_single_step (struct task_struct *child)
1158{
1159 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1160
1161 /* make sure the single step/taken-branch trap bits are not set: */
1162 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1163 child_psr->ss = 0;
1164 child_psr->tb = 0;
1165}
1166
1167/*
1168 * Called by kernel/ptrace.c when detaching..
1169 *
1170 * Make sure the single step bit is not set.
1171 */
1172void
1173ptrace_disable (struct task_struct *child)
1174{
1175 user_disable_single_step(child);
1176}
1177
1178long
1179arch_ptrace (struct task_struct *child, long request,
1180 unsigned long addr, unsigned long data)
1181{
1182 switch (request) {
1183 case PTRACE_PEEKTEXT:
1184 case PTRACE_PEEKDATA:
1185 /* read word at location addr */
1186 if (access_process_vm(child, addr, &data, sizeof(data), 0)
1187 != sizeof(data))
1188 return -EIO;
1189 /* ensure return value is not mistaken for error code */
1190 force_successful_syscall_return();
1191 return data;
1192
1193 /* PTRACE_POKETEXT and PTRACE_POKEDATA is handled
1194 * by the generic ptrace_request().
1195 */
1196
1197 case PTRACE_PEEKUSR:
1198 /* read the word at addr in the USER area */
1199 if (access_uarea(child, addr, &data, 0) < 0)
1200 return -EIO;
1201 /* ensure return value is not mistaken for error code */
1202 force_successful_syscall_return();
1203 return data;
1204
1205 case PTRACE_POKEUSR:
1206 /* write the word at addr in the USER area */
1207 if (access_uarea(child, addr, &data, 1) < 0)
1208 return -EIO;
1209 return 0;
1210
1211 case PTRACE_OLD_GETSIGINFO:
1212 /* for backwards-compatibility */
1213 return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
1214
1215 case PTRACE_OLD_SETSIGINFO:
1216 /* for backwards-compatibility */
1217 return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
1218
1219 case PTRACE_GETREGS:
1220 return ptrace_getregs(child,
1221 (struct pt_all_user_regs __user *) data);
1222
1223 case PTRACE_SETREGS:
1224 return ptrace_setregs(child,
1225 (struct pt_all_user_regs __user *) data);
1226
1227 default:
1228 return ptrace_request(child, request, addr, data);
1229 }
1230}
1231
1232
1233/* "asmlinkage" so the input arguments are preserved... */
1234
1235asmlinkage long
1236syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1237 long arg4, long arg5, long arg6, long arg7,
1238 struct pt_regs regs)
1239{
1240 if (test_thread_flag(TIF_SYSCALL_TRACE))
1241 if (tracehook_report_syscall_entry(®s))
1242 return -ENOSYS;
1243
1244 /* copy user rbs to kernel rbs */
1245 if (test_thread_flag(TIF_RESTORE_RSE))
1246 ia64_sync_krbs();
1247
1248
1249 audit_syscall_entry(AUDIT_ARCH_IA64, regs.r15, arg0, arg1, arg2, arg3);
1250
1251 return 0;
1252}
1253
1254/* "asmlinkage" so the input arguments are preserved... */
1255
1256asmlinkage void
1257syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1258 long arg4, long arg5, long arg6, long arg7,
1259 struct pt_regs regs)
1260{
1261 int step;
1262
1263 audit_syscall_exit(®s);
1264
1265 step = test_thread_flag(TIF_SINGLESTEP);
1266 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1267 tracehook_report_syscall_exit(®s, step);
1268
1269 /* copy user rbs to kernel rbs */
1270 if (test_thread_flag(TIF_RESTORE_RSE))
1271 ia64_sync_krbs();
1272}
1273
1274/* Utrace implementation starts here */
1275struct regset_get {
1276 void *kbuf;
1277 void __user *ubuf;
1278};
1279
1280struct regset_set {
1281 const void *kbuf;
1282 const void __user *ubuf;
1283};
1284
1285struct regset_getset {
1286 struct task_struct *target;
1287 const struct user_regset *regset;
1288 union {
1289 struct regset_get get;
1290 struct regset_set set;
1291 } u;
1292 unsigned int pos;
1293 unsigned int count;
1294 int ret;
1295};
1296
1297static int
1298access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
1299 unsigned long addr, unsigned long *data, int write_access)
1300{
1301 struct pt_regs *pt;
1302 unsigned long *ptr = NULL;
1303 int ret;
1304 char nat = 0;
1305
1306 pt = task_pt_regs(target);
1307 switch (addr) {
1308 case ELF_GR_OFFSET(1):
1309 ptr = &pt->r1;
1310 break;
1311 case ELF_GR_OFFSET(2):
1312 case ELF_GR_OFFSET(3):
1313 ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
1314 break;
1315 case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
1316 if (write_access) {
1317 /* read NaT bit first: */
1318 unsigned long dummy;
1319
1320 ret = unw_get_gr(info, addr/8, &dummy, &nat);
1321 if (ret < 0)
1322 return ret;
1323 }
1324 return unw_access_gr(info, addr/8, data, &nat, write_access);
1325 case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
1326 ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
1327 break;
1328 case ELF_GR_OFFSET(12):
1329 case ELF_GR_OFFSET(13):
1330 ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
1331 break;
1332 case ELF_GR_OFFSET(14):
1333 ptr = &pt->r14;
1334 break;
1335 case ELF_GR_OFFSET(15):
1336 ptr = &pt->r15;
1337 }
1338 if (write_access)
1339 *ptr = *data;
1340 else
1341 *data = *ptr;
1342 return 0;
1343}
1344
1345static int
1346access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
1347 unsigned long addr, unsigned long *data, int write_access)
1348{
1349 struct pt_regs *pt;
1350 unsigned long *ptr = NULL;
1351
1352 pt = task_pt_regs(target);
1353 switch (addr) {
1354 case ELF_BR_OFFSET(0):
1355 ptr = &pt->b0;
1356 break;
1357 case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
1358 return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
1359 data, write_access);
1360 case ELF_BR_OFFSET(6):
1361 ptr = &pt->b6;
1362 break;
1363 case ELF_BR_OFFSET(7):
1364 ptr = &pt->b7;
1365 }
1366 if (write_access)
1367 *ptr = *data;
1368 else
1369 *data = *ptr;
1370 return 0;
1371}
1372
1373static int
1374access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
1375 unsigned long addr, unsigned long *data, int write_access)
1376{
1377 struct pt_regs *pt;
1378 unsigned long cfm, urbs_end;
1379 unsigned long *ptr = NULL;
1380
1381 pt = task_pt_regs(target);
1382 if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
1383 switch (addr) {
1384 case ELF_AR_RSC_OFFSET:
1385 /* force PL3 */
1386 if (write_access)
1387 pt->ar_rsc = *data | (3 << 2);
1388 else
1389 *data = pt->ar_rsc;
1390 return 0;
1391 case ELF_AR_BSP_OFFSET:
1392 /*
1393 * By convention, we use PT_AR_BSP to refer to
1394 * the end of the user-level backing store.
1395 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
1396 * to get the real value of ar.bsp at the time
1397 * the kernel was entered.
1398 *
1399 * Furthermore, when changing the contents of
1400 * PT_AR_BSP (or PT_CFM) while the task is
1401 * blocked in a system call, convert the state
1402 * so that the non-system-call exit
1403 * path is used. This ensures that the proper
1404 * state will be picked up when resuming
1405 * execution. However, it *also* means that
1406 * once we write PT_AR_BSP/PT_CFM, it won't be
1407 * possible to modify the syscall arguments of
1408 * the pending system call any longer. This
1409 * shouldn't be an issue because modifying
1410 * PT_AR_BSP/PT_CFM generally implies that
1411 * we're either abandoning the pending system
1412 * call or that we defer it's re-execution
1413 * (e.g., due to GDB doing an inferior
1414 * function call).
1415 */
1416 urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1417 if (write_access) {
1418 if (*data != urbs_end) {
1419 if (in_syscall(pt))
1420 convert_to_non_syscall(target,
1421 pt,
1422 cfm);
1423 /*
1424 * Simulate user-level write
1425 * of ar.bsp:
1426 */
1427 pt->loadrs = 0;
1428 pt->ar_bspstore = *data;
1429 }
1430 } else
1431 *data = urbs_end;
1432 return 0;
1433 case ELF_AR_BSPSTORE_OFFSET:
1434 ptr = &pt->ar_bspstore;
1435 break;
1436 case ELF_AR_RNAT_OFFSET:
1437 ptr = &pt->ar_rnat;
1438 break;
1439 case ELF_AR_CCV_OFFSET:
1440 ptr = &pt->ar_ccv;
1441 break;
1442 case ELF_AR_UNAT_OFFSET:
1443 ptr = &pt->ar_unat;
1444 break;
1445 case ELF_AR_FPSR_OFFSET:
1446 ptr = &pt->ar_fpsr;
1447 break;
1448 case ELF_AR_PFS_OFFSET:
1449 ptr = &pt->ar_pfs;
1450 break;
1451 case ELF_AR_LC_OFFSET:
1452 return unw_access_ar(info, UNW_AR_LC, data,
1453 write_access);
1454 case ELF_AR_EC_OFFSET:
1455 return unw_access_ar(info, UNW_AR_EC, data,
1456 write_access);
1457 case ELF_AR_CSD_OFFSET:
1458 ptr = &pt->ar_csd;
1459 break;
1460 case ELF_AR_SSD_OFFSET:
1461 ptr = &pt->ar_ssd;
1462 }
1463 } else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
1464 switch (addr) {
1465 case ELF_CR_IIP_OFFSET:
1466 ptr = &pt->cr_iip;
1467 break;
1468 case ELF_CFM_OFFSET:
1469 urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1470 if (write_access) {
1471 if (((cfm ^ *data) & PFM_MASK) != 0) {
1472 if (in_syscall(pt))
1473 convert_to_non_syscall(target,
1474 pt,
1475 cfm);
1476 pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
1477 | (*data & PFM_MASK));
1478 }
1479 } else
1480 *data = cfm;
1481 return 0;
1482 case ELF_CR_IPSR_OFFSET:
1483 if (write_access) {
1484 unsigned long tmp = *data;
1485 /* psr.ri==3 is a reserved value: SDM 2:25 */
1486 if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
1487 tmp &= ~IA64_PSR_RI;
1488 pt->cr_ipsr = ((tmp & IPSR_MASK)
1489 | (pt->cr_ipsr & ~IPSR_MASK));
1490 } else
1491 *data = (pt->cr_ipsr & IPSR_MASK);
1492 return 0;
1493 }
1494 } else if (addr == ELF_NAT_OFFSET)
1495 return access_nat_bits(target, pt, info,
1496 data, write_access);
1497 else if (addr == ELF_PR_OFFSET)
1498 ptr = &pt->pr;
1499 else
1500 return -1;
1501
1502 if (write_access)
1503 *ptr = *data;
1504 else
1505 *data = *ptr;
1506
1507 return 0;
1508}
1509
1510static int
1511access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
1512 unsigned long addr, unsigned long *data, int write_access)
1513{
1514 if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
1515 return access_elf_gpreg(target, info, addr, data, write_access);
1516 else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
1517 return access_elf_breg(target, info, addr, data, write_access);
1518 else
1519 return access_elf_areg(target, info, addr, data, write_access);
1520}
1521
1522void do_gpregs_get(struct unw_frame_info *info, void *arg)
1523{
1524 struct pt_regs *pt;
1525 struct regset_getset *dst = arg;
1526 elf_greg_t tmp[16];
1527 unsigned int i, index, min_copy;
1528
1529 if (unw_unwind_to_user(info) < 0)
1530 return;
1531
1532 /*
1533 * coredump format:
1534 * r0-r31
1535 * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
1536 * predicate registers (p0-p63)
1537 * b0-b7
1538 * ip cfm user-mask
1539 * ar.rsc ar.bsp ar.bspstore ar.rnat
1540 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
1541 */
1542
1543
1544 /* Skip r0 */
1545 if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1546 dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1547 &dst->u.get.kbuf,
1548 &dst->u.get.ubuf,
1549 0, ELF_GR_OFFSET(1));
1550 if (dst->ret || dst->count == 0)
1551 return;
1552 }
1553
1554 /* gr1 - gr15 */
1555 if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1556 index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1557 min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
1558 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
1559 for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1560 index++)
1561 if (access_elf_reg(dst->target, info, i,
1562 &tmp[index], 0) < 0) {
1563 dst->ret = -EIO;
1564 return;
1565 }
1566 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1567 &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1568 ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1569 if (dst->ret || dst->count == 0)
1570 return;
1571 }
1572
1573 /* r16-r31 */
1574 if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1575 pt = task_pt_regs(dst->target);
1576 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1577 &dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
1578 ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1579 if (dst->ret || dst->count == 0)
1580 return;
1581 }
1582
1583 /* nat, pr, b0 - b7 */
1584 if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1585 index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1586 min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
1587 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
1588 for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1589 index++)
1590 if (access_elf_reg(dst->target, info, i,
1591 &tmp[index], 0) < 0) {
1592 dst->ret = -EIO;
1593 return;
1594 }
1595 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1596 &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1597 ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1598 if (dst->ret || dst->count == 0)
1599 return;
1600 }
1601
1602 /* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1603 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1604 */
1605 if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1606 index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1607 min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
1608 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
1609 for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1610 index++)
1611 if (access_elf_reg(dst->target, info, i,
1612 &tmp[index], 0) < 0) {
1613 dst->ret = -EIO;
1614 return;
1615 }
1616 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1617 &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1618 ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1619 }
1620}
1621
void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	struct regset_getset *dst = arg;
	elf_greg_t tmp[16];
	unsigned int i, index;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip r0 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_GR_OFFSET(1));
		if (dst->ret || dst->count == 0)
			return;
	}

	/* gr1-gr15 */
	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
		i = dst->pos;
		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* gr16-gr31 */
	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
		pt = task_pt_regs(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
		if (dst->ret || dst->count == 0)
			return;
	}

	/* nat, pr, b0 - b7 */
	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
		i = dst->pos;
		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
		if (dst->count == 0)
			return;
	}

	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
	 */
	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
		i = dst->pos;
		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
		if (dst->ret)
			return;
		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
			if (access_elf_reg(dst->target, info, i,
					   &tmp[index], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}

#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))

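/*
 * Regset fetch callback for the floating-point registers.  Slots 0
 * and 1 are zero-filled (f0/f1 are architecturally constant), f2-f31
 * are read through the unwinder, and the high partition f32-f127
 * comes from thread.fph when it is valid, else it too reads as zero.
 */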
void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	struct task_struct *task = dst->target;
	elf_fpreg_t tmp[30];
	int index, min_copy, i;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
						    &dst->u.get.kbuf,
						    &dst->u.get.ubuf,
						    0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);

		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
				dst->pos + dst->count);
		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
				index++)
			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
					&tmp[index])) {
				dst->ret = -EIO;
				return;
			}
		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fph */
	if (dst->count > 0) {
		ia64_flush_fph(dst->target);
		if (task->thread.flags & IA64_THREAD_FPH_VALID)
			dst->ret = user_regset_copyout(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				&dst->target->thread.fph,
				ELF_FP_OFFSET(32), -1);
		else
			/* Zero fill instead.  */
			dst->ret = user_regset_copyout_zero(
				&dst->pos, &dst->count,
				&dst->u.get.kbuf, &dst->u.get.ubuf,
				ELF_FP_OFFSET(32), -1);
	}
}

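/*
 * Regset store callback for the floating-point registers.  Positions
 * are byte offsets and each register is 16 bytes, so a write whose
 * range starts or ends in the middle of a register first fetches the
 * untouched half with unw_get_fr() and merges it before storing the
 * whole register with unw_set_fr().
 */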
void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			 dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

		if (start & 0xF) { /* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
					&fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) { /* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
					&fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
						&dst->u.set.kbuf,
						&dst->u.set.ubuf,
						&dst->target->thread.fph,
						ELF_FP_OFFSET(32), -1);
	}
}

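/*
 * Common driver for the regset get/set callbacks above: package the
 * arguments into a struct regset_getset and invoke CALL with a valid
 * unwind frame.  For the current task the unwinder starts on the
 * running stack; for anybody else it is primed from the blocked
 * task's saved state.
 */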
static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
				      .pos = pos, .count = count,
				      .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				      .ret = 0 };

	if (target == current)
		unw_init_running(call, &info);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, target);
		(*call)(&ufi, &info);
	}

	return info.ret;
}

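/*
 * Thin user_regset entry points: all of the real work happens in the
 * do_*regs_* callbacks run under do_regset_call().
 */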
static int
gpregs_get(struct task_struct *target,
	   const struct user_regset *regset,
	   unsigned int pos, unsigned int count,
	   void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_gpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int gpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_gpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
	do_sync_rbs(info, ia64_sync_user_rbs);
}

/*
 * This is called to write back the register backing store.
 * ptrace does this before it stops, so that a tracer reading the user
 * memory after the thread stops will get the current register data.
 */
static int
gpregs_writeback(struct task_struct *target,
		 const struct user_regset *regset,
		 int now)
{
	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
		return 0;
	set_notify_resume(target);
	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
		NULL, NULL);
}

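/*
 * Report how much of the fp regset is live: all 128 registers when
 * the high partition (fph) is valid for this task, otherwise only
 * the first 32.
 */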
static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	return do_regset_call(do_fpregs_get, target, regset, pos, count,
		kbuf, ubuf);
}

static int fpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

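/*
 * Back end for the old-style user-area accesses (PTRACE_PEEKUSR and
 * friends): translate a PT_* user-area address into the equivalent
 * regset position and reuse the regset accessors above; anything
 * beyond the named registers is treated as a debug register access.
 */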
static int
access_uarea(struct task_struct *child, unsigned long addr,
	     unsigned long *data, int write_access)
{
	unsigned int pos = -1; /* an invalid value */
	int ret;
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
		(addr >= PT_R7 + 8 && addr < PT_B1) ||
		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}

	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ... (PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = fpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = fpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		if (write_access)
			ret = gpregs_set(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		else
			ret = gpregs_get(child, NULL, pos,
				sizeof(unsigned long), data, NULL);
		if (ret != 0)
			return -1;
		return 0;
	}

	/* access debug registers */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}
#ifdef CONFIG_PERFMON
	/*
	 * Check if debug registers are used by perfmon. This
	 * test must be done once we know that we can do the
	 * operation, i.e. the arguments are all valid, but
	 * before we start modifying the state.
	 *
	 * Perfmon needs to keep a count of how many processes
	 * are trying to modify the debug registers for system
	 * wide monitoring sessions.
	 *
	 * We also include read access here, because they may
	 * cause the PMU-installed debug register state
	 * (dbr[], ibr[]) to be reset. The two arrays are also
	 * used by perfmon, but we do not use
	 * IA64_THREAD_DBG_VALID. The registers are restored
	 * by the PMU context switch code.
	 */
	if (pfm_use_debug_registers(child))
		return -1;
#endif

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
		       sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
		       sizeof(child->thread.ibr));
	}

	ptr += regnum;

	if ((regnum & 1) && write_access) {
		/* don't let the user set kernel-level breakpoints: */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

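/*
 * The regsets exported to ptrace and core dumps: NT_PRSTATUS for the
 * general registers and NT_PRFPREG for the floating-point registers,
 * collected under the "ia64" user_regset_view below.
 */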
static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
	return &user_ia64_view;
}

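/*
 * Argument block for syscall_get_set_args_cb(): copy N syscall
 * arguments starting at index I between ARGS and the target's
 * register backing store, reading when RW is zero and writing
 * otherwise.
 */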
struct syscall_get_set_args {
	unsigned int i;
	unsigned int n;
	unsigned long *args;
	struct pt_regs *regs;
	int rw;
};

static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
	struct syscall_get_set_args *args = data;
	struct pt_regs *pt = args->regs;
	unsigned long *krbs, cfm, ndirty;
	int i, count;

	if (unw_unwind_to_user(info) < 0)
		return;

	cfm = pt->cr_ifs;
	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
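	/*
	 * pt->loadrs carries the ar.rsc.loadrs field (the dirty
	 * partition size in bytes, shifted left by 16), so >> 19
	 * converts it into a count of 8-byte RBS slots; ndirty is
	 * then the number of stacked registers (NaT-collection slots
	 * excluded) spilled on the kernel backing store.
	 */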
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	count = 0;
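	/*
	 * The arguments live in the stacked registers of the syscall
	 * frame; sof (cfm & 0x7f) is the frame size and caps how many
	 * of them actually exist.
	 */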
	if (in_syscall(pt))
		count = min_t(int, args->n, cfm & 0x7f);

	for (i = 0; i < count; i++) {
		if (args->rw)
			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
				args->args[i];
		else
			args->args[i] = *ia64_rse_skip_regs(krbs,
				ndirty + i + args->i);
	}

	if (!args->rw) {
		while (i < args->n) {
			args->args[i] = 0;
			i++;
		}
	}
}

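/*
 * Read (rw == 0) or write (rw == 1) N syscall arguments of TASK,
 * starting at argument I, through the register backing store.  As in
 * do_regset_call(), the unwinder is started on the running stack for
 * the current task and from the blocked task state otherwise.
 */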
void ia64_syscall_get_set_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned int i, unsigned int n,
	unsigned long *args, int rw)
{
	struct syscall_get_set_args data = {
		.i = i,
		.n = n,
		.args = args,
		.regs = regs,
		.rw = rw,
	};

	if (task == current)
		unw_init_running(syscall_get_set_args_cb, &data);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, task);
		syscall_get_set_args_cb(&ufi, &data);
	}
}