/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

/*
 * Read a general register set. We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], data + i);
	__put_user((long)regs->lo, data + EF_LO - EF_R0);
	__put_user((long)regs->hi, data + EF_HI - EF_R0);
	__put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
	__put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
	__put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
	__put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);

	return 0;
}
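/*
 * Illustrative (hypothetical) tracer-side use of the layout above:
 *
 *	__u64 gregs[38];
 *	ptrace(PTRACE_GETREGS, pid, NULL, gregs);
 *	// gregs[0..31] are the GPRs, gregs[EF_LO - EF_R0] is LO,
 *	// gregs[EF_CP0_EPC - EF_R0] is the program counter, and so on.
 */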

/*
 * Write a general register set. As for PTRACE_GETREGS, we always use
 * the 64-bit format. On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_READ, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], data + i);
	__get_user(regs->lo, data + EF_LO - EF_R0);
	__get_user(regs->hi, data + EF_HI - EF_R0);
	__get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);

	/* badvaddr, status, and cause may not be written. */

	return 0;
}

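/*
 * Layout used by PTRACE_GETFPREGS/PTRACE_SETFPREGS: the 32 FP registers
 * as 64-bit values, followed by FCSR (fcr31) and the FP implementation/
 * revision register (FIR) as 32-bit words at indices 64 and 65 of the
 * 32-bit view.  FIR is read-only.
 */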
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;
	unsigned int tmp;

	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		fpureg_t *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(fregs[i], i + (__u64 __user *) data);
	} else {
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);

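	/*
	 * Read the FP implementation/revision register (FIR) straight from
	 * the hardware via "cfc1 $0", with the FPU temporarily enabled and,
	 * on MT cores, the other VPEs quiesced.
	 */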
	preempt_disable();
	if (cpu_has_fpu) {
		unsigned int flags;

		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			flags = read_c0_status();
			__enable_fpu();
			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
			write_c0_status(flags);
			evpe(vpflags);
		} else {
			flags = read_c0_status();
			__enable_fpu();
			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
			write_c0_status(flags);
		}
	} else {
		tmp = 0;
	}
	preempt_enable();
	__put_user(tmp, data + 65);

	return 0;
}

int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	fpureg_t *fregs;
	int i;

	if (!access_ok(VERIFY_READ, data, 33 * 8))
		return -EIO;

	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(fregs[i], i + (__u64 __user *) data);

	__get_user(child->thread.fpu.fcr31, data + 64);

	/* FIR may not be written. */

	return 0;
}

int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(current_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(current_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}
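/*
 * Illustrative (hypothetical) tracer-side use of this request and its
 * PTRACE_SET_WATCH_REGS counterpart below; the tracer picks the union
 * member matching ->style:
 *
 *	struct pt_watch_regs wr;
 *	ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL);
 *	wr.mips64.watchlo[0] = watch_addr | 7;	// low bits enable I/R/W
 *	ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL);
 */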

int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~0xff8)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & 7)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/* Set the G bit. */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}

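/*
 * Handle the MIPS-specific ptrace requests; anything not recognised here
 * is passed on to the generic ptrace_request().
 */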
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
		case FPR_BASE ... FPR_BASE + 31:
			if (tsk_used_math(child)) {
				fpureg_t *fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				if (addr & 1)
					tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
				else
					tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
#endif
#ifdef CONFIG_64BIT
				tmp = fregs[addr - FPR_BASE];
#endif
			} else {
				tmp = -1;	/* FP not yet used */
			}
			break;
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR: {	/* implementation / version register */
			unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
			unsigned long irqflags;
			unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */

			preempt_disable();
			if (!cpu_has_fpu) {
				preempt_enable();
				break;
			}

#ifdef CONFIG_MIPS_MT_SMTC
			/* Read-modify-write of Status must be atomic */
			local_irq_save(irqflags);
			mtflags = dmt();
#endif /* CONFIG_MIPS_MT_SMTC */
			if (cpu_has_mipsmt) {
				unsigned int vpflags = dvpe();
				flags = read_c0_status();
				__enable_fpu();
				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
				write_c0_status(flags);
				evpe(vpflags);
			} else {
				flags = read_c0_status();
				__enable_fpu();
				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
				write_c0_status(flags);
			}
#ifdef CONFIG_MIPS_MT_SMTC
			emt(mtflags);
			local_irq_restore(irqflags);
#endif /* CONFIG_MIPS_MT_SMTC */
			preempt_enable();
			break;
		}
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			break;
		case FPR_BASE ... FPR_BASE + 31: {
			fpureg_t *fregs = get_fpu_regs(child);

			if (!tsk_used_math(child)) {
				/* FP not yet used */
				memset(&child->thread.fpu, ~0,
				       sizeof(child->thread.fpu));
				child->thread.fpu.fcr31 = 0;
			}
#ifdef CONFIG_32BIT
			/*
			 * The odd registers are actually the high order bits
			 * of the values stored in the even registers - unless
			 * we're using r2k_switch.S.
			 */
			if (addr & 1) {
				fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
				fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
			} else {
				fregs[addr - FPR_BASE] &= ~0xffffffffLL;
				fregs[addr - FPR_BASE] |= data;
			}
#endif
#ifdef CONFIG_64BIT
			fregs[addr - FPR_BASE] = data;
#endif
			break;
		}
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case FPC_CSR:
			child->thread.fpu.fcr31 = data;
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
 out:
	return ret;
}

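/*
 * Build the AUDIT_ARCH value used for syscall audit records: EM_MIPS,
 * plus the 64-bit and little-endian attribute bits when they apply.
 */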
static inline int audit_arch(void)
{
	int arch = EM_MIPS;
#ifdef CONFIG_64BIT
	arch |= __AUDIT_ARCH_64BIT;
#endif
#if defined(__LITTLE_ENDIAN)
	arch |= __AUDIT_ARCH_LE;
#endif
	return arch;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
	/* do the secure computing check first */
	secure_computing_strict(regs->regs[2]);

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		goto out;

	/* The 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
				 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use. strace only continues with a signal if the
	 * stopping signal is not SIGTRAP. -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}

out:
	audit_syscall_entry(audit_arch(), regs->regs[2],
			    regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	audit_syscall_exit(regs);

	if (!(current->ptrace & PT_PTRACED))
		return;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;

	/* The 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
				 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use. strace only continues with a signal if the
	 * stopping signal is not SIGTRAP. -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 *
 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 * binaries.
 */
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/syscall.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

/*
 * Read a general register set. We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], data + i);
	__put_user((long)regs->lo, data + EF_LO - EF_R0);
	__put_user((long)regs->hi, data + EF_HI - EF_R0);
	__put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
	__put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
	__put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
	__put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);

	return 0;
}

/*
 * Write a general register set. As for PTRACE_GETREGS, we always use
 * the 64-bit format. On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_READ, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], data + i);
	__get_user(regs->lo, data + EF_LO - EF_R0);
	__get_user(regs->hi, data + EF_HI - EF_R0);
	__get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);

	/* badvaddr, status, and cause may not be written. */

	return 0;
}

int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;

	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		union fpureg *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(get_fpr64(&fregs[i], 0),
				   i + (__u64 __user *)data);
	} else {
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);
	__put_user(current_cpu_data.fpu_id, data + 65);

	return 0;
}

int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	union fpureg *fregs;
	u64 fpr_val;
	int i;

	if (!access_ok(VERIFY_READ, data, 33 * 8))
		return -EIO;

	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++) {
		__get_user(fpr_val, i + (__u64 __user *)data);
		set_fpr64(&fregs[i], 0, fpr_val);
	}

	__get_user(child->thread.fpu.fcr31, data + 64);

	/* FIR may not be written. */

	return 0;
}

int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(boot_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(boot_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}

int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
		return -EIO;
	/* Check the values. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~0xff8)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & 7)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/* Set the G bit. */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}

/* regset get/set implementations */

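/*
 * The GPR regset is a straight copy of struct pt_regs to and from the
 * regset buffer.  The FP regset copies thread.fpu wholesale when the
 * in-kernel FP register width matches elf_fpreg_t, and otherwise
 * converts each register through get_fpr64()/set_fpr64().
 */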
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   regs, 0, sizeof(*regs));
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
				 0, sizeof(newregs));
	if (ret)
		return ret;

	*task_pt_regs(target) = newregs;

	return 0;
}

static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	unsigned i;
	int err;
	u64 fpr_val;

	/* XXX fcr31  */

	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.fpu,
					   0, sizeof(elf_fpregset_t));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &fpr_val, i * sizeof(elf_fpreg_t),
					  (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
	}

	return 0;
}

static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned i;
	int err;
	u64 fpr_val;

	/* XXX fcr31  */

	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.fpu,
					  0, sizeof(elf_fpregset_t));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}

enum mips_regset {
	REGSET_GPR,
	REGSET_FPR,
};

static const struct user_regset mips_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned int),
		.align		= sizeof(unsigned int),
		.get		= gpr_get,
		.set		= gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.get		= fpr_get,
		.set		= fpr_set,
	},
};

static const struct user_regset_view user_mips_view = {
	.name		= "mips",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips_regsets,
	.n		= ARRAY_SIZE(mips_regsets),
};

static const struct user_regset mips64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned long),
		.align		= sizeof(unsigned long),
		.get		= gpr_get,
		.set		= gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.get		= fpr_get,
		.set		= fpr_set,
	},
};

static const struct user_regset_view user_mips64_view = {
	.name		= "mips64",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

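/*
 * Pick the regset view that matches the tracee's register model: the
 * 32-bit view on 32-bit kernels and for O32 tasks running with 32-bit
 * registers, the 64-bit view otherwise.
 */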
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
	return &user_mips_view;
#endif

#ifdef CONFIG_MIPS32_O32
	if (test_thread_flag(TIF_32BIT_REGS))
		return &user_mips_view;
#endif

	return &user_mips64_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		union fpureg *fregs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
		case FPR_BASE ... FPR_BASE + 31:
			if (!tsk_used_math(child)) {
				/* FP not yet used */
				tmp = -1;
				break;
			}
			fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
			if (test_thread_flag(TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
						addr & 1);
				break;
			}
#endif
			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
			break;
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR:
			/* implementation / version register */
			tmp = current_cpu_data.fpu_id;
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			break;
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs = get_fpu_regs(child);

			if (!tsk_used_math(child)) {
				/* FP not yet used */
				memset(&child->thread.fpu, ~0,
				       sizeof(child->thread.fpu));
				child->thread.fpu.fcr31 = 0;
			}
#ifdef CONFIG_32BIT
			if (test_thread_flag(TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
					  addr & 1, data);
				break;
			}
#endif
			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
			break;
		}
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case FPC_CSR:
			child->thread.fpu.fcr31 = data;
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
 out:
	return ret;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	long ret = 0;
	user_exit();

	if (secure_computing(syscall) == -1)
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		ret = -1;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	audit_syscall_entry(syscall_get_arch(),
			    syscall,
			    regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);
	return syscall;
}

/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);

	user_enter();
}