// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/ptrace_64.c
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003 - 2008 Paul Mundt
 *
 * Started from SH3/4 version:
 *   SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
 *
 * Original x86 implementation:
 *   By Ross Biro 1/23/92
 *   edited by Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
#include <asm/traps.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/* This mask defines the bits of the SR which the user is not allowed to
   change, which are everything except S, Q, M, PR, SZ, FR. */
#define SR_MASK	(0xffff8cfd)
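
/*
 * Editorial illustration (not part of the original file): PTRACE_POKEUSR
 * below combines the tracer's value with the child's current SR so that
 * only the user-modifiable bits (S, Q, M, PR, SZ, FR) can change:
 *
 *	data &= ~SR_MASK;		// keep only user bits from the tracer
 *	data |= (cursr & SR_MASK);	// keep every other bit from the child
 *
 * e.g. with cursr == 0 and data == 0xffffffff, the value actually stored
 * is ~SR_MASK == 0x00007302.
 */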

/*
 * This does not yet catch signals sent when the child dies
 * in exit.c or in signal.c.
 */

/*
 * This routine will get a word from the user area in the process kernel stack.
 */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)(task->thread.uregs);
	stack += offset;
	return (*((int *)stack));
}

static inline unsigned long
get_fpu_long(struct task_struct *task, unsigned long addr)
{
	unsigned long tmp;
	struct pt_regs *regs;
	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;

	if (!tsk_used_math(task)) {
		if (addr == offsetof(struct user_fpu_struct, fpscr)) {
			tmp = FPSCR_INIT;
		} else {
			tmp = 0xffffffffUL; /* matches initial value in fpu.c */
		}
		return tmp;
	}

	if (last_task_used_math == task) {
		enable_fpu();
		save_fpu(task);
		disable_fpu();
		last_task_used_math = 0;
		regs->sr |= SR_FD;
	}

	tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
	return tmp;
}

/*
 * This routine will put a word into the user area in the process kernel stack.
 */
static inline int put_stack_long(struct task_struct *task, int offset,
				 unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)(task->thread.uregs);
	stack += offset;
	*(unsigned long *) stack = data;
	return 0;
}

static inline int
put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
{
	struct pt_regs *regs;

	regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;

	if (!tsk_used_math(task)) {
		init_fpu(task);
	} else if (last_task_used_math == task) {
		enable_fpu();
		save_fpu(task);
		disable_fpu();
		last_task_used_math = 0;
		regs->sr |= SR_FD;
	}

	((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
	return 0;
}
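
/*
 * Editorial sketch of the model used by the two FPU helpers above
 * (assumptions flagged, not taken from the original sources): the child's
 * FPU state lives in task->thread.xstate.  If the child was the last FPU
 * user, its live registers are flushed there first (enable_fpu()/
 * save_fpu()/disable_fpu()) and SR.FD is set so the next FPU use faults
 * and reloads.  'addr' is a byte offset into struct user_fpu_struct, so,
 * assuming that layout mirrors the hardfpu save area, a tracer's peek of
 * the FPSCR would end up as
 *
 *	get_fpu_long(child, offsetof(struct user_fpu_struct, fpscr));
 */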

void user_enable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = child->thread.uregs;

	regs->sr |= SR_SSTEP;	/* auto-resetting upon exception */

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = child->thread.uregs;

	regs->sr &= ~SR_SSTEP;

	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
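
/*
 * Usage sketch (editorial, hypothetical tracer code): the generic
 * PTRACE_SINGLESTEP request resumes the child via user_enable_single_step()
 * above, so a debugger would typically do
 *
 *	ptrace(PTRACE_SINGLESTEP, pid, 0, 0);
 *	waitpid(pid, &status, 0);	// child stops with SIGTRAP
 *
 * SR.SSTEP is cleared again by the DEBUGSS handler (do_single_step() below)
 * and by user_disable_single_step() when the tracer detaches.
 */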

static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	/* PC, SR, SYSCALL */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &regs->pc,
				  0, 3 * sizeof(unsigned long long));

	/* R1 -> R63 */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  regs->regs,
					  offsetof(struct pt_regs, regs[0]),
					  63 * sizeof(unsigned long long));
	/* TR0 -> TR7 */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  regs->tregs,
					  offsetof(struct pt_regs, tregs[0]),
					  8 * sizeof(unsigned long long));

	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}

static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	/* PC, SR, SYSCALL */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->pc,
				 0, 3 * sizeof(unsigned long long));

	/* R1 -> R63 */
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 regs->regs,
					 offsetof(struct pt_regs, regs[0]),
					 63 * sizeof(unsigned long long));

	/* TR0 -> TR7 */
	if (!ret && count > 0)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 regs->tregs,
					 offsetof(struct pt_regs, tregs[0]),
					 8 * sizeof(unsigned long long));

	if (!ret)
		ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						sizeof(struct pt_regs), -1);

	return ret;
}

#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.xstate->hardfpu, 0, -1);
}

static int fpregs_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	set_stopped_child_used_math(target);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.xstate->hardfpu, 0, -1);
}

static int fpregs_active(struct task_struct *target,
			 const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}
#endif

const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(sr),
	REG_OFFSET_NAME(syscall_nr),
	REGS_OFFSET_NAME(0),
	REGS_OFFSET_NAME(1),
	REGS_OFFSET_NAME(2),
	REGS_OFFSET_NAME(3),
	REGS_OFFSET_NAME(4),
	REGS_OFFSET_NAME(5),
	REGS_OFFSET_NAME(6),
	REGS_OFFSET_NAME(7),
	REGS_OFFSET_NAME(8),
	REGS_OFFSET_NAME(9),
	REGS_OFFSET_NAME(10),
	REGS_OFFSET_NAME(11),
	REGS_OFFSET_NAME(12),
	REGS_OFFSET_NAME(13),
	REGS_OFFSET_NAME(14),
	REGS_OFFSET_NAME(15),
	REGS_OFFSET_NAME(16),
	REGS_OFFSET_NAME(17),
	REGS_OFFSET_NAME(18),
	REGS_OFFSET_NAME(19),
	REGS_OFFSET_NAME(20),
	REGS_OFFSET_NAME(21),
	REGS_OFFSET_NAME(22),
	REGS_OFFSET_NAME(23),
	REGS_OFFSET_NAME(24),
	REGS_OFFSET_NAME(25),
	REGS_OFFSET_NAME(26),
	REGS_OFFSET_NAME(27),
	REGS_OFFSET_NAME(28),
	REGS_OFFSET_NAME(29),
	REGS_OFFSET_NAME(30),
	REGS_OFFSET_NAME(31),
	REGS_OFFSET_NAME(32),
	REGS_OFFSET_NAME(33),
	REGS_OFFSET_NAME(34),
	REGS_OFFSET_NAME(35),
	REGS_OFFSET_NAME(36),
	REGS_OFFSET_NAME(37),
	REGS_OFFSET_NAME(38),
	REGS_OFFSET_NAME(39),
	REGS_OFFSET_NAME(40),
	REGS_OFFSET_NAME(41),
	REGS_OFFSET_NAME(42),
	REGS_OFFSET_NAME(43),
	REGS_OFFSET_NAME(44),
	REGS_OFFSET_NAME(45),
	REGS_OFFSET_NAME(46),
	REGS_OFFSET_NAME(47),
	REGS_OFFSET_NAME(48),
	REGS_OFFSET_NAME(49),
	REGS_OFFSET_NAME(50),
	REGS_OFFSET_NAME(51),
	REGS_OFFSET_NAME(52),
	REGS_OFFSET_NAME(53),
	REGS_OFFSET_NAME(54),
	REGS_OFFSET_NAME(55),
	REGS_OFFSET_NAME(56),
	REGS_OFFSET_NAME(57),
	REGS_OFFSET_NAME(58),
	REGS_OFFSET_NAME(59),
	REGS_OFFSET_NAME(60),
	REGS_OFFSET_NAME(61),
	REGS_OFFSET_NAME(62),
	REGS_OFFSET_NAME(63),
	TREGS_OFFSET_NAME(0),
	TREGS_OFFSET_NAME(1),
	TREGS_OFFSET_NAME(2),
	TREGS_OFFSET_NAME(3),
	TREGS_OFFSET_NAME(4),
	TREGS_OFFSET_NAME(5),
	TREGS_OFFSET_NAME(6),
	TREGS_OFFSET_NAME(7),
	REG_OFFSET_END,
};
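
/*
 * Editorial note: regoffset_table is the name/offset map consumed by the
 * generic register accessors declared in <asm/ptrace.h>, e.g.
 * regs_query_register_offset() and regs_get_register(), which kprobes and
 * tracing code use to look registers up by name.  A hedged example,
 * assuming REGS_OFFSET_NAME(15) names its entry "r15":
 *
 *	int off = regs_query_register_offset("r15");
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 */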

/*
 * These are our native regset flavours.
 */
enum sh_regset {
	REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
	REGSET_FPU,
#endif
};

static const struct user_regset sh_regsets[] = {
	/*
	 * Format is:
	 *	PC, SR, SYSCALL,
	 *	R1 --> R63,
	 *	TR0 --> TR7,
	 */
	[REGSET_GENERAL] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(long long),
		.align		= sizeof(long long),
		.get		= genregs_get,
		.set		= genregs_set,
	},

#ifdef CONFIG_SH_FPU
	[REGSET_FPU] = {
		.core_note_type	= NT_PRFPREG,
		.n		= sizeof(struct user_fpu_struct) /
				  sizeof(long long),
		.size		= sizeof(long long),
		.align		= sizeof(long long),
		.get		= fpregs_get,
		.set		= fpregs_set,
		.active		= fpregs_active,
	},
#endif
};

static const struct user_regset_view user_sh64_native_view = {
	.name		= "sh64",
	.e_machine	= EM_SH,
	.regsets	= sh_regsets,
	.n		= ARRAY_SIZE(sh_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_sh64_native_view;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 3) || addr < 0)
			break;

		if (addr < sizeof(struct pt_regs))
			tmp = get_stack_long(child, addr);
		else if ((addr >= offsetof(struct user, fpu)) &&
			 (addr < offsetof(struct user, u_fpvalid))) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			tmp = get_fpu_long(child, index);
		} else if (addr == offsetof(struct user, u_fpvalid)) {
			tmp = !!tsk_used_math(child);
		} else {
			break;
		}
		ret = put_user(tmp, datap);
		break;
	}

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area. We must
		   disallow any changes to certain SR bits or u_fpvalid, since
		   this could crash the kernel or result in a security
		   loophole. */
		ret = -EIO;
		if ((addr & 3) || addr < 0)
			break;

		if (addr < sizeof(struct pt_regs)) {
			/* Ignore change of top 32 bits of SR */
			if (addr == offsetof (struct pt_regs, sr)+4)
			{
				ret = 0;
				break;
			}
			/* If lower 32 bits of SR, ignore non-user bits */
			if (addr == offsetof (struct pt_regs, sr))
			{
				long cursr = get_stack_long(child, addr);
				data &= ~(SR_MASK);
				data |= (cursr & SR_MASK);
			}
			ret = put_stack_long(child, addr, data);
		}
		else if ((addr >= offsetof(struct user, fpu)) &&
			 (addr < offsetof(struct user, u_fpvalid))) {
			unsigned long index;
			ret = init_fpu(child);
			if (ret)
				break;
			index = addr - offsetof(struct user, fpu);
			ret = put_fpu_long(child, index, data);
		}
		break;

	case PTRACE_GETREGS:
		return copy_regset_to_user(child, &user_sh64_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);
	case PTRACE_SETREGS:
		return copy_regset_from_user(child, &user_sh64_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);
#ifdef CONFIG_SH_FPU
	case PTRACE_GETFPREGS:
		return copy_regset_to_user(child, &user_sh64_native_view,
					   REGSET_FPU,
					   0, sizeof(struct user_fpu_struct),
					   datap);
	case PTRACE_SETFPREGS:
		return copy_regset_from_user(child, &user_sh64_native_view,
					     REGSET_FPU,
					     0, sizeof(struct user_fpu_struct),
					     datap);
#endif
	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
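
/*
 * Editorial illustration (hypothetical user-space tracer, not part of this
 * file): with the glibc ptrace(2) wrapper, the requests handled above map
 * onto calls such as
 *
 *	errno = 0;
 *	long pc = ptrace(PTRACE_PEEKUSR, pid,
 *			 offsetof(struct pt_regs, pc), 0);
 *
 *	struct pt_regs gregs;
 *	ptrace(PTRACE_GETREGS, pid, 0, &gregs);
 *
 * PEEKUSR/POKEUSR offsets must be word aligned here (addr & 3 is rejected
 * with -EIO).
 */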

asmlinkage int sh64_ptrace(long request, long pid,
			   unsigned long addr, unsigned long data)
{
#define WPC_DBRMODE 0x0d104008
	static unsigned long first_call;

	if (!test_and_set_bit(0, &first_call)) {
		/* Set WPC.DBRMODE to 0.  This makes all debug events get
		 * delivered through RESVEC, i.e. into the handlers in entry.S.
		 * (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
		 * would normally be left set to 1, which makes debug events get
		 * delivered through DBRVEC, i.e. into the remote gdb's
		 * handlers.  This prevents ptrace getting them, and confuses
		 * the remote gdb.) */
		printk("DBRMODE set to 0 to permit native debugging\n");
		poke_real_address_q(WPC_DBRMODE, 0);
	}

	return sys_ptrace(request, pid, addr, data);
}

asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
{
	long long ret = 0;

	secure_computing_strict(regs->regs[9]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * Tracing decided this syscall should not happen.
		 * We'll return a bogus call number to get an ENOSYS
		 * error, but leave the original number in regs->regs[0].
		 */
		ret = -1LL;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[9]);

	audit_syscall_entry(regs->regs[1], regs->regs[2], regs->regs[3],
			    regs->regs[4], regs->regs[5]);

	return ret ?: regs->regs[9];
}
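
/*
 * Editorial note: the gcc "?:" form above is shorthand for
 *
 *	return ret ? ret : regs->regs[9];
 *
 * i.e. the syscall entry path presumably dispatches on the returned value,
 * so returning -1 when a tracer cancels the call yields ENOSYS while
 * regs->regs[9] still holds the original syscall number.
 */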

asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->regs[9]);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}

/* Called with interrupts disabled */
asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
{
	/* This is called after a single step exception (DEBUGSS).
	   There is no need to change the PC, as it is a post-execution
	   exception, as entry.S does not do anything to the PC for DEBUGSS.
	   We need to clear the Single Step setting in SR to avoid
	   continually stepping. */
	local_irq_enable();
	regs->sr &= ~SR_SSTEP;
	force_sig(SIGTRAP);
}

/* Called with interrupts disabled */
BUILD_TRAP_HANDLER(breakpoint)
{
	TRAP_HANDLER_DECL;

	/* We need to forward step the PC, to counteract the backstep done
	   in signal.c. */
	local_irq_enable();
	force_sig(SIGTRAP);
	regs->pc += 4;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	user_disable_single_step(child);
}