v4.6
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1992 Ross Biro
  7 * Copyright (C) Linus Torvalds
  8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
  9 * Copyright (C) 1996 David S. Miller
 10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 11 * Copyright (C) 1999 MIPS Technologies, Inc.
 12 * Copyright (C) 2000 Ulf Carlsson
 13 *
 14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 15 * binaries.
 16 */
 17#include <linux/compiler.h>
 18#include <linux/context_tracking.h>
 19#include <linux/elf.h>
 20#include <linux/kernel.h>
 21#include <linux/sched.h>
 22#include <linux/mm.h>
 23#include <linux/errno.h>
 24#include <linux/ptrace.h>
 25#include <linux/regset.h>
 26#include <linux/smp.h>
 27#include <linux/security.h>
 28#include <linux/stddef.h>
 29#include <linux/tracehook.h>
 30#include <linux/audit.h>
 31#include <linux/seccomp.h>
 32#include <linux/ftrace.h>
 33
 34#include <asm/byteorder.h>
 35#include <asm/cpu.h>
 36#include <asm/cpu-info.h>
 37#include <asm/dsp.h>
 38#include <asm/fpu.h>
 39#include <asm/mipsregs.h>
 40#include <asm/mipsmtregs.h>
 41#include <asm/pgtable.h>
 42#include <asm/page.h>
 43#include <asm/syscall.h>
 44#include <asm/uaccess.h>
 45#include <asm/bootinfo.h>
 46#include <asm/reg.h>
 47
 48#define CREATE_TRACE_POINTS
 49#include <trace/events/syscalls.h>
 50
 51static void init_fp_ctx(struct task_struct *target)
 52{
 53	/* If FP has been used then the target already has context */
 54	if (tsk_used_math(target))
 55		return;
 56
 57	/* Begin with data registers set to all 1s... */
 58	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
 59
 60	/* ...and FCSR zeroed */
 61	target->thread.fpu.fcr31 = 0;
 62
 63	/*
 64	 * Record that the target has "used" math, such that the context
 65	 * just initialised, and any modifications made by the caller,
 66	 * aren't discarded.
 67	 */
 68	set_stopped_child_used_math(target);
 69}
 70
 71/*
 72 * Called by kernel/ptrace.c when detaching..
 73 *
 74 * Make sure single step bits etc are not set.
 75 */
 76void ptrace_disable(struct task_struct *child)
 77{
 78	/* Don't load the watchpoint registers for the ex-child. */
 79	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
 80}
 81
 82/*
 83 * Read a general register set.	 We always use the 64-bit format, even
 84 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 85 * Registers are sign extended to fill the available space.
 86 */
 87int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
 88{
 89	struct pt_regs *regs;
 90	int i;
 91
 92	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
 93		return -EIO;
 94
 95	regs = task_pt_regs(child);
 96
 97	for (i = 0; i < 32; i++)
 98		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
 99	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
100	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
101	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
102	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
103	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
104	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
105
106	return 0;
107}
108
109/*
110 * Write a general register set.  As for PTRACE_GETREGS, we always use
111 * the 64-bit format.  On a 32-bit kernel only the lower order half
112 * (according to endianness) will be used.
113 */
114int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
115{
116	struct pt_regs *regs;
117	int i;
118
119	if (!access_ok(VERIFY_READ, data, 38 * 8))
120		return -EIO;
121
122	regs = task_pt_regs(child);
123
124	for (i = 0; i < 32; i++)
125		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
126	__get_user(regs->lo, (__s64 __user *)&data->lo);
127	__get_user(regs->hi, (__s64 __user *)&data->hi);
128	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
129
130	/* badvaddr, status, and cause may not be written.  */
131
132	return 0;
133}
134
135int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
136{
137	int i;
138
139	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
140		return -EIO;
141
142	if (tsk_used_math(child)) {
143		union fpureg *fregs = get_fpu_regs(child);
144		for (i = 0; i < 32; i++)
145			__put_user(get_fpr64(&fregs[i], 0),
146				   i + (__u64 __user *)data);
147	} else {
148		for (i = 0; i < 32; i++)
149			__put_user((__u64) -1, i + (__u64 __user *) data);
150	}
151
152	__put_user(child->thread.fpu.fcr31, data + 64);
153	__put_user(boot_cpu_data.fpu_id, data + 65);
154
155	return 0;
156}
157
158int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
159{
160	union fpureg *fregs;
161	u64 fpr_val;
162	u32 fcr31;
163	u32 value;
164	u32 mask;
165	int i;
166
167	if (!access_ok(VERIFY_READ, data, 33 * 8))
168		return -EIO;
169
170	init_fp_ctx(child);
171	fregs = get_fpu_regs(child);
172
173	for (i = 0; i < 32; i++) {
174		__get_user(fpr_val, i + (__u64 __user *)data);
175		set_fpr64(&fregs[i], 0, fpr_val);
176	}
177
178	__get_user(value, data + 64);
179	fcr31 = child->thread.fpu.fcr31;
180	mask = boot_cpu_data.fpu_msk31;
181	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
182
183	/* FIR may not be written.  */
184
185	return 0;
186}
187
188int ptrace_get_watch_regs(struct task_struct *child,
189			  struct pt_watch_regs __user *addr)
190{
191	enum pt_watch_style style;
192	int i;
193
194	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
195		return -EIO;
196	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
197		return -EIO;
198
199#ifdef CONFIG_32BIT
200	style = pt_watch_style_mips32;
201#define WATCH_STYLE mips32
202#else
203	style = pt_watch_style_mips64;
204#define WATCH_STYLE mips64
205#endif
206
207	__put_user(style, &addr->style);
208	__put_user(boot_cpu_data.watch_reg_use_cnt,
209		   &addr->WATCH_STYLE.num_valid);
210	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
211		__put_user(child->thread.watch.mips3264.watchlo[i],
212			   &addr->WATCH_STYLE.watchlo[i]);
213		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
214			   &addr->WATCH_STYLE.watchhi[i]);
215		__put_user(boot_cpu_data.watch_reg_masks[i],
216			   &addr->WATCH_STYLE.watch_masks[i]);
217	}
218	for (; i < 8; i++) {
219		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
220		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
221		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
222	}
223
224	return 0;
225}
226
227int ptrace_set_watch_regs(struct task_struct *child,
228			  struct pt_watch_regs __user *addr)
229{
230	int i;
231	int watch_active = 0;
232	unsigned long lt[NUM_WATCH_REGS];
233	u16 ht[NUM_WATCH_REGS];
234
235	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
236		return -EIO;
237	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
238		return -EIO;
239	/* Check the values. */
240	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
241		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
242#ifdef CONFIG_32BIT
243		if (lt[i] & __UA_LIMIT)
244			return -EINVAL;
245#else
246		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
247			if (lt[i] & 0xffffffff80000000UL)
248				return -EINVAL;
249		} else {
250			if (lt[i] & __UA_LIMIT)
251				return -EINVAL;
252		}
253#endif
254		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
255		if (ht[i] & ~0xff8)
256			return -EINVAL;
257	}
258	/* Install them. */
259	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
260		if (lt[i] & 7)
261			watch_active = 1;
262		child->thread.watch.mips3264.watchlo[i] = lt[i];
263		/* Set the G bit. */
264		child->thread.watch.mips3264.watchhi[i] = ht[i];
265	}
266
267	if (watch_active)
268		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
269	else
270		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
271
272	return 0;
273}
274
275/* regset get/set implementations */
276
277#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
278
279static int gpr32_get(struct task_struct *target,
280		     const struct user_regset *regset,
281		     unsigned int pos, unsigned int count,
282		     void *kbuf, void __user *ubuf)
283{
284	struct pt_regs *regs = task_pt_regs(target);
285	u32 uregs[ELF_NGREG] = {};
286	unsigned i;
287
288	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
289		/* k0/k1 are copied as zero. */
290		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
291			continue;
292
293		uregs[i] = regs->regs[i - MIPS32_EF_R0];
294	}
295
296	uregs[MIPS32_EF_LO] = regs->lo;
297	uregs[MIPS32_EF_HI] = regs->hi;
298	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
299	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
300	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
301	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
302
303	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
304				   sizeof(uregs));
305}
306
307static int gpr32_set(struct task_struct *target,
308		     const struct user_regset *regset,
309		     unsigned int pos, unsigned int count,
310		     const void *kbuf, const void __user *ubuf)
311{
312	struct pt_regs *regs = task_pt_regs(target);
313	u32 uregs[ELF_NGREG];
314	unsigned start, num_regs, i;
315	int err;
316
317	start = pos / sizeof(u32);
318	num_regs = count / sizeof(u32);
319
320	if (start + num_regs > ELF_NGREG)
321		return -EIO;
322
323	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
324				 sizeof(uregs));
325	if (err)
326		return err;
327
328	for (i = start; i < num_regs; i++) {
329		/*
330		 * Cast all values to signed here so that if this is a 64-bit
331		 * kernel, the supplied 32-bit values will be sign extended.
332		 */
333		switch (i) {
334		case MIPS32_EF_R1 ... MIPS32_EF_R25:
335			/* k0/k1 are ignored. */
336		case MIPS32_EF_R28 ... MIPS32_EF_R31:
337			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
338			break;
339		case MIPS32_EF_LO:
340			regs->lo = (s32)uregs[i];
341			break;
342		case MIPS32_EF_HI:
343			regs->hi = (s32)uregs[i];
344			break;
345		case MIPS32_EF_CP0_EPC:
346			regs->cp0_epc = (s32)uregs[i];
347			break;
348		}
349	}
350
351	return 0;
352}
353
354#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
355
356#ifdef CONFIG_64BIT
357
358static int gpr64_get(struct task_struct *target,
359		     const struct user_regset *regset,
360		     unsigned int pos, unsigned int count,
361		     void *kbuf, void __user *ubuf)
362{
363	struct pt_regs *regs = task_pt_regs(target);
364	u64 uregs[ELF_NGREG] = {};
365	unsigned i;
366
367	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
368		/* k0/k1 are copied as zero. */
369		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
370			continue;
371
372		uregs[i] = regs->regs[i - MIPS64_EF_R0];
373	}
374
375	uregs[MIPS64_EF_LO] = regs->lo;
376	uregs[MIPS64_EF_HI] = regs->hi;
377	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
378	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
379	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
380	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
381
382	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
383				   sizeof(uregs));
384}
385
386static int gpr64_set(struct task_struct *target,
387		     const struct user_regset *regset,
388		     unsigned int pos, unsigned int count,
389		     const void *kbuf, const void __user *ubuf)
390{
391	struct pt_regs *regs = task_pt_regs(target);
392	u64 uregs[ELF_NGREG];
393	unsigned start, num_regs, i;
394	int err;
395
396	start = pos / sizeof(u64);
397	num_regs = count / sizeof(u64);
398
399	if (start + num_regs > ELF_NGREG)
400		return -EIO;
401
402	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
403				 sizeof(uregs));
404	if (err)
405		return err;
406
407	for (i = start; i < num_regs; i++) {
408		switch (i) {
409		case MIPS64_EF_R1 ... MIPS64_EF_R25:
410			/* k0/k1 are ignored. */
411		case MIPS64_EF_R28 ... MIPS64_EF_R31:
412			regs->regs[i - MIPS64_EF_R0] = uregs[i];
413			break;
414		case MIPS64_EF_LO:
415			regs->lo = uregs[i];
416			break;
417		case MIPS64_EF_HI:
418			regs->hi = uregs[i];
419			break;
420		case MIPS64_EF_CP0_EPC:
421			regs->cp0_epc = uregs[i];
422			break;
423		}
424	}
425
426	return 0;
427}
428
429#endif /* CONFIG_64BIT */
430
431static int fpr_get(struct task_struct *target,
432		   const struct user_regset *regset,
433		   unsigned int pos, unsigned int count,
434		   void *kbuf, void __user *ubuf)
435{
436	unsigned i;
437	int err;
438	u64 fpr_val;
439
440	/* XXX fcr31  */
441
442	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
443		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
444					   &target->thread.fpu,
445					   0, sizeof(elf_fpregset_t));
446
447	for (i = 0; i < NUM_FPU_REGS; i++) {
448		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
449		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
450					  &fpr_val, i * sizeof(elf_fpreg_t),
451					  (i + 1) * sizeof(elf_fpreg_t));
452		if (err)
453			return err;
454	}
455
456	return 0;
457}
458
459static int fpr_set(struct task_struct *target,
460		   const struct user_regset *regset,
461		   unsigned int pos, unsigned int count,
462		   const void *kbuf, const void __user *ubuf)
463{
464	unsigned i;
465	int err;
466	u64 fpr_val;
467
468	/* XXX fcr31  */
469
470	init_fp_ctx(target);
471
472	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
473		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
474					  &target->thread.fpu,
475					  0, sizeof(elf_fpregset_t));
476
477	for (i = 0; i < NUM_FPU_REGS; i++) {
478		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
479					 &fpr_val, i * sizeof(elf_fpreg_t),
480					 (i + 1) * sizeof(elf_fpreg_t));
481		if (err)
482			return err;
483		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
484	}
485
486	return 0;
487}
488
489enum mips_regset {
490	REGSET_GPR,
491	REGSET_FPR,
492};
493
494struct pt_regs_offset {
495	const char *name;
496	int offset;
497};
498
499#define REG_OFFSET_NAME(reg, r) {					\
500	.name = #reg,							\
501	.offset = offsetof(struct pt_regs, r)				\
502}
503
504#define REG_OFFSET_END {						\
505	.name = NULL,							\
506	.offset = 0							\
507}
508
509static const struct pt_regs_offset regoffset_table[] = {
510	REG_OFFSET_NAME(r0, regs[0]),
511	REG_OFFSET_NAME(r1, regs[1]),
512	REG_OFFSET_NAME(r2, regs[2]),
513	REG_OFFSET_NAME(r3, regs[3]),
514	REG_OFFSET_NAME(r4, regs[4]),
515	REG_OFFSET_NAME(r5, regs[5]),
516	REG_OFFSET_NAME(r6, regs[6]),
517	REG_OFFSET_NAME(r7, regs[7]),
518	REG_OFFSET_NAME(r8, regs[8]),
519	REG_OFFSET_NAME(r9, regs[9]),
520	REG_OFFSET_NAME(r10, regs[10]),
521	REG_OFFSET_NAME(r11, regs[11]),
522	REG_OFFSET_NAME(r12, regs[12]),
523	REG_OFFSET_NAME(r13, regs[13]),
524	REG_OFFSET_NAME(r14, regs[14]),
525	REG_OFFSET_NAME(r15, regs[15]),
526	REG_OFFSET_NAME(r16, regs[16]),
527	REG_OFFSET_NAME(r17, regs[17]),
528	REG_OFFSET_NAME(r18, regs[18]),
529	REG_OFFSET_NAME(r19, regs[19]),
530	REG_OFFSET_NAME(r20, regs[20]),
531	REG_OFFSET_NAME(r21, regs[21]),
532	REG_OFFSET_NAME(r22, regs[22]),
533	REG_OFFSET_NAME(r23, regs[23]),
534	REG_OFFSET_NAME(r24, regs[24]),
535	REG_OFFSET_NAME(r25, regs[25]),
536	REG_OFFSET_NAME(r26, regs[26]),
537	REG_OFFSET_NAME(r27, regs[27]),
538	REG_OFFSET_NAME(r28, regs[28]),
539	REG_OFFSET_NAME(r29, regs[29]),
540	REG_OFFSET_NAME(r30, regs[30]),
541	REG_OFFSET_NAME(r31, regs[31]),
542	REG_OFFSET_NAME(c0_status, cp0_status),
543	REG_OFFSET_NAME(hi, hi),
544	REG_OFFSET_NAME(lo, lo),
545#ifdef CONFIG_CPU_HAS_SMARTMIPS
546	REG_OFFSET_NAME(acx, acx),
547#endif
548	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
549	REG_OFFSET_NAME(c0_cause, cp0_cause),
550	REG_OFFSET_NAME(c0_epc, cp0_epc),
551#ifdef CONFIG_CPU_CAVIUM_OCTEON
552	REG_OFFSET_NAME(mpl0, mpl[0]),
553	REG_OFFSET_NAME(mpl1, mpl[1]),
554	REG_OFFSET_NAME(mpl2, mpl[2]),
555	REG_OFFSET_NAME(mtp0, mtp[0]),
556	REG_OFFSET_NAME(mtp1, mtp[1]),
557	REG_OFFSET_NAME(mtp2, mtp[2]),
558#endif
559	REG_OFFSET_END,
560};
561
562/**
563 * regs_query_register_offset() - query register offset from its name
564 * @name:       the name of a register
565 *
566 * regs_query_register_offset() returns the offset of a register in struct
567 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
568 */
569int regs_query_register_offset(const char *name)
570{
571        const struct pt_regs_offset *roff;
572        for (roff = regoffset_table; roff->name != NULL; roff++)
573                if (!strcmp(roff->name, name))
574                        return roff->offset;
575        return -EINVAL;
576}
577
578#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
579
580static const struct user_regset mips_regsets[] = {
581	[REGSET_GPR] = {
582		.core_note_type	= NT_PRSTATUS,
583		.n		= ELF_NGREG,
584		.size		= sizeof(unsigned int),
585		.align		= sizeof(unsigned int),
586		.get		= gpr32_get,
587		.set		= gpr32_set,
588	},
589	[REGSET_FPR] = {
590		.core_note_type	= NT_PRFPREG,
591		.n		= ELF_NFPREG,
592		.size		= sizeof(elf_fpreg_t),
593		.align		= sizeof(elf_fpreg_t),
594		.get		= fpr_get,
595		.set		= fpr_set,
596	},
597};
598
599static const struct user_regset_view user_mips_view = {
600	.name		= "mips",
601	.e_machine	= ELF_ARCH,
602	.ei_osabi	= ELF_OSABI,
603	.regsets	= mips_regsets,
604	.n		= ARRAY_SIZE(mips_regsets),
605};
606
607#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
608
609#ifdef CONFIG_64BIT
610
611static const struct user_regset mips64_regsets[] = {
612	[REGSET_GPR] = {
613		.core_note_type	= NT_PRSTATUS,
614		.n		= ELF_NGREG,
615		.size		= sizeof(unsigned long),
616		.align		= sizeof(unsigned long),
617		.get		= gpr64_get,
618		.set		= gpr64_set,
619	},
620	[REGSET_FPR] = {
621		.core_note_type	= NT_PRFPREG,
622		.n		= ELF_NFPREG,
623		.size		= sizeof(elf_fpreg_t),
624		.align		= sizeof(elf_fpreg_t),
625		.get		= fpr_get,
626		.set		= fpr_set,
627	},
628};
629
630static const struct user_regset_view user_mips64_view = {
631	.name		= "mips64",
632	.e_machine	= ELF_ARCH,
633	.ei_osabi	= ELF_OSABI,
634	.regsets	= mips64_regsets,
635	.n		= ARRAY_SIZE(mips64_regsets),
636};
637
638#endif /* CONFIG_64BIT */
639
640const struct user_regset_view *task_user_regset_view(struct task_struct *task)
641{
642#ifdef CONFIG_32BIT
643	return &user_mips_view;
644#else
645#ifdef CONFIG_MIPS32_O32
646	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
647		return &user_mips_view;
648#endif
649	return &user_mips64_view;
650#endif
651}
652
653long arch_ptrace(struct task_struct *child, long request,
654		 unsigned long addr, unsigned long data)
655{
656	int ret;
657	void __user *addrp = (void __user *) addr;
658	void __user *datavp = (void __user *) data;
659	unsigned long __user *datalp = (void __user *) data;
660
661	switch (request) {
662	/* when I and D space are separate, these will need to be fixed. */
663	case PTRACE_PEEKTEXT: /* read word at location addr. */
664	case PTRACE_PEEKDATA:
665		ret = generic_ptrace_peekdata(child, addr, data);
666		break;
667
668	/* Read the word at location addr in the USER area. */
669	case PTRACE_PEEKUSR: {
670		struct pt_regs *regs;
671		union fpureg *fregs;
672		unsigned long tmp = 0;
673
674		regs = task_pt_regs(child);
675		ret = 0;  /* Default return value. */
676
677		switch (addr) {
678		case 0 ... 31:
679			tmp = regs->regs[addr];
680			break;
681		case FPR_BASE ... FPR_BASE + 31:
682			if (!tsk_used_math(child)) {
683				/* FP not yet used */
684				tmp = -1;
685				break;
686			}
687			fregs = get_fpu_regs(child);
688
689#ifdef CONFIG_32BIT
690			if (test_thread_flag(TIF_32BIT_FPREGS)) {
691				/*
692				 * The odd registers are actually the high
693				 * order bits of the values stored in the even
694				 * registers - unless we're using r2k_switch.S.
695				 */
696				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
697						addr & 1);
698				break;
699			}
700#endif
701			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
702			break;
703		case PC:
704			tmp = regs->cp0_epc;
705			break;
706		case CAUSE:
707			tmp = regs->cp0_cause;
708			break;
709		case BADVADDR:
710			tmp = regs->cp0_badvaddr;
711			break;
712		case MMHI:
713			tmp = regs->hi;
714			break;
715		case MMLO:
716			tmp = regs->lo;
717			break;
718#ifdef CONFIG_CPU_HAS_SMARTMIPS
719		case ACX:
720			tmp = regs->acx;
721			break;
722#endif
723		case FPC_CSR:
724			tmp = child->thread.fpu.fcr31;
725			break;
726		case FPC_EIR:
727			/* implementation / version register */
728			tmp = boot_cpu_data.fpu_id;
729			break;
730		case DSP_BASE ... DSP_BASE + 5: {
731			dspreg_t *dregs;
732
733			if (!cpu_has_dsp) {
734				tmp = 0;
735				ret = -EIO;
736				goto out;
737			}
738			dregs = __get_dsp_regs(child);
739			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
740			break;
741		}
742		case DSP_CONTROL:
743			if (!cpu_has_dsp) {
744				tmp = 0;
745				ret = -EIO;
746				goto out;
747			}
748			tmp = child->thread.dsp.dspcontrol;
749			break;
750		default:
751			tmp = 0;
752			ret = -EIO;
753			goto out;
754		}
755		ret = put_user(tmp, datalp);
756		break;
757	}
758
759	/* when I and D space are separate, this will have to be fixed. */
760	case PTRACE_POKETEXT: /* write the word at location addr. */
761	case PTRACE_POKEDATA:
762		ret = generic_ptrace_pokedata(child, addr, data);
763		break;
764
765	case PTRACE_POKEUSR: {
766		struct pt_regs *regs;
767		ret = 0;
768		regs = task_pt_regs(child);
769
770		switch (addr) {
771		case 0 ... 31:
772			regs->regs[addr] = data;
773			break;
774		case FPR_BASE ... FPR_BASE + 31: {
775			union fpureg *fregs = get_fpu_regs(child);
776
777			init_fp_ctx(child);
778#ifdef CONFIG_32BIT
779			if (test_thread_flag(TIF_32BIT_FPREGS)) {
780				/*
781				 * The odd registers are actually the high
782				 * order bits of the values stored in the even
783				 * registers - unless we're using r2k_switch.S.
784				 */
785				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
786					  addr & 1, data);
787				break;
788			}
789#endif
790			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
791			break;
792		}
793		case PC:
794			regs->cp0_epc = data;
795			break;
796		case MMHI:
797			regs->hi = data;
798			break;
799		case MMLO:
800			regs->lo = data;
801			break;
802#ifdef CONFIG_CPU_HAS_SMARTMIPS
803		case ACX:
804			regs->acx = data;
805			break;
806#endif
807		case FPC_CSR:
808			child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
809			break;
810		case DSP_BASE ... DSP_BASE + 5: {
811			dspreg_t *dregs;
812
813			if (!cpu_has_dsp) {
814				ret = -EIO;
815				break;
816			}
817
818			dregs = __get_dsp_regs(child);
819			dregs[addr - DSP_BASE] = data;
820			break;
821		}
822		case DSP_CONTROL:
823			if (!cpu_has_dsp) {
824				ret = -EIO;
825				break;
826			}
827			child->thread.dsp.dspcontrol = data;
828			break;
829		default:
830			/* The rest are not allowed. */
831			ret = -EIO;
832			break;
833		}
834		break;
835		}
836
837	case PTRACE_GETREGS:
838		ret = ptrace_getregs(child, datavp);
839		break;
840
841	case PTRACE_SETREGS:
842		ret = ptrace_setregs(child, datavp);
843		break;
844
845	case PTRACE_GETFPREGS:
846		ret = ptrace_getfpregs(child, datavp);
847		break;
848
849	case PTRACE_SETFPREGS:
850		ret = ptrace_setfpregs(child, datavp);
851		break;
852
853	case PTRACE_GET_THREAD_AREA:
854		ret = put_user(task_thread_info(child)->tp_value, datalp);
855		break;
856
857	case PTRACE_GET_WATCH_REGS:
858		ret = ptrace_get_watch_regs(child, addrp);
859		break;
860
861	case PTRACE_SET_WATCH_REGS:
862		ret = ptrace_set_watch_regs(child, addrp);
863		break;
864
865	default:
866		ret = ptrace_request(child, request, addr, data);
867		break;
868	}
869 out:
870	return ret;
871}
872
873/*
874 * Notification of system call entry/exit
875 * - triggered by current->work.syscall_trace
876 */
877asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
878{
879	long ret = 0;
880	user_exit();
881
882	current_thread_info()->syscall = syscall;
883
884	if (secure_computing() == -1)
885		return -1;
886
887	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
888	    tracehook_report_syscall_entry(regs))
889		ret = -1;
890
891	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
892		trace_sys_enter(regs, regs->regs[2]);
893
894	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
895			    regs->regs[6], regs->regs[7]);
896	return syscall;
897}
898
899/*
900 * Notification of system call entry/exit
901 * - triggered by current->work.syscall_trace
902 */
903asmlinkage void syscall_trace_leave(struct pt_regs *regs)
904{
905        /*
906	 * We may come here right after calling schedule_user()
907	 * or do_notify_resume(), in which case we can be in RCU
908	 * user mode.
909	 */
910	user_exit();
911
912	audit_syscall_exit(regs);
913
914	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
915		trace_sys_exit(regs, regs->regs[2]);
916
917	if (test_thread_flag(TIF_SYSCALL_TRACE))
918		tracehook_report_syscall_exit(regs, 0);
919
920	user_enter();
921}
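
The PTRACE_GETREGS request handled above by arch_ptrace() and ptrace_getregs() fills a tracer-supplied buffer with 38 64-bit slots: registers 0-31, then lo, hi, cp0_epc, cp0_badvaddr, cp0_status and cp0_cause. The following is a minimal userspace sketch of the tracer side, not part of the kernel file: it assumes a MIPS target, indexes a raw uint64_t[38] buffer rather than any particular exported structure, and keeps error handling to a bare minimum.

#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	uint64_t gpr[38];	/* 32 GPRs, lo, hi, cp0_epc, cp0_badvaddr, cp0_status, cp0_cause */
	pid_t pid = fork();

	if (pid == 0) {
		/* Tracee: ask to be traced, then stop so the parent can inspect us. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	waitpid(pid, NULL, 0);	/* wait for the tracee's SIGSTOP */

	/* Reads the same 38 * 8 byte block that ptrace_getregs() checks with access_ok(). */
	if (ptrace(PTRACE_GETREGS, pid, NULL, gpr) == -1) {
		perror("PTRACE_GETREGS");
		exit(EXIT_FAILURE);
	}

	printf("epc=%#llx sp=%#llx ra=%#llx\n",
	       (unsigned long long)gpr[34],	/* cp0_epc */
	       (unsigned long long)gpr[29],	/* $29, stack pointer */
	       (unsigned long long)gpr[31]);	/* $31, return address */

	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	return 0;
}
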
v4.17
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1992 Ross Biro
   7 * Copyright (C) Linus Torvalds
   8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
   9 * Copyright (C) 1996 David S. Miller
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 1999 MIPS Technologies, Inc.
  12 * Copyright (C) 2000 Ulf Carlsson
  13 *
  14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
  15 * binaries.
  16 */
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/elf.h>
  20#include <linux/kernel.h>
  21#include <linux/sched.h>
  22#include <linux/sched/task_stack.h>
  23#include <linux/mm.h>
  24#include <linux/errno.h>
  25#include <linux/ptrace.h>
  26#include <linux/regset.h>
  27#include <linux/smp.h>
  28#include <linux/security.h>
  29#include <linux/stddef.h>
  30#include <linux/tracehook.h>
  31#include <linux/audit.h>
  32#include <linux/seccomp.h>
  33#include <linux/ftrace.h>
  34
  35#include <asm/byteorder.h>
  36#include <asm/cpu.h>
  37#include <asm/cpu-info.h>
  38#include <asm/dsp.h>
  39#include <asm/fpu.h>
  40#include <asm/mipsregs.h>
  41#include <asm/mipsmtregs.h>
  42#include <asm/pgtable.h>
  43#include <asm/page.h>
  44#include <asm/syscall.h>
  45#include <linux/uaccess.h>
  46#include <asm/bootinfo.h>
  47#include <asm/reg.h>
  48
  49#define CREATE_TRACE_POINTS
  50#include <trace/events/syscalls.h>
  51
  52static void init_fp_ctx(struct task_struct *target)
  53{
  54	/* If FP has been used then the target already has context */
  55	if (tsk_used_math(target))
  56		return;
  57
  58	/* Begin with data registers set to all 1s... */
  59	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
  60
  61	/* FCSR has been preset by `mips_set_personality_nan'.  */
  62
  63	/*
  64	 * Record that the target has "used" math, such that the context
  65	 * just initialised, and any modifications made by the caller,
  66	 * aren't discarded.
  67	 */
  68	set_stopped_child_used_math(target);
  69}
  70
  71/*
  72 * Called by kernel/ptrace.c when detaching..
  73 *
  74 * Make sure single step bits etc are not set.
  75 */
  76void ptrace_disable(struct task_struct *child)
  77{
  78	/* Don't load the watchpoint registers for the ex-child. */
  79	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
  80}
  81
  82/*
  83 * Poke at FCSR according to its mask.  Set the Cause bits even
  84 * if a corresponding Enable bit is set.  This will be noticed at
  85 * the time the thread is switched to and SIGFPE thrown accordingly.
  86 */
  87static void ptrace_setfcr31(struct task_struct *child, u32 value)
  88{
  89	u32 fcr31;
  90	u32 mask;
  91
  92	fcr31 = child->thread.fpu.fcr31;
  93	mask = boot_cpu_data.fpu_msk31;
  94	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
  95}
  96
  97/*
  98 * Read a general register set.	 We always use the 64-bit format, even
  99 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 100 * Registers are sign extended to fill the available space.
 101 */
 102int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
 103{
 104	struct pt_regs *regs;
 105	int i;
 106
 107	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
 108		return -EIO;
 109
 110	regs = task_pt_regs(child);
 111
 112	for (i = 0; i < 32; i++)
 113		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
 114	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
 115	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
 116	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
 117	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
 118	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
 119	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
 120
 121	return 0;
 122}
 123
 124/*
 125 * Write a general register set.  As for PTRACE_GETREGS, we always use
 126 * the 64-bit format.  On a 32-bit kernel only the lower order half
 127 * (according to endianness) will be used.
 128 */
 129int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
 130{
 131	struct pt_regs *regs;
 132	int i;
 133
 134	if (!access_ok(VERIFY_READ, data, 38 * 8))
 135		return -EIO;
 136
 137	regs = task_pt_regs(child);
 138
 139	for (i = 0; i < 32; i++)
 140		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
 141	__get_user(regs->lo, (__s64 __user *)&data->lo);
 142	__get_user(regs->hi, (__s64 __user *)&data->hi);
 143	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
 144
 145	/* badvaddr, status, and cause may not be written.  */
 146
 147	/* System call number may have been changed */
 148	mips_syscall_update_nr(child, regs);
 149
 150	return 0;
 151}
 152
 153int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
 154{
 155	int i;
 156
 157	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
 158		return -EIO;
 159
 160	if (tsk_used_math(child)) {
 161		union fpureg *fregs = get_fpu_regs(child);
 162		for (i = 0; i < 32; i++)
 163			__put_user(get_fpr64(&fregs[i], 0),
 164				   i + (__u64 __user *)data);
 165	} else {
 166		for (i = 0; i < 32; i++)
 167			__put_user((__u64) -1, i + (__u64 __user *) data);
 168	}
 169
 170	__put_user(child->thread.fpu.fcr31, data + 64);
 171	__put_user(boot_cpu_data.fpu_id, data + 65);
 172
 173	return 0;
 174}
 175
 176int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
 177{
 178	union fpureg *fregs;
 179	u64 fpr_val;
 180	u32 value;
 181	int i;
 182
 183	if (!access_ok(VERIFY_READ, data, 33 * 8))
 184		return -EIO;
 185
 186	init_fp_ctx(child);
 187	fregs = get_fpu_regs(child);
 188
 189	for (i = 0; i < 32; i++) {
 190		__get_user(fpr_val, i + (__u64 __user *)data);
 191		set_fpr64(&fregs[i], 0, fpr_val);
 192	}
 193
 194	__get_user(value, data + 64);
 195	ptrace_setfcr31(child, value);
 196
 197	/* FIR may not be written.  */
 198
 199	return 0;
 200}
 201
 202int ptrace_get_watch_regs(struct task_struct *child,
 203			  struct pt_watch_regs __user *addr)
 204{
 205	enum pt_watch_style style;
 206	int i;
 207
 208	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 209		return -EIO;
 210	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
 211		return -EIO;
 212
 213#ifdef CONFIG_32BIT
 214	style = pt_watch_style_mips32;
 215#define WATCH_STYLE mips32
 216#else
 217	style = pt_watch_style_mips64;
 218#define WATCH_STYLE mips64
 219#endif
 220
 221	__put_user(style, &addr->style);
 222	__put_user(boot_cpu_data.watch_reg_use_cnt,
 223		   &addr->WATCH_STYLE.num_valid);
 224	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 225		__put_user(child->thread.watch.mips3264.watchlo[i],
 226			   &addr->WATCH_STYLE.watchlo[i]);
 227		__put_user(child->thread.watch.mips3264.watchhi[i] &
 228				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
 229			   &addr->WATCH_STYLE.watchhi[i]);
 230		__put_user(boot_cpu_data.watch_reg_masks[i],
 231			   &addr->WATCH_STYLE.watch_masks[i]);
 232	}
 233	for (; i < 8; i++) {
 234		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
 235		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
 236		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
 237	}
 238
 239	return 0;
 240}
 241
 242int ptrace_set_watch_regs(struct task_struct *child,
 243			  struct pt_watch_regs __user *addr)
 244{
 245	int i;
 246	int watch_active = 0;
 247	unsigned long lt[NUM_WATCH_REGS];
 248	u16 ht[NUM_WATCH_REGS];
 249
 250	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 251		return -EIO;
 252	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
 253		return -EIO;
 254	/* Check the values. */
 255	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 256		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
 257#ifdef CONFIG_32BIT
 258		if (lt[i] & __UA_LIMIT)
 259			return -EINVAL;
 260#else
 261		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
 262			if (lt[i] & 0xffffffff80000000UL)
 263				return -EINVAL;
 264		} else {
 265			if (lt[i] & __UA_LIMIT)
 266				return -EINVAL;
 267		}
 268#endif
 269		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
 270		if (ht[i] & ~MIPS_WATCHHI_MASK)
 271			return -EINVAL;
 272	}
 273	/* Install them. */
 274	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 275		if (lt[i] & MIPS_WATCHLO_IRW)
 276			watch_active = 1;
 277		child->thread.watch.mips3264.watchlo[i] = lt[i];
 278		/* Set the G bit. */
 279		child->thread.watch.mips3264.watchhi[i] = ht[i];
 280	}
 281
 282	if (watch_active)
 283		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
 284	else
 285		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
 286
 287	return 0;
 288}
 289
 290/* regset get/set implementations */
 291
 292#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 293
 294static int gpr32_get(struct task_struct *target,
 295		     const struct user_regset *regset,
 296		     unsigned int pos, unsigned int count,
 297		     void *kbuf, void __user *ubuf)
 298{
 299	struct pt_regs *regs = task_pt_regs(target);
 300	u32 uregs[ELF_NGREG] = {};
 301
 302	mips_dump_regs32(uregs, regs);
 303	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
 304				   sizeof(uregs));
 305}
 306
 307static int gpr32_set(struct task_struct *target,
 308		     const struct user_regset *regset,
 309		     unsigned int pos, unsigned int count,
 310		     const void *kbuf, const void __user *ubuf)
 311{
 312	struct pt_regs *regs = task_pt_regs(target);
 313	u32 uregs[ELF_NGREG];
 314	unsigned start, num_regs, i;
 315	int err;
 316
 317	start = pos / sizeof(u32);
 318	num_regs = count / sizeof(u32);
 319
 320	if (start + num_regs > ELF_NGREG)
 321		return -EIO;
 322
 323	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 324				 sizeof(uregs));
 325	if (err)
 326		return err;
 327
 328	for (i = start; i < num_regs; i++) {
 329		/*
 330		 * Cast all values to signed here so that if this is a 64-bit
 331		 * kernel, the supplied 32-bit values will be sign extended.
 332		 */
 333		switch (i) {
 334		case MIPS32_EF_R1 ... MIPS32_EF_R25:
 335			/* k0/k1 are ignored. */
 336		case MIPS32_EF_R28 ... MIPS32_EF_R31:
 337			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
 338			break;
 339		case MIPS32_EF_LO:
 340			regs->lo = (s32)uregs[i];
 341			break;
 342		case MIPS32_EF_HI:
 343			regs->hi = (s32)uregs[i];
 344			break;
 345		case MIPS32_EF_CP0_EPC:
 346			regs->cp0_epc = (s32)uregs[i];
 347			break;
 348		}
 349	}
 350
 351	/* System call number may have been changed */
 352	mips_syscall_update_nr(target, regs);
 353
 354	return 0;
 355}
 356
 357#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 358
 359#ifdef CONFIG_64BIT
 360
 361static int gpr64_get(struct task_struct *target,
 362		     const struct user_regset *regset,
 363		     unsigned int pos, unsigned int count,
 364		     void *kbuf, void __user *ubuf)
 365{
 366	struct pt_regs *regs = task_pt_regs(target);
 367	u64 uregs[ELF_NGREG] = {};
 368
 369	mips_dump_regs64(uregs, regs);
 370	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
 371				   sizeof(uregs));
 372}
 373
 374static int gpr64_set(struct task_struct *target,
 375		     const struct user_regset *regset,
 376		     unsigned int pos, unsigned int count,
 377		     const void *kbuf, const void __user *ubuf)
 378{
 379	struct pt_regs *regs = task_pt_regs(target);
 380	u64 uregs[ELF_NGREG];
 381	unsigned start, num_regs, i;
 382	int err;
 383
 384	start = pos / sizeof(u64);
 385	num_regs = count / sizeof(u64);
 386
 387	if (start + num_regs > ELF_NGREG)
 388		return -EIO;
 389
 390	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 391				 sizeof(uregs));
 392	if (err)
 393		return err;
 394
 395	for (i = start; i < num_regs; i++) {
 396		switch (i) {
 397		case MIPS64_EF_R1 ... MIPS64_EF_R25:
 398			/* k0/k1 are ignored. */
 399		case MIPS64_EF_R28 ... MIPS64_EF_R31:
 400			regs->regs[i - MIPS64_EF_R0] = uregs[i];
 401			break;
 402		case MIPS64_EF_LO:
 403			regs->lo = uregs[i];
 404			break;
 405		case MIPS64_EF_HI:
 406			regs->hi = uregs[i];
 407			break;
 408		case MIPS64_EF_CP0_EPC:
 409			regs->cp0_epc = uregs[i];
 410			break;
 411		}
 412	}
 413
 414	/* System call number may have been changed */
 415	mips_syscall_update_nr(target, regs);
 416
 417	return 0;
 418}
 419
 420#endif /* CONFIG_64BIT */
 421
 422/*
 423 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 424 * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
 425 * correspond 1:1 to buffer slots.  Only general registers are copied.
 426 */
 427static int fpr_get_fpa(struct task_struct *target,
 428		       unsigned int *pos, unsigned int *count,
 429		       void **kbuf, void __user **ubuf)
 430{
 431	return user_regset_copyout(pos, count, kbuf, ubuf,
 432				   &target->thread.fpu,
 433				   0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
 434}
 435
 436/*
 437 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 438 * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
 439 * general register slots are copied to buffer slots.  Only general
 440 * registers are copied.
 441 */
 442static int fpr_get_msa(struct task_struct *target,
 443		       unsigned int *pos, unsigned int *count,
 444		       void **kbuf, void __user **ubuf)
 445{
 446	unsigned int i;
 447	u64 fpr_val;
 448	int err;
 449
 450	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 451	for (i = 0; i < NUM_FPU_REGS; i++) {
 452		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
 453		err = user_regset_copyout(pos, count, kbuf, ubuf,
 454					  &fpr_val, i * sizeof(elf_fpreg_t),
 455					  (i + 1) * sizeof(elf_fpreg_t));
 456		if (err)
 457			return err;
 458	}
 459
 460	return 0;
 461}
 462
 463/*
 464 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
 465 * Choose the appropriate helper for general registers, and then copy
 466 * the FCSR and FIR registers separately.
 467 */
 468static int fpr_get(struct task_struct *target,
 469		   const struct user_regset *regset,
 470		   unsigned int pos, unsigned int count,
 471		   void *kbuf, void __user *ubuf)
 472{
 473	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 474	const int fir_pos = fcr31_pos + sizeof(u32);
 475	int err;
 476
 477	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 478		err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
 479	else
 480		err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
 481	if (err)
 482		return err;
 483
 484	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 485				  &target->thread.fpu.fcr31,
 486				  fcr31_pos, fcr31_pos + sizeof(u32));
 487	if (err)
 488		return err;
 489
 490	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 491				  &boot_cpu_data.fpu_id,
 492				  fir_pos, fir_pos + sizeof(u32));
 493
 494	return err;
 495}
 496
 497/*
 498 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 499 * !CONFIG_CPU_HAS_MSA variant.   Buffer slots correspond 1:1 to FP
 500 * context's general register slots.  Only general registers are copied.
 501 */
 502static int fpr_set_fpa(struct task_struct *target,
 503		       unsigned int *pos, unsigned int *count,
 504		       const void **kbuf, const void __user **ubuf)
 505{
 506	return user_regset_copyin(pos, count, kbuf, ubuf,
 507				  &target->thread.fpu,
 508				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
 509}
 510
 511/*
 512 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 513 * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
 514 * bits only of FP context's general register slots.  Only general
 515 * registers are copied.
 516 */
 517static int fpr_set_msa(struct task_struct *target,
 518		       unsigned int *pos, unsigned int *count,
 519		       const void **kbuf, const void __user **ubuf)
 520{
 521	unsigned int i;
 522	u64 fpr_val;
 523	int err;
 524
 525	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 526	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
 527		err = user_regset_copyin(pos, count, kbuf, ubuf,
 528					 &fpr_val, i * sizeof(elf_fpreg_t),
 529					 (i + 1) * sizeof(elf_fpreg_t));
 530		if (err)
 531			return err;
 532		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
 533	}
 534
 535	return 0;
 536}
 537
 538/*
 539 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 540 * Choose the appropriate helper for general registers, and then copy
 541 * the FCSR register separately.  Ignore the incoming FIR register
 542 * contents though, as the register is read-only.
 543 *
 544 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 545 * which is supposed to have been guaranteed by the kernel before
 546 * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
 547 * so that we can safely avoid preinitializing temporaries for
 548 * partial register writes.
 549 */
 550static int fpr_set(struct task_struct *target,
 551		   const struct user_regset *regset,
 552		   unsigned int pos, unsigned int count,
 553		   const void *kbuf, const void __user *ubuf)
 554{
 555	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 556	const int fir_pos = fcr31_pos + sizeof(u32);
 557	u32 fcr31;
 558	int err;
 559
 560	BUG_ON(count % sizeof(elf_fpreg_t));
 561
 562	if (pos + count > sizeof(elf_fpregset_t))
 563		return -EIO;
 564
 565	init_fp_ctx(target);
 566
 567	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 568		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
 569	else
 570		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
 571	if (err)
 572		return err;
 573
 574	if (count > 0) {
 575		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 576					 &fcr31,
 577					 fcr31_pos, fcr31_pos + sizeof(u32));
 578		if (err)
 579			return err;
 580
 581		ptrace_setfcr31(target, fcr31);
 582	}
 583
 584	if (count > 0)
 585		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
 586						fir_pos,
 587						fir_pos + sizeof(u32));
 588
 589	return err;
 590}
 591
 592enum mips_regset {
 593	REGSET_GPR,
 594	REGSET_FPR,
 595};
 596
 597struct pt_regs_offset {
 598	const char *name;
 599	int offset;
 600};
 601
 602#define REG_OFFSET_NAME(reg, r) {					\
 603	.name = #reg,							\
 604	.offset = offsetof(struct pt_regs, r)				\
 605}
 606
 607#define REG_OFFSET_END {						\
 608	.name = NULL,							\
 609	.offset = 0							\
 610}
 611
 612static const struct pt_regs_offset regoffset_table[] = {
 613	REG_OFFSET_NAME(r0, regs[0]),
 614	REG_OFFSET_NAME(r1, regs[1]),
 615	REG_OFFSET_NAME(r2, regs[2]),
 616	REG_OFFSET_NAME(r3, regs[3]),
 617	REG_OFFSET_NAME(r4, regs[4]),
 618	REG_OFFSET_NAME(r5, regs[5]),
 619	REG_OFFSET_NAME(r6, regs[6]),
 620	REG_OFFSET_NAME(r7, regs[7]),
 621	REG_OFFSET_NAME(r8, regs[8]),
 622	REG_OFFSET_NAME(r9, regs[9]),
 623	REG_OFFSET_NAME(r10, regs[10]),
 624	REG_OFFSET_NAME(r11, regs[11]),
 625	REG_OFFSET_NAME(r12, regs[12]),
 626	REG_OFFSET_NAME(r13, regs[13]),
 627	REG_OFFSET_NAME(r14, regs[14]),
 628	REG_OFFSET_NAME(r15, regs[15]),
 629	REG_OFFSET_NAME(r16, regs[16]),
 630	REG_OFFSET_NAME(r17, regs[17]),
 631	REG_OFFSET_NAME(r18, regs[18]),
 632	REG_OFFSET_NAME(r19, regs[19]),
 633	REG_OFFSET_NAME(r20, regs[20]),
 634	REG_OFFSET_NAME(r21, regs[21]),
 635	REG_OFFSET_NAME(r22, regs[22]),
 636	REG_OFFSET_NAME(r23, regs[23]),
 637	REG_OFFSET_NAME(r24, regs[24]),
 638	REG_OFFSET_NAME(r25, regs[25]),
 639	REG_OFFSET_NAME(r26, regs[26]),
 640	REG_OFFSET_NAME(r27, regs[27]),
 641	REG_OFFSET_NAME(r28, regs[28]),
 642	REG_OFFSET_NAME(r29, regs[29]),
 643	REG_OFFSET_NAME(r30, regs[30]),
 644	REG_OFFSET_NAME(r31, regs[31]),
 645	REG_OFFSET_NAME(c0_status, cp0_status),
 646	REG_OFFSET_NAME(hi, hi),
 647	REG_OFFSET_NAME(lo, lo),
 648#ifdef CONFIG_CPU_HAS_SMARTMIPS
 649	REG_OFFSET_NAME(acx, acx),
 650#endif
 651	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
 652	REG_OFFSET_NAME(c0_cause, cp0_cause),
 653	REG_OFFSET_NAME(c0_epc, cp0_epc),
 654#ifdef CONFIG_CPU_CAVIUM_OCTEON
 655	REG_OFFSET_NAME(mpl0, mpl[0]),
 656	REG_OFFSET_NAME(mpl1, mpl[1]),
 657	REG_OFFSET_NAME(mpl2, mpl[2]),
 658	REG_OFFSET_NAME(mtp0, mtp[0]),
 659	REG_OFFSET_NAME(mtp1, mtp[1]),
 660	REG_OFFSET_NAME(mtp2, mtp[2]),
 661#endif
 662	REG_OFFSET_END,
 663};
 664
 665/**
 666 * regs_query_register_offset() - query register offset from its name
 667 * @name:       the name of a register
 668 *
 669 * regs_query_register_offset() returns the offset of a register in struct
 670 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 671 */
 672int regs_query_register_offset(const char *name)
 673{
 674        const struct pt_regs_offset *roff;
 675        for (roff = regoffset_table; roff->name != NULL; roff++)
 676                if (!strcmp(roff->name, name))
 677                        return roff->offset;
 678        return -EINVAL;
 679}
 680
 681#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 682
 683static const struct user_regset mips_regsets[] = {
 684	[REGSET_GPR] = {
 685		.core_note_type	= NT_PRSTATUS,
 686		.n		= ELF_NGREG,
 687		.size		= sizeof(unsigned int),
 688		.align		= sizeof(unsigned int),
 689		.get		= gpr32_get,
 690		.set		= gpr32_set,
 691	},
 692	[REGSET_FPR] = {
 693		.core_note_type	= NT_PRFPREG,
 694		.n		= ELF_NFPREG,
 695		.size		= sizeof(elf_fpreg_t),
 696		.align		= sizeof(elf_fpreg_t),
 697		.get		= fpr_get,
 698		.set		= fpr_set,
 699	},
 700};
 701
 702static const struct user_regset_view user_mips_view = {
 703	.name		= "mips",
 704	.e_machine	= ELF_ARCH,
 705	.ei_osabi	= ELF_OSABI,
 706	.regsets	= mips_regsets,
 707	.n		= ARRAY_SIZE(mips_regsets),
 708};
 709
 710#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 711
 712#ifdef CONFIG_64BIT
 713
 714static const struct user_regset mips64_regsets[] = {
 715	[REGSET_GPR] = {
 716		.core_note_type	= NT_PRSTATUS,
 717		.n		= ELF_NGREG,
 718		.size		= sizeof(unsigned long),
 719		.align		= sizeof(unsigned long),
 720		.get		= gpr64_get,
 721		.set		= gpr64_set,
 722	},
 723	[REGSET_FPR] = {
 724		.core_note_type	= NT_PRFPREG,
 725		.n		= ELF_NFPREG,
 726		.size		= sizeof(elf_fpreg_t),
 727		.align		= sizeof(elf_fpreg_t),
 728		.get		= fpr_get,
 729		.set		= fpr_set,
 730	},
 731};
 732
 733static const struct user_regset_view user_mips64_view = {
 734	.name		= "mips64",
 735	.e_machine	= ELF_ARCH,
 736	.ei_osabi	= ELF_OSABI,
 737	.regsets	= mips64_regsets,
 738	.n		= ARRAY_SIZE(mips64_regsets),
 739};
 740
 741#ifdef CONFIG_MIPS32_N32
 742
 743static const struct user_regset_view user_mipsn32_view = {
 744	.name		= "mipsn32",
 745	.e_flags	= EF_MIPS_ABI2,
 746	.e_machine	= ELF_ARCH,
 747	.ei_osabi	= ELF_OSABI,
 748	.regsets	= mips64_regsets,
 749	.n		= ARRAY_SIZE(mips64_regsets),
 750};
 751
 752#endif /* CONFIG_MIPS32_N32 */
 753
 754#endif /* CONFIG_64BIT */
 755
 756const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 757{
 758#ifdef CONFIG_32BIT
 759	return &user_mips_view;
 760#else
 761#ifdef CONFIG_MIPS32_O32
 762	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
 763		return &user_mips_view;
 764#endif
 765#ifdef CONFIG_MIPS32_N32
 766	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
 767		return &user_mipsn32_view;
 768#endif
 769	return &user_mips64_view;
 770#endif
 771}
 772
 773long arch_ptrace(struct task_struct *child, long request,
 774		 unsigned long addr, unsigned long data)
 775{
 776	int ret;
 777	void __user *addrp = (void __user *) addr;
 778	void __user *datavp = (void __user *) data;
 779	unsigned long __user *datalp = (void __user *) data;
 780
 781	switch (request) {
 782	/* when I and D space are separate, these will need to be fixed. */
 783	case PTRACE_PEEKTEXT: /* read word at location addr. */
 784	case PTRACE_PEEKDATA:
 785		ret = generic_ptrace_peekdata(child, addr, data);
 786		break;
 787
 788	/* Read the word at location addr in the USER area. */
 789	case PTRACE_PEEKUSR: {
 790		struct pt_regs *regs;
 791		union fpureg *fregs;
 792		unsigned long tmp = 0;
 793
 794		regs = task_pt_regs(child);
 795		ret = 0;  /* Default return value. */
 796
 797		switch (addr) {
 798		case 0 ... 31:
 799			tmp = regs->regs[addr];
 800			break;
 801		case FPR_BASE ... FPR_BASE + 31:
 802			if (!tsk_used_math(child)) {
 803				/* FP not yet used */
 804				tmp = -1;
 805				break;
 806			}
 807			fregs = get_fpu_regs(child);
 808
 809#ifdef CONFIG_32BIT
 810			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
 811				/*
 812				 * The odd registers are actually the high
 813				 * order bits of the values stored in the even
 814				 * registers - unless we're using r2k_switch.S.
 815				 */
 816				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
 817						addr & 1);
 818				break;
 819			}
 820#endif
 821			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
 822			break;
 823		case PC:
 824			tmp = regs->cp0_epc;
 825			break;
 826		case CAUSE:
 827			tmp = regs->cp0_cause;
 828			break;
 829		case BADVADDR:
 830			tmp = regs->cp0_badvaddr;
 831			break;
 832		case MMHI:
 833			tmp = regs->hi;
 834			break;
 835		case MMLO:
 836			tmp = regs->lo;
 837			break;
 838#ifdef CONFIG_CPU_HAS_SMARTMIPS
 839		case ACX:
 840			tmp = regs->acx;
 841			break;
 842#endif
 843		case FPC_CSR:
 844			tmp = child->thread.fpu.fcr31;
 845			break;
 846		case FPC_EIR:
 847			/* implementation / version register */
 848			tmp = boot_cpu_data.fpu_id;
 849			break;
 850		case DSP_BASE ... DSP_BASE + 5: {
 851			dspreg_t *dregs;
 852
 853			if (!cpu_has_dsp) {
 854				tmp = 0;
 855				ret = -EIO;
 856				goto out;
 857			}
 858			dregs = __get_dsp_regs(child);
 859			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
 860			break;
 861		}
 862		case DSP_CONTROL:
 863			if (!cpu_has_dsp) {
 864				tmp = 0;
 865				ret = -EIO;
 866				goto out;
 867			}
 868			tmp = child->thread.dsp.dspcontrol;
 869			break;
 870		default:
 871			tmp = 0;
 872			ret = -EIO;
 873			goto out;
 874		}
 875		ret = put_user(tmp, datalp);
 876		break;
 877	}
 878
 879	/* when I and D space are separate, this will have to be fixed. */
 880	case PTRACE_POKETEXT: /* write the word at location addr. */
 881	case PTRACE_POKEDATA:
 882		ret = generic_ptrace_pokedata(child, addr, data);
 883		break;
 884
 885	case PTRACE_POKEUSR: {
 886		struct pt_regs *regs;
 887		ret = 0;
 888		regs = task_pt_regs(child);
 889
 890		switch (addr) {
 891		case 0 ... 31:
 892			regs->regs[addr] = data;
 893			/* System call number may have been changed */
 894			if (addr == 2)
 895				mips_syscall_update_nr(child, regs);
 896			else if (addr == 4 &&
 897				 mips_syscall_is_indirect(child, regs))
 898				mips_syscall_update_nr(child, regs);
 899			break;
 900		case FPR_BASE ... FPR_BASE + 31: {
 901			union fpureg *fregs = get_fpu_regs(child);
 902
 903			init_fp_ctx(child);
 904#ifdef CONFIG_32BIT
 905			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
 906				/*
 907				 * The odd registers are actually the high
 908				 * order bits of the values stored in the even
 909				 * registers - unless we're using r2k_switch.S.
 910				 */
 911				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
 912					  addr & 1, data);
 913				break;
 914			}
 915#endif
 916			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
 917			break;
 918		}
 919		case PC:
 920			regs->cp0_epc = data;
 921			break;
 922		case MMHI:
 923			regs->hi = data;
 924			break;
 925		case MMLO:
 926			regs->lo = data;
 927			break;
 928#ifdef CONFIG_CPU_HAS_SMARTMIPS
 929		case ACX:
 930			regs->acx = data;
 931			break;
 932#endif
 933		case FPC_CSR:
 934			init_fp_ctx(child);
 935			ptrace_setfcr31(child, data);
 936			break;
 937		case DSP_BASE ... DSP_BASE + 5: {
 938			dspreg_t *dregs;
 939
 940			if (!cpu_has_dsp) {
 941				ret = -EIO;
 942				break;
 943			}
 944
 945			dregs = __get_dsp_regs(child);
 946			dregs[addr - DSP_BASE] = data;
 947			break;
 948		}
 949		case DSP_CONTROL:
 950			if (!cpu_has_dsp) {
 951				ret = -EIO;
 952				break;
 953			}
 954			child->thread.dsp.dspcontrol = data;
 955			break;
 956		default:
 957			/* The rest are not allowed. */
 958			ret = -EIO;
 959			break;
 960		}
 961		break;
 962		}
 963
 964	case PTRACE_GETREGS:
 965		ret = ptrace_getregs(child, datavp);
 966		break;
 967
 968	case PTRACE_SETREGS:
 969		ret = ptrace_setregs(child, datavp);
 970		break;
 971
 972	case PTRACE_GETFPREGS:
 973		ret = ptrace_getfpregs(child, datavp);
 974		break;
 975
 976	case PTRACE_SETFPREGS:
 977		ret = ptrace_setfpregs(child, datavp);
 978		break;
 979
 980	case PTRACE_GET_THREAD_AREA:
 981		ret = put_user(task_thread_info(child)->tp_value, datalp);
 982		break;
 983
 984	case PTRACE_GET_WATCH_REGS:
 985		ret = ptrace_get_watch_regs(child, addrp);
 986		break;
 987
 988	case PTRACE_SET_WATCH_REGS:
 989		ret = ptrace_set_watch_regs(child, addrp);
 990		break;
 991
 992	default:
 993		ret = ptrace_request(child, request, addr, data);
 994		break;
 995	}
 996 out:
 997	return ret;
 998}
 999
1000/*
1001 * Notification of system call entry/exit
1002 * - triggered by current->work.syscall_trace
1003 */
1004asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
1005{
1006	user_exit();
1007
1008	current_thread_info()->syscall = syscall;
1009
1010	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
1011		if (tracehook_report_syscall_entry(regs))
1012			return -1;
1013		syscall = current_thread_info()->syscall;
1014	}
1015
1016#ifdef CONFIG_SECCOMP
1017	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
1018		int ret, i;
1019		struct seccomp_data sd;
1020		unsigned long args[6];
1021
1022		sd.nr = syscall;
1023		sd.arch = syscall_get_arch();
1024		syscall_get_arguments(current, regs, 0, 6, args);
1025		for (i = 0; i < 6; i++)
1026			sd.args[i] = args[i];
1027		sd.instruction_pointer = KSTK_EIP(current);
1028
1029		ret = __secure_computing(&sd);
1030		if (ret == -1)
1031			return ret;
1032		syscall = current_thread_info()->syscall;
1033	}
1034#endif
1035
1036	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1037		trace_sys_enter(regs, regs->regs[2]);
1038
1039	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
1040			    regs->regs[6], regs->regs[7]);
1041
1042	/*
1043	 * Negative syscall numbers are mistaken for rejected syscalls, but
1044	 * won't have had the return value set appropriately, so we do so now.
1045	 */
1046	if (syscall < 0)
1047		syscall_set_return_value(current, regs, -ENOSYS, 0);
1048	return syscall;
1049}
1050
1051/*
1052 * Notification of system call entry/exit
1053 * - triggered by current->work.syscall_trace
1054 */
1055asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1056{
1057        /*
1058	 * We may come here right after calling schedule_user()
1059	 * or do_notify_resume(), in which case we can be in RCU
1060	 * user mode.
1061	 */
1062	user_exit();
1063
1064	audit_syscall_exit(regs);
1065
1066	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1067		trace_sys_exit(regs, regs_return_value(regs));
1068
1069	if (test_thread_flag(TIF_SYSCALL_TRACE))
1070		tracehook_report_syscall_exit(regs, 0);
1071
1072	user_enter();
1073}
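
The regsets registered above are also reachable through PTRACE_GETREGSET. For the NT_PRFPREG note served by fpr_get(), the layout is 32 64-bit FP register slots followed by the 32-bit FCSR and FIR words. Below is a hedged sketch of the tracer side, assuming the tracee is already attached and stopped; the struct and function names (mips_fp_dump, dump_fpu) are illustrative, not taken from any kernel or libc header.

#include <elf.h>		/* NT_PRFPREG */
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>		/* struct iovec */

/* Illustrative layout only: mirrors what fpr_get() copies out, in order. */
struct mips_fp_dump {
	uint64_t fpr[32];	/* lower 64 bits of each FP register */
	uint32_t fcr31;		/* FP control/status word */
	uint32_t fir;		/* FP implementation register (read-only) */
};

int dump_fpu(pid_t pid)
{
	struct mips_fp_dump fp;
	struct iovec iov = { .iov_base = &fp, .iov_len = sizeof(fp) };

	/* The kernel copies at most iov_len bytes and writes back the size it used. */
	if (ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov) == -1)
		return -1;

	printf("$f0=%#llx fcr31=%#x fir=%#x (%zu bytes)\n",
	       (unsigned long long)fp.fpr[0], fp.fcr31, fp.fir, iov.iov_len);
	return 0;
}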