v5.4
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1992 Ross Biro
   7 * Copyright (C) Linus Torvalds
   8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
   9 * Copyright (C) 1996 David S. Miller
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 1999 MIPS Technologies, Inc.
  12 * Copyright (C) 2000 Ulf Carlsson
  13 *
  14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
  15 * binaries.
  16 */
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/elf.h>
  20#include <linux/kernel.h>
  21#include <linux/sched.h>
  22#include <linux/sched/task_stack.h>
  23#include <linux/mm.h>
  24#include <linux/errno.h>
  25#include <linux/ptrace.h>
  26#include <linux/regset.h>
  27#include <linux/smp.h>
  28#include <linux/security.h>
  29#include <linux/stddef.h>
  30#include <linux/tracehook.h>
  31#include <linux/audit.h>
  32#include <linux/seccomp.h>
  33#include <linux/ftrace.h>
  34
  35#include <asm/byteorder.h>
  36#include <asm/cpu.h>
  37#include <asm/cpu-info.h>
  38#include <asm/dsp.h>
  39#include <asm/fpu.h>
  40#include <asm/mipsregs.h>
  41#include <asm/mipsmtregs.h>
  42#include <asm/pgtable.h>
  43#include <asm/page.h>
  44#include <asm/processor.h>
  45#include <asm/syscall.h>
  46#include <linux/uaccess.h>
  47#include <asm/bootinfo.h>
  48#include <asm/reg.h>
  49
  50#define CREATE_TRACE_POINTS
  51#include <trace/events/syscalls.h>
  52
  53/*
   54 * Called by kernel/ptrace.c when detaching.
  55 *
  56 * Make sure single step bits etc are not set.
  57 */
  58void ptrace_disable(struct task_struct *child)
  59{
  60	/* Don't load the watchpoint registers for the ex-child. */
  61	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
  62}
  63
  64/*
  65 * Read a general register set.	 We always use the 64-bit format, even
  66 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
  67 * Registers are sign extended to fill the available space.
  68 */
  69int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
  70{
  71	struct pt_regs *regs;
  72	int i;
  73
  74	if (!access_ok(data, 38 * 8))
  75		return -EIO;
  76
  77	regs = task_pt_regs(child);
  78
  79	for (i = 0; i < 32; i++)
  80		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
  81	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
  82	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
  83	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
  84	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
  85	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
  86	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
  87
  88	return 0;
  89}
  90
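/*
 * Illustrative userspace sketch (not part of this file): a tracer reading
 * the 38 sign-extended 64-bit slots that ptrace_getregs() above fills in.
 * The struct layout simply mirrors the __put_user() sequence; the type and
 * function names are local to this example.
 */
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

struct example_mips_gregs {
	uint64_t regs[32];	/* GPRs, sign-extended for 32-bit tasks */
	uint64_t lo, hi;
	uint64_t cp0_epc;
	uint64_t cp0_badvaddr;
	uint64_t cp0_status;
	uint64_t cp0_cause;
};				/* 38 * 8 bytes, matching the access_ok() check */

static long example_read_gregs(pid_t pid, struct example_mips_gregs *out)
{
	/* the register block is passed through the "data" argument */
	return ptrace(PTRACE_GETREGS, pid, NULL, out);
}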
  91/*
  92 * Write a general register set.  As for PTRACE_GETREGS, we always use
  93 * the 64-bit format.  On a 32-bit kernel only the lower order half
  94 * (according to endianness) will be used.
  95 */
  96int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
  97{
  98	struct pt_regs *regs;
  99	int i;
 100
 101	if (!access_ok(data, 38 * 8))
 102		return -EIO;
 103
 104	regs = task_pt_regs(child);
 105
 106	for (i = 0; i < 32; i++)
 107		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
 108	__get_user(regs->lo, (__s64 __user *)&data->lo);
 109	__get_user(regs->hi, (__s64 __user *)&data->hi);
 110	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
 111
 112	/* badvaddr, status, and cause may not be written.  */
 113
 114	/* System call number may have been changed */
 115	mips_syscall_update_nr(child, regs);
 116
 117	return 0;
 118}
 119
 120int ptrace_get_watch_regs(struct task_struct *child,
 121			  struct pt_watch_regs __user *addr)
 122{
 123	enum pt_watch_style style;
 124	int i;
 125
 126	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 127		return -EIO;
 128	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
 129		return -EIO;
 130
 131#ifdef CONFIG_32BIT
 132	style = pt_watch_style_mips32;
 133#define WATCH_STYLE mips32
 134#else
 135	style = pt_watch_style_mips64;
 136#define WATCH_STYLE mips64
 137#endif
 138
 139	__put_user(style, &addr->style);
 140	__put_user(boot_cpu_data.watch_reg_use_cnt,
 141		   &addr->WATCH_STYLE.num_valid);
 142	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 143		__put_user(child->thread.watch.mips3264.watchlo[i],
 144			   &addr->WATCH_STYLE.watchlo[i]);
 145		__put_user(child->thread.watch.mips3264.watchhi[i] &
 146				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
 147			   &addr->WATCH_STYLE.watchhi[i]);
 148		__put_user(boot_cpu_data.watch_reg_masks[i],
 149			   &addr->WATCH_STYLE.watch_masks[i]);
 150	}
 151	for (; i < 8; i++) {
 152		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
 153		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
 154		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
 155	}
 156
 157	return 0;
 158}
 159
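/*
 * Illustrative userspace sketch (not part of this file): fetching the watch
 * registers of a stopped tracee.  Note that for this request the buffer
 * travels in the "addr" argument (see arch_ptrace() below); the request
 * value and struct pt_watch_regs are assumed to come from the uapi
 * <asm/ptrace.h>.
 */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>

static int example_dump_watch_regs(pid_t pid)
{
	struct pt_watch_regs wr;
	unsigned int i;

	if (ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL) < 0)
		return -1;
	/* use wr.mips32 instead on a 32-bit kernel (wr.style says which) */
	for (i = 0; i < wr.mips64.num_valid; i++)
		printf("watchlo%u=%llx watchhi%u=%x\n", i,
		       (unsigned long long)wr.mips64.watchlo[i],
		       i, wr.mips64.watchhi[i]);
	return 0;
}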
 160int ptrace_set_watch_regs(struct task_struct *child,
 161			  struct pt_watch_regs __user *addr)
 162{
 163	int i;
 164	int watch_active = 0;
 165	unsigned long lt[NUM_WATCH_REGS];
 166	u16 ht[NUM_WATCH_REGS];
 167
 168	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 169		return -EIO;
 170	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
 171		return -EIO;
 172	/* Check the values. */
 173	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 174		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
 175#ifdef CONFIG_32BIT
 176		if (lt[i] & __UA_LIMIT)
 177			return -EINVAL;
 178#else
 179		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
 180			if (lt[i] & 0xffffffff80000000UL)
 181				return -EINVAL;
 182		} else {
 183			if (lt[i] & __UA_LIMIT)
 184				return -EINVAL;
 185		}
 186#endif
 187		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
 188		if (ht[i] & ~MIPS_WATCHHI_MASK)
 189			return -EINVAL;
 190	}
 191	/* Install them. */
 192	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 193		if (lt[i] & MIPS_WATCHLO_IRW)
 194			watch_active = 1;
 195		child->thread.watch.mips3264.watchlo[i] = lt[i];
 196		/* Set the G bit. */
 197		child->thread.watch.mips3264.watchhi[i] = ht[i];
 198	}
 199
 200	if (watch_active)
 201		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
 202	else
 203		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
 204
 205	return 0;
 206}
 207
 208/* regset get/set implementations */
 209
 210#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 211
 212static int gpr32_get(struct task_struct *target,
 213		     const struct user_regset *regset,
 214		     unsigned int pos, unsigned int count,
 215		     void *kbuf, void __user *ubuf)
 216{
 217	struct pt_regs *regs = task_pt_regs(target);
 218	u32 uregs[ELF_NGREG] = {};
 219
 220	mips_dump_regs32(uregs, regs);
 221	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
 222				   sizeof(uregs));
 223}
 224
 225static int gpr32_set(struct task_struct *target,
 226		     const struct user_regset *regset,
 227		     unsigned int pos, unsigned int count,
 228		     const void *kbuf, const void __user *ubuf)
 229{
 230	struct pt_regs *regs = task_pt_regs(target);
 231	u32 uregs[ELF_NGREG];
 232	unsigned start, num_regs, i;
 233	int err;
 234
 235	start = pos / sizeof(u32);
 236	num_regs = count / sizeof(u32);
 237
 238	if (start + num_regs > ELF_NGREG)
 239		return -EIO;
 240
 241	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 242				 sizeof(uregs));
 243	if (err)
 244		return err;
 245
 246	for (i = start; i < num_regs; i++) {
 247		/*
 248		 * Cast all values to signed here so that if this is a 64-bit
 249		 * kernel, the supplied 32-bit values will be sign extended.
 250		 */
 251		switch (i) {
 252		case MIPS32_EF_R1 ... MIPS32_EF_R25:
 253			/* k0/k1 are ignored. */
 254		case MIPS32_EF_R28 ... MIPS32_EF_R31:
 255			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
 256			break;
 257		case MIPS32_EF_LO:
 258			regs->lo = (s32)uregs[i];
 259			break;
 260		case MIPS32_EF_HI:
 261			regs->hi = (s32)uregs[i];
 262			break;
 263		case MIPS32_EF_CP0_EPC:
 264			regs->cp0_epc = (s32)uregs[i];
 265			break;
 266		}
 267	}
 268
 269	/* System call number may have been changed */
 270	mips_syscall_update_nr(target, regs);
 271
 272	return 0;
 273}
 274
 275#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 276
 277#ifdef CONFIG_64BIT
 278
 279static int gpr64_get(struct task_struct *target,
 280		     const struct user_regset *regset,
 281		     unsigned int pos, unsigned int count,
 282		     void *kbuf, void __user *ubuf)
 283{
 284	struct pt_regs *regs = task_pt_regs(target);
 285	u64 uregs[ELF_NGREG] = {};
 286
 287	mips_dump_regs64(uregs, regs);
 288	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
 289				   sizeof(uregs));
 290}
 291
 292static int gpr64_set(struct task_struct *target,
 293		     const struct user_regset *regset,
 294		     unsigned int pos, unsigned int count,
 295		     const void *kbuf, const void __user *ubuf)
 296{
 297	struct pt_regs *regs = task_pt_regs(target);
 298	u64 uregs[ELF_NGREG];
 299	unsigned start, num_regs, i;
 300	int err;
 301
 302	start = pos / sizeof(u64);
 303	num_regs = count / sizeof(u64);
 304
 305	if (start + num_regs > ELF_NGREG)
 306		return -EIO;
 307
 308	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 309				 sizeof(uregs));
 310	if (err)
 311		return err;
 312
 313	for (i = start; i < num_regs; i++) {
 314		switch (i) {
 315		case MIPS64_EF_R1 ... MIPS64_EF_R25:
 316			/* k0/k1 are ignored. */
 317		case MIPS64_EF_R28 ... MIPS64_EF_R31:
 318			regs->regs[i - MIPS64_EF_R0] = uregs[i];
 319			break;
 320		case MIPS64_EF_LO:
 321			regs->lo = uregs[i];
 322			break;
 323		case MIPS64_EF_HI:
 324			regs->hi = uregs[i];
 325			break;
 326		case MIPS64_EF_CP0_EPC:
 327			regs->cp0_epc = uregs[i];
 328			break;
 329		}
 330	}
 331
 332	/* System call number may have been changed */
 333	mips_syscall_update_nr(target, regs);
 334
 335	return 0;
 336}
 337
 338#endif /* CONFIG_64BIT */
 339
 340
 341#ifdef CONFIG_MIPS_FP_SUPPORT
 342
 343/*
 344 * Poke at FCSR according to its mask.  Set the Cause bits even
 345 * if a corresponding Enable bit is set.  This will be noticed at
 346 * the time the thread is switched to and SIGFPE thrown accordingly.
 347 */
 348static void ptrace_setfcr31(struct task_struct *child, u32 value)
 349{
 350	u32 fcr31;
 351	u32 mask;
 352
 353	fcr31 = child->thread.fpu.fcr31;
 354	mask = boot_cpu_data.fpu_msk31;
 355	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
 356}
 357
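/*
 * Worked example of the masking above, with illustrative values: if
 * fpu_msk31 is 0x00800000 (one read-only bit) and the tracer writes
 * 0x01000004 while the thread's FCSR is currently 0x00800000, the result is
 * (0x01000004 & ~0x00800000) | (0x00800000 & 0x00800000) == 0x01800004,
 * i.e. writable bits take the new value and the masked bit keeps its old
 * contents.
 */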
 358int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
 359{
 360	int i;
 361
 362	if (!access_ok(data, 33 * 8))
 363		return -EIO;
 364
 365	if (tsk_used_math(child)) {
 366		union fpureg *fregs = get_fpu_regs(child);
 367		for (i = 0; i < 32; i++)
 368			__put_user(get_fpr64(&fregs[i], 0),
 369				   i + (__u64 __user *)data);
 370	} else {
 371		for (i = 0; i < 32; i++)
 372			__put_user((__u64) -1, i + (__u64 __user *) data);
 373	}
 374
 375	__put_user(child->thread.fpu.fcr31, data + 64);
 376	__put_user(boot_cpu_data.fpu_id, data + 65);
 377
 378	return 0;
 379}
 380
 381int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
 382{
 383	union fpureg *fregs;
 384	u64 fpr_val;
 385	u32 value;
 386	int i;
 387
 388	if (!access_ok(data, 33 * 8))
 389		return -EIO;
 390
 391	init_fp_ctx(child);
 392	fregs = get_fpu_regs(child);
 393
 394	for (i = 0; i < 32; i++) {
 395		__get_user(fpr_val, i + (__u64 __user *)data);
 396		set_fpr64(&fregs[i], 0, fpr_val);
 397	}
 398
 399	__get_user(value, data + 64);
 400	ptrace_setfcr31(child, value);
 401
 402	/* FIR may not be written.  */
 403
 404	return 0;
 405}
 406
 407/*
 408 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 409 * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
 410 * correspond 1:1 to buffer slots.  Only general registers are copied.
 411 */
 412static int fpr_get_fpa(struct task_struct *target,
 413		       unsigned int *pos, unsigned int *count,
 414		       void **kbuf, void __user **ubuf)
 415{
 416	return user_regset_copyout(pos, count, kbuf, ubuf,
 417				   &target->thread.fpu,
 418				   0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
 419}
 420
 421/*
 422 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 423 * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
 424 * general register slots are copied to buffer slots.  Only general
 425 * registers are copied.
 426 */
 427static int fpr_get_msa(struct task_struct *target,
 428		       unsigned int *pos, unsigned int *count,
 429		       void **kbuf, void __user **ubuf)
 430{
 431	unsigned int i;
 432	u64 fpr_val;
 433	int err;
 434
 435	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 436	for (i = 0; i < NUM_FPU_REGS; i++) {
 437		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
 438		err = user_regset_copyout(pos, count, kbuf, ubuf,
 439					  &fpr_val, i * sizeof(elf_fpreg_t),
 440					  (i + 1) * sizeof(elf_fpreg_t));
 441		if (err)
 442			return err;
 443	}
 444
 445	return 0;
 446}
 447
 448/*
 449 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
 450 * Choose the appropriate helper for general registers, and then copy
 451 * the FCSR and FIR registers separately.
 452 */
 453static int fpr_get(struct task_struct *target,
 454		   const struct user_regset *regset,
 455		   unsigned int pos, unsigned int count,
 456		   void *kbuf, void __user *ubuf)
 457{
 458	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 459	const int fir_pos = fcr31_pos + sizeof(u32);
 460	int err;
 461
 462	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 463		err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
 464	else
 465		err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
 466	if (err)
 467		return err;
 468
 469	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 470				  &target->thread.fpu.fcr31,
 471				  fcr31_pos, fcr31_pos + sizeof(u32));
 472	if (err)
 473		return err;
 474
 475	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 476				  &boot_cpu_data.fpu_id,
 477				  fir_pos, fir_pos + sizeof(u32));
 478
 479	return err;
 480}
 481
 482/*
 483 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 484 * !CONFIG_CPU_HAS_MSA variant.   Buffer slots correspond 1:1 to FP
 485 * context's general register slots.  Only general registers are copied.
 486 */
 487static int fpr_set_fpa(struct task_struct *target,
 488		       unsigned int *pos, unsigned int *count,
 489		       const void **kbuf, const void __user **ubuf)
 490{
 491	return user_regset_copyin(pos, count, kbuf, ubuf,
 492				  &target->thread.fpu,
 493				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
 494}
 495
 496/*
 497 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 498 * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
 499 * bits only of FP context's general register slots.  Only general
 500 * registers are copied.
 501 */
 502static int fpr_set_msa(struct task_struct *target,
 503		       unsigned int *pos, unsigned int *count,
 504		       const void **kbuf, const void __user **ubuf)
 505{
 506	unsigned int i;
 507	u64 fpr_val;
 508	int err;
 509
 510	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 511	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
 512		err = user_regset_copyin(pos, count, kbuf, ubuf,
 513					 &fpr_val, i * sizeof(elf_fpreg_t),
 514					 (i + 1) * sizeof(elf_fpreg_t));
 515		if (err)
 516			return err;
 517		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
 518	}
 519
 520	return 0;
 521}
 522
 523/*
 524 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 525 * Choose the appropriate helper for general registers, and then copy
 526 * the FCSR register separately.  Ignore the incoming FIR register
 527 * contents though, as the register is read-only.
 528 *
 529 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 530 * which is supposed to have been guaranteed by the kernel before
 531 * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
 532 * so that we can safely avoid preinitializing temporaries for
 533 * partial register writes.
 534 */
 535static int fpr_set(struct task_struct *target,
 536		   const struct user_regset *regset,
 537		   unsigned int pos, unsigned int count,
 538		   const void *kbuf, const void __user *ubuf)
 539{
 540	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 541	const int fir_pos = fcr31_pos + sizeof(u32);
 542	u32 fcr31;
 543	int err;
 544
 545	BUG_ON(count % sizeof(elf_fpreg_t));
 546
 547	if (pos + count > sizeof(elf_fpregset_t))
 548		return -EIO;
 549
 550	init_fp_ctx(target);
 551
 552	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 553		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
 554	else
 555		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
 556	if (err)
 557		return err;
 558
 559	if (count > 0) {
 560		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 561					 &fcr31,
 562					 fcr31_pos, fcr31_pos + sizeof(u32));
 563		if (err)
 564			return err;
 565
 566		ptrace_setfcr31(target, fcr31);
 567	}
 568
 569	if (count > 0)
 570		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
 571						fir_pos,
 572						fir_pos + sizeof(u32));
 573
 574	return err;
 575}
 576
 577/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer.  */
 578static int fp_mode_get(struct task_struct *target,
 579		       const struct user_regset *regset,
 580		       unsigned int pos, unsigned int count,
 581		       void *kbuf, void __user *ubuf)
 582{
 583	int fp_mode;
 584
 585	fp_mode = mips_get_process_fp_mode(target);
 586	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
 587				   sizeof(fp_mode));
 588}
 589
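/*
 * Illustrative userspace sketch (not part of this file): reading the FP
 * mode through the NT_MIPS_FP_MODE regset served by fp_mode_get() above via
 * PTRACE_GETREGSET.  NT_MIPS_FP_MODE is assumed to be available from
 * <elf.h>; the note type goes in "addr", the iovec in "data".
 */
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static long example_get_fp_mode(pid_t pid, int *mode)
{
	struct iovec iov = {
		.iov_base = mode,
		.iov_len  = sizeof(*mode),
	};

	return ptrace(PTRACE_GETREGSET, pid,
		      (void *)(long)NT_MIPS_FP_MODE, &iov);
}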
 590/*
 591 * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
 592 *
 593 * We optimize for the case where `count % sizeof(int) == 0', which
 594 * is supposed to have been guaranteed by the kernel before calling
 595 * us, e.g. in `ptrace_regset'.  We enforce that requirement, so
 596 * that we can safely avoid preinitializing temporaries for partial
 597 * mode writes.
 598 */
 599static int fp_mode_set(struct task_struct *target,
 600		       const struct user_regset *regset,
 601		       unsigned int pos, unsigned int count,
 602		       const void *kbuf, const void __user *ubuf)
 603{
 604	int fp_mode;
 605	int err;
 606
 607	BUG_ON(count % sizeof(int));
 608
 609	if (pos + count > sizeof(fp_mode))
 610		return -EIO;
 611
 612	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
 613				 sizeof(fp_mode));
 614	if (err)
 615		return err;
 616
 617	if (count > 0)
 618		err = mips_set_process_fp_mode(target, fp_mode);
 619
 620	return err;
 621}
 622
 623#endif /* CONFIG_MIPS_FP_SUPPORT */
 624
 625#ifdef CONFIG_CPU_HAS_MSA
 626
 627struct msa_control_regs {
 628	unsigned int fir;
 629	unsigned int fcsr;
 630	unsigned int msair;
 631	unsigned int msacsr;
 632};
 633
 634static int copy_pad_fprs(struct task_struct *target,
 635			 const struct user_regset *regset,
 636			 unsigned int *ppos, unsigned int *pcount,
 637			 void **pkbuf, void __user **pubuf,
 638			 unsigned int live_sz)
 639{
 640	int i, j, start, start_pad, err;
 641	unsigned long long fill = ~0ull;
 642	unsigned int cp_sz, pad_sz;
 643
 644	cp_sz = min(regset->size, live_sz);
 645	pad_sz = regset->size - cp_sz;
 646	WARN_ON(pad_sz % sizeof(fill));
 647
 648	i = start = err = 0;
 649	for (; i < NUM_FPU_REGS; i++, start += regset->size) {
 650		err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf,
 651					   &target->thread.fpu.fpr[i],
 652					   start, start + cp_sz);
 653
 654		start_pad = start + cp_sz;
 655		for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
 656			err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf,
 657						   &fill, start_pad,
 658						   start_pad + sizeof(fill));
 659			start_pad += sizeof(fill);
 660		}
 661	}
 662
 663	return err;
 664}
 665
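/*
 * Worked example of the padding above: for the NT_MIPS_MSA regset,
 * regset->size is 16, so with live_sz == 8 each slot receives the 8 live
 * bytes of the FP register followed by 8 bytes of 0xff fill
 * (cp_sz == 8, pad_sz == 8, one iteration of the inner loop).
 */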
 666static int msa_get(struct task_struct *target,
 667		   const struct user_regset *regset,
 668		   unsigned int pos, unsigned int count,
 669		   void *kbuf, void __user *ubuf)
 670{
 671	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
 672	const struct msa_control_regs ctrl_regs = {
 673		.fir = boot_cpu_data.fpu_id,
 674		.fcsr = target->thread.fpu.fcr31,
 675		.msair = boot_cpu_data.msa_id,
 676		.msacsr = target->thread.fpu.msacsr,
 677	};
 678	int err;
 679
 680	if (!tsk_used_math(target)) {
 681		/* The task hasn't used FP or MSA, fill with 0xff */
 682		err = copy_pad_fprs(target, regset, &pos, &count,
 683				    &kbuf, &ubuf, 0);
 684	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
 685		/* Copy scalar FP context, fill the rest with 0xff */
 686		err = copy_pad_fprs(target, regset, &pos, &count,
 687				    &kbuf, &ubuf, 8);
 688	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
 689		/* Trivially copy the vector registers */
 690		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 691					  &target->thread.fpu.fpr,
 692					  0, wr_size);
 693	} else {
 694		/* Copy as much context as possible, fill the rest with 0xff */
 695		err = copy_pad_fprs(target, regset, &pos, &count,
 696				    &kbuf, &ubuf,
 697				    sizeof(target->thread.fpu.fpr[0]));
 698	}
 699
 700	err |= user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 701				   &ctrl_regs, wr_size,
 702				   wr_size + sizeof(ctrl_regs));
 703	return err;
 704}
 705
 706static int msa_set(struct task_struct *target,
 707		   const struct user_regset *regset,
 708		   unsigned int pos, unsigned int count,
 709		   const void *kbuf, const void __user *ubuf)
 710{
 711	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
 712	struct msa_control_regs ctrl_regs;
 713	unsigned int cp_sz;
 714	int i, err, start;
 715
 716	init_fp_ctx(target);
 717
 718	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
 719		/* Trivially copy the vector registers */
 720		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 721					 &target->thread.fpu.fpr,
 722					 0, wr_size);
 723	} else {
 724		/* Copy as much context as possible */
 725		cp_sz = min_t(unsigned int, regset->size,
 726			      sizeof(target->thread.fpu.fpr[0]));
 727
 728		i = start = err = 0;
 729		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
 730			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 731						  &target->thread.fpu.fpr[i],
 732						  start, start + cp_sz);
 733		}
 734	}
 735
 736	if (!err)
 737		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
 738					 wr_size, wr_size + sizeof(ctrl_regs));
 739	if (!err) {
 740		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
 741		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
 742	}
 743
 744	return err;
 745}
 746
 747#endif /* CONFIG_CPU_HAS_MSA */
 748
 749#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 750
 751/*
 752 * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
 753 */
 754static int dsp32_get(struct task_struct *target,
 755		     const struct user_regset *regset,
 756		     unsigned int pos, unsigned int count,
 757		     void *kbuf, void __user *ubuf)
 758{
 759	unsigned int start, num_regs, i;
 760	u32 dspregs[NUM_DSP_REGS + 1];
 761
 762	BUG_ON(count % sizeof(u32));
 763
 764	if (!cpu_has_dsp)
 765		return -EIO;
 766
 767	start = pos / sizeof(u32);
 768	num_regs = count / sizeof(u32);
 769
 770	if (start + num_regs > NUM_DSP_REGS + 1)
 771		return -EIO;
 772
 773	for (i = start; i < num_regs; i++)
 774		switch (i) {
 775		case 0 ... NUM_DSP_REGS - 1:
 776			dspregs[i] = target->thread.dsp.dspr[i];
 777			break;
 778		case NUM_DSP_REGS:
 779			dspregs[i] = target->thread.dsp.dspcontrol;
 780			break;
 781		}
 782	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 783				   sizeof(dspregs));
 784}
 785
 786/*
 787 * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
 788 */
 789static int dsp32_set(struct task_struct *target,
 790		     const struct user_regset *regset,
 791		     unsigned int pos, unsigned int count,
 792		     const void *kbuf, const void __user *ubuf)
 793{
 794	unsigned int start, num_regs, i;
 795	u32 dspregs[NUM_DSP_REGS + 1];
 796	int err;
 797
 798	BUG_ON(count % sizeof(u32));
 799
 800	if (!cpu_has_dsp)
 801		return -EIO;
 802
 803	start = pos / sizeof(u32);
 804	num_regs = count / sizeof(u32);
 805
 806	if (start + num_regs > NUM_DSP_REGS + 1)
 807		return -EIO;
 808
 809	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 810				 sizeof(dspregs));
 811	if (err)
 812		return err;
 813
 814	for (i = start; i < num_regs; i++)
 815		switch (i) {
 816		case 0 ... NUM_DSP_REGS - 1:
 817			target->thread.dsp.dspr[i] = (s32)dspregs[i];
 818			break;
 819		case NUM_DSP_REGS:
 820			target->thread.dsp.dspcontrol = (s32)dspregs[i];
 821			break;
 822		}
 823
 824	return 0;
 825}
 826
 827#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 828
 829#ifdef CONFIG_64BIT
 830
 831/*
 832 * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
 833 */
 834static int dsp64_get(struct task_struct *target,
 835		     const struct user_regset *regset,
 836		     unsigned int pos, unsigned int count,
 837		     void *kbuf, void __user *ubuf)
 838{
 839	unsigned int start, num_regs, i;
 840	u64 dspregs[NUM_DSP_REGS + 1];
 841
 842	BUG_ON(count % sizeof(u64));
 843
 844	if (!cpu_has_dsp)
 845		return -EIO;
 846
 847	start = pos / sizeof(u64);
 848	num_regs = count / sizeof(u64);
 849
 850	if (start + num_regs > NUM_DSP_REGS + 1)
 851		return -EIO;
 852
 853	for (i = start; i < num_regs; i++)
 854		switch (i) {
 855		case 0 ... NUM_DSP_REGS - 1:
 856			dspregs[i] = target->thread.dsp.dspr[i];
 857			break;
 858		case NUM_DSP_REGS:
 859			dspregs[i] = target->thread.dsp.dspcontrol;
 860			break;
 861		}
 862	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 863				   sizeof(dspregs));
 864}
 865
 866/*
 867 * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
 868 */
 869static int dsp64_set(struct task_struct *target,
 870		     const struct user_regset *regset,
 871		     unsigned int pos, unsigned int count,
 872		     const void *kbuf, const void __user *ubuf)
 873{
 874	unsigned int start, num_regs, i;
 875	u64 dspregs[NUM_DSP_REGS + 1];
 876	int err;
 877
 878	BUG_ON(count % sizeof(u64));
 879
 880	if (!cpu_has_dsp)
 881		return -EIO;
 882
 883	start = pos / sizeof(u64);
 884	num_regs = count / sizeof(u64);
 885
 886	if (start + num_regs > NUM_DSP_REGS + 1)
 887		return -EIO;
 888
 889	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 890				 sizeof(dspregs));
 891	if (err)
 892		return err;
 893
 894	for (i = start; i < num_regs; i++)
 895		switch (i) {
 896		case 0 ... NUM_DSP_REGS - 1:
 897			target->thread.dsp.dspr[i] = dspregs[i];
 898			break;
 899		case NUM_DSP_REGS:
 900			target->thread.dsp.dspcontrol = dspregs[i];
 901			break;
 902		}
 903
 904	return 0;
 905}
 906
 907#endif /* CONFIG_64BIT */
 908
 909/*
 910 * Determine whether the DSP context is present.
 911 */
 912static int dsp_active(struct task_struct *target,
 913		      const struct user_regset *regset)
 914{
 915	return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
 916}
 917
 918enum mips_regset {
 919	REGSET_GPR,
 920	REGSET_DSP,
 921#ifdef CONFIG_MIPS_FP_SUPPORT
 922	REGSET_FPR,
 923	REGSET_FP_MODE,
 924#endif
 925#ifdef CONFIG_CPU_HAS_MSA
 926	REGSET_MSA,
 927#endif
 928};
 929
 930struct pt_regs_offset {
 931	const char *name;
 932	int offset;
 933};
 934
 935#define REG_OFFSET_NAME(reg, r) {					\
 936	.name = #reg,							\
 937	.offset = offsetof(struct pt_regs, r)				\
 938}
 939
 940#define REG_OFFSET_END {						\
 941	.name = NULL,							\
 942	.offset = 0							\
 943}
 944
 945static const struct pt_regs_offset regoffset_table[] = {
 946	REG_OFFSET_NAME(r0, regs[0]),
 947	REG_OFFSET_NAME(r1, regs[1]),
 948	REG_OFFSET_NAME(r2, regs[2]),
 949	REG_OFFSET_NAME(r3, regs[3]),
 950	REG_OFFSET_NAME(r4, regs[4]),
 951	REG_OFFSET_NAME(r5, regs[5]),
 952	REG_OFFSET_NAME(r6, regs[6]),
 953	REG_OFFSET_NAME(r7, regs[7]),
 954	REG_OFFSET_NAME(r8, regs[8]),
 955	REG_OFFSET_NAME(r9, regs[9]),
 956	REG_OFFSET_NAME(r10, regs[10]),
 957	REG_OFFSET_NAME(r11, regs[11]),
 958	REG_OFFSET_NAME(r12, regs[12]),
 959	REG_OFFSET_NAME(r13, regs[13]),
 960	REG_OFFSET_NAME(r14, regs[14]),
 961	REG_OFFSET_NAME(r15, regs[15]),
 962	REG_OFFSET_NAME(r16, regs[16]),
 963	REG_OFFSET_NAME(r17, regs[17]),
 964	REG_OFFSET_NAME(r18, regs[18]),
 965	REG_OFFSET_NAME(r19, regs[19]),
 966	REG_OFFSET_NAME(r20, regs[20]),
 967	REG_OFFSET_NAME(r21, regs[21]),
 968	REG_OFFSET_NAME(r22, regs[22]),
 969	REG_OFFSET_NAME(r23, regs[23]),
 970	REG_OFFSET_NAME(r24, regs[24]),
 971	REG_OFFSET_NAME(r25, regs[25]),
 972	REG_OFFSET_NAME(r26, regs[26]),
 973	REG_OFFSET_NAME(r27, regs[27]),
 974	REG_OFFSET_NAME(r28, regs[28]),
 975	REG_OFFSET_NAME(r29, regs[29]),
 976	REG_OFFSET_NAME(r30, regs[30]),
 977	REG_OFFSET_NAME(r31, regs[31]),
 978	REG_OFFSET_NAME(c0_status, cp0_status),
 979	REG_OFFSET_NAME(hi, hi),
 980	REG_OFFSET_NAME(lo, lo),
 981#ifdef CONFIG_CPU_HAS_SMARTMIPS
 982	REG_OFFSET_NAME(acx, acx),
 983#endif
 984	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
 985	REG_OFFSET_NAME(c0_cause, cp0_cause),
 986	REG_OFFSET_NAME(c0_epc, cp0_epc),
 987#ifdef CONFIG_CPU_CAVIUM_OCTEON
 988	REG_OFFSET_NAME(mpl0, mpl[0]),
 989	REG_OFFSET_NAME(mpl1, mpl[1]),
 990	REG_OFFSET_NAME(mpl2, mpl[2]),
 991	REG_OFFSET_NAME(mtp0, mtp[0]),
 992	REG_OFFSET_NAME(mtp1, mtp[1]),
 993	REG_OFFSET_NAME(mtp2, mtp[2]),
 994#endif
 995	REG_OFFSET_END,
 996};
 997
 998/**
 999 * regs_query_register_offset() - query register offset from its name
1000 * @name:       the name of a register
1001 *
1002 * regs_query_register_offset() returns the offset of a register in struct
1003 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
1004 */
1005int regs_query_register_offset(const char *name)
1006{
1007        const struct pt_regs_offset *roff;
1008        for (roff = regoffset_table; roff->name != NULL; roff++)
1009                if (!strcmp(roff->name, name))
1010                        return roff->offset;
1011        return -EINVAL;
1012}
1013
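/*
 * Illustrative in-kernel sketch (not part of this file): resolving a
 * register by name and reading it from a pt_regs, much as the kprobe
 * event parsing code uses this table.  regs_get_register() is assumed to
 * be the accessor provided by <asm/ptrace.h>.
 */
static unsigned long example_read_named_reg(struct pt_regs *regs,
					    const char *name)
{
	int offset = regs_query_register_offset(name);	/* e.g. "c0_epc" */

	if (offset < 0)
		return 0;	/* unknown register name */
	return regs_get_register(regs, offset);
}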
1014#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
1015
1016static const struct user_regset mips_regsets[] = {
1017	[REGSET_GPR] = {
1018		.core_note_type	= NT_PRSTATUS,
1019		.n		= ELF_NGREG,
1020		.size		= sizeof(unsigned int),
1021		.align		= sizeof(unsigned int),
1022		.get		= gpr32_get,
1023		.set		= gpr32_set,
1024	},
1025	[REGSET_DSP] = {
1026		.core_note_type	= NT_MIPS_DSP,
1027		.n		= NUM_DSP_REGS + 1,
1028		.size		= sizeof(u32),
1029		.align		= sizeof(u32),
1030		.get		= dsp32_get,
1031		.set		= dsp32_set,
1032		.active		= dsp_active,
1033	},
1034#ifdef CONFIG_MIPS_FP_SUPPORT
1035	[REGSET_FPR] = {
1036		.core_note_type	= NT_PRFPREG,
1037		.n		= ELF_NFPREG,
1038		.size		= sizeof(elf_fpreg_t),
1039		.align		= sizeof(elf_fpreg_t),
1040		.get		= fpr_get,
1041		.set		= fpr_set,
1042	},
1043	[REGSET_FP_MODE] = {
1044		.core_note_type	= NT_MIPS_FP_MODE,
1045		.n		= 1,
1046		.size		= sizeof(int),
1047		.align		= sizeof(int),
1048		.get		= fp_mode_get,
1049		.set		= fp_mode_set,
1050	},
1051#endif
1052#ifdef CONFIG_CPU_HAS_MSA
1053	[REGSET_MSA] = {
1054		.core_note_type	= NT_MIPS_MSA,
1055		.n		= NUM_FPU_REGS + 1,
1056		.size		= 16,
1057		.align		= 16,
1058		.get		= msa_get,
1059		.set		= msa_set,
1060	},
1061#endif
1062};
1063
1064static const struct user_regset_view user_mips_view = {
1065	.name		= "mips",
1066	.e_machine	= ELF_ARCH,
1067	.ei_osabi	= ELF_OSABI,
1068	.regsets	= mips_regsets,
1069	.n		= ARRAY_SIZE(mips_regsets),
1070};
1071
1072#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
1073
1074#ifdef CONFIG_64BIT
1075
1076static const struct user_regset mips64_regsets[] = {
1077	[REGSET_GPR] = {
1078		.core_note_type	= NT_PRSTATUS,
1079		.n		= ELF_NGREG,
1080		.size		= sizeof(unsigned long),
1081		.align		= sizeof(unsigned long),
1082		.get		= gpr64_get,
1083		.set		= gpr64_set,
1084	},
1085	[REGSET_DSP] = {
1086		.core_note_type	= NT_MIPS_DSP,
1087		.n		= NUM_DSP_REGS + 1,
1088		.size		= sizeof(u64),
1089		.align		= sizeof(u64),
1090		.get		= dsp64_get,
1091		.set		= dsp64_set,
1092		.active		= dsp_active,
1093	},
1094#ifdef CONFIG_MIPS_FP_SUPPORT
1095	[REGSET_FP_MODE] = {
1096		.core_note_type	= NT_MIPS_FP_MODE,
1097		.n		= 1,
1098		.size		= sizeof(int),
1099		.align		= sizeof(int),
1100		.get		= fp_mode_get,
1101		.set		= fp_mode_set,
1102	},
1103	[REGSET_FPR] = {
1104		.core_note_type	= NT_PRFPREG,
1105		.n		= ELF_NFPREG,
1106		.size		= sizeof(elf_fpreg_t),
1107		.align		= sizeof(elf_fpreg_t),
1108		.get		= fpr_get,
1109		.set		= fpr_set,
1110	},
1111#endif
1112#ifdef CONFIG_CPU_HAS_MSA
1113	[REGSET_MSA] = {
1114		.core_note_type	= NT_MIPS_MSA,
1115		.n		= NUM_FPU_REGS + 1,
1116		.size		= 16,
1117		.align		= 16,
1118		.get		= msa_get,
1119		.set		= msa_set,
1120	},
1121#endif
1122};
1123
1124static const struct user_regset_view user_mips64_view = {
1125	.name		= "mips64",
1126	.e_machine	= ELF_ARCH,
1127	.ei_osabi	= ELF_OSABI,
1128	.regsets	= mips64_regsets,
1129	.n		= ARRAY_SIZE(mips64_regsets),
1130};
1131
1132#ifdef CONFIG_MIPS32_N32
1133
1134static const struct user_regset_view user_mipsn32_view = {
1135	.name		= "mipsn32",
1136	.e_flags	= EF_MIPS_ABI2,
1137	.e_machine	= ELF_ARCH,
1138	.ei_osabi	= ELF_OSABI,
1139	.regsets	= mips64_regsets,
1140	.n		= ARRAY_SIZE(mips64_regsets),
1141};
1142
1143#endif /* CONFIG_MIPS32_N32 */
1144
1145#endif /* CONFIG_64BIT */
1146
1147const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1148{
1149#ifdef CONFIG_32BIT
1150	return &user_mips_view;
1151#else
1152#ifdef CONFIG_MIPS32_O32
1153	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
1154		return &user_mips_view;
1155#endif
1156#ifdef CONFIG_MIPS32_N32
1157	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
1158		return &user_mipsn32_view;
1159#endif
1160	return &user_mips64_view;
1161#endif
1162}
1163
1164long arch_ptrace(struct task_struct *child, long request,
1165		 unsigned long addr, unsigned long data)
1166{
1167	int ret;
1168	void __user *addrp = (void __user *) addr;
1169	void __user *datavp = (void __user *) data;
1170	unsigned long __user *datalp = (void __user *) data;
1171
1172	switch (request) {
1173	/* when I and D space are separate, these will need to be fixed. */
1174	case PTRACE_PEEKTEXT: /* read word at location addr. */
1175	case PTRACE_PEEKDATA:
1176		ret = generic_ptrace_peekdata(child, addr, data);
1177		break;
1178
1179	/* Read the word at location addr in the USER area. */
1180	case PTRACE_PEEKUSR: {
1181		struct pt_regs *regs;
1182		unsigned long tmp = 0;
1183
1184		regs = task_pt_regs(child);
1185		ret = 0;  /* Default return value. */
1186
1187		switch (addr) {
1188		case 0 ... 31:
1189			tmp = regs->regs[addr];
1190			break;
1191#ifdef CONFIG_MIPS_FP_SUPPORT
1192		case FPR_BASE ... FPR_BASE + 31: {
1193			union fpureg *fregs;
1194
1195			if (!tsk_used_math(child)) {
1196				/* FP not yet used */
1197				tmp = -1;
1198				break;
1199			}
1200			fregs = get_fpu_regs(child);
1201
1202#ifdef CONFIG_32BIT
1203			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1204				/*
1205				 * The odd registers are actually the high
1206				 * order bits of the values stored in the even
1207				 * registers.
1208				 */
1209				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1210						addr & 1);
1211				break;
1212			}
1213#endif
1214			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
1215			break;
1216		}
1217		case FPC_CSR:
1218			tmp = child->thread.fpu.fcr31;
1219			break;
1220		case FPC_EIR:
1221			/* implementation / version register */
1222			tmp = boot_cpu_data.fpu_id;
1223			break;
1224#endif
1225		case PC:
1226			tmp = regs->cp0_epc;
1227			break;
1228		case CAUSE:
1229			tmp = regs->cp0_cause;
1230			break;
1231		case BADVADDR:
1232			tmp = regs->cp0_badvaddr;
1233			break;
1234		case MMHI:
1235			tmp = regs->hi;
1236			break;
1237		case MMLO:
1238			tmp = regs->lo;
1239			break;
1240#ifdef CONFIG_CPU_HAS_SMARTMIPS
1241		case ACX:
1242			tmp = regs->acx;
1243			break;
1244#endif
1245		case DSP_BASE ... DSP_BASE + 5: {
1246			dspreg_t *dregs;
1247
1248			if (!cpu_has_dsp) {
1249				tmp = 0;
1250				ret = -EIO;
1251				goto out;
1252			}
1253			dregs = __get_dsp_regs(child);
1254			tmp = dregs[addr - DSP_BASE];
1255			break;
1256		}
1257		case DSP_CONTROL:
1258			if (!cpu_has_dsp) {
1259				tmp = 0;
1260				ret = -EIO;
1261				goto out;
1262			}
1263			tmp = child->thread.dsp.dspcontrol;
1264			break;
1265		default:
1266			tmp = 0;
1267			ret = -EIO;
1268			goto out;
1269		}
1270		ret = put_user(tmp, datalp);
1271		break;
1272	}
1273
1274	/* when I and D space are separate, this will have to be fixed. */
1275	case PTRACE_POKETEXT: /* write the word at location addr. */
1276	case PTRACE_POKEDATA:
1277		ret = generic_ptrace_pokedata(child, addr, data);
1278		break;
1279
1280	case PTRACE_POKEUSR: {
1281		struct pt_regs *regs;
1282		ret = 0;
1283		regs = task_pt_regs(child);
1284
1285		switch (addr) {
1286		case 0 ... 31:
1287			regs->regs[addr] = data;
1288			/* System call number may have been changed */
1289			if (addr == 2)
1290				mips_syscall_update_nr(child, regs);
1291			else if (addr == 4 &&
1292				 mips_syscall_is_indirect(child, regs))
1293				mips_syscall_update_nr(child, regs);
1294			break;
1295#ifdef CONFIG_MIPS_FP_SUPPORT
1296		case FPR_BASE ... FPR_BASE + 31: {
1297			union fpureg *fregs = get_fpu_regs(child);
1298
1299			init_fp_ctx(child);
1300#ifdef CONFIG_32BIT
1301			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1302				/*
1303				 * The odd registers are actually the high
1304				 * order bits of the values stored in the even
1305				 * registers.
1306				 */
1307				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1308					  addr & 1, data);
1309				break;
1310			}
1311#endif
1312			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
1313			break;
1314		}
1315		case FPC_CSR:
1316			init_fp_ctx(child);
1317			ptrace_setfcr31(child, data);
1318			break;
1319#endif
1320		case PC:
1321			regs->cp0_epc = data;
1322			break;
1323		case MMHI:
1324			regs->hi = data;
1325			break;
1326		case MMLO:
1327			regs->lo = data;
1328			break;
1329#ifdef CONFIG_CPU_HAS_SMARTMIPS
1330		case ACX:
1331			regs->acx = data;
1332			break;
1333#endif
1334		case DSP_BASE ... DSP_BASE + 5: {
1335			dspreg_t *dregs;
1336
1337			if (!cpu_has_dsp) {
1338				ret = -EIO;
1339				break;
1340			}
1341
1342			dregs = __get_dsp_regs(child);
1343			dregs[addr - DSP_BASE] = data;
1344			break;
1345		}
1346		case DSP_CONTROL:
1347			if (!cpu_has_dsp) {
1348				ret = -EIO;
1349				break;
1350			}
1351			child->thread.dsp.dspcontrol = data;
1352			break;
1353		default:
1354			/* The rest are not allowed. */
1355			ret = -EIO;
1356			break;
1357		}
1358		break;
1359		}
1360
1361	case PTRACE_GETREGS:
1362		ret = ptrace_getregs(child, datavp);
1363		break;
1364
1365	case PTRACE_SETREGS:
1366		ret = ptrace_setregs(child, datavp);
1367		break;
1368
1369#ifdef CONFIG_MIPS_FP_SUPPORT
1370	case PTRACE_GETFPREGS:
1371		ret = ptrace_getfpregs(child, datavp);
1372		break;
1373
1374	case PTRACE_SETFPREGS:
1375		ret = ptrace_setfpregs(child, datavp);
1376		break;
1377#endif
1378	case PTRACE_GET_THREAD_AREA:
1379		ret = put_user(task_thread_info(child)->tp_value, datalp);
1380		break;
1381
1382	case PTRACE_GET_WATCH_REGS:
1383		ret = ptrace_get_watch_regs(child, addrp);
1384		break;
1385
1386	case PTRACE_SET_WATCH_REGS:
1387		ret = ptrace_set_watch_regs(child, addrp);
1388		break;
1389
1390	default:
1391		ret = ptrace_request(child, request, addr, data);
1392		break;
1393	}
1394 out:
1395	return ret;
1396}
1397
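/*
 * Illustrative userspace sketch (not part of this file): peeking individual
 * "USER area" slots handled by the PTRACE_PEEKUSR case above.  PC, FPR_BASE
 * and FPC_CSR are assumed to be visible via the uapi <asm/ptrace.h>; glibc
 * spells the request PTRACE_PEEKUSER.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>

static void example_peek_user_area(pid_t pid)
{
	long pc   = ptrace(PTRACE_PEEKUSER, pid, (void *)(long)PC, NULL);
	long f0   = ptrace(PTRACE_PEEKUSER, pid,
			   (void *)(long)(FPR_BASE + 0), NULL);
	long fcsr = ptrace(PTRACE_PEEKUSER, pid, (void *)(long)FPC_CSR, NULL);

	/* each call returns the value, or -1 with errno set on error */
	(void)pc; (void)f0; (void)fcsr;
}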
1398/*
1399 * Notification of system call entry/exit
1400 * - triggered by current->work.syscall_trace
1401 */
1402asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
1403{
1404	user_exit();
1405
1406	current_thread_info()->syscall = syscall;
1407
1408	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
1409		if (tracehook_report_syscall_entry(regs))
1410			return -1;
1411		syscall = current_thread_info()->syscall;
1412	}
1413
1414#ifdef CONFIG_SECCOMP
1415	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
1416		int ret, i;
1417		struct seccomp_data sd;
1418		unsigned long args[6];
1419
1420		sd.nr = syscall;
1421		sd.arch = syscall_get_arch(current);
1422		syscall_get_arguments(current, regs, args);
1423		for (i = 0; i < 6; i++)
1424			sd.args[i] = args[i];
1425		sd.instruction_pointer = KSTK_EIP(current);
1426
1427		ret = __secure_computing(&sd);
1428		if (ret == -1)
1429			return ret;
1430		syscall = current_thread_info()->syscall;
1431	}
1432#endif
1433
1434	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1435		trace_sys_enter(regs, regs->regs[2]);
1436
1437	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
1438			    regs->regs[6], regs->regs[7]);
1439
1440	/*
1441	 * Negative syscall numbers are mistaken for rejected syscalls, but
1442	 * won't have had the return value set appropriately, so we do so now.
1443	 */
1444	if (syscall < 0)
1445		syscall_set_return_value(current, regs, -ENOSYS, 0);
1446	return syscall;
1447}
1448
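/*
 * Illustrative tracer-side sketch (not part of this file): resuming a
 * tracee until the next syscall stop and reading the number of a directly
 * invoked syscall from $v0 (USER-area slot 2), the register that the
 * PTRACE_POKEUSR handling above watches for syscall-number changes.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static long example_next_syscall_nr(pid_t pid)
{
	if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) < 0)
		return -1;
	if (waitpid(pid, NULL, 0) < 0)	/* wait for the syscall-entry stop */
		return -1;
	return ptrace(PTRACE_PEEKUSER, pid, (void *)2L, NULL);	/* $v0 */
}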
1449/*
1450 * Notification of system call entry/exit
1451 * - triggered by current->work.syscall_trace
1452 */
1453asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1454{
1455        /*
1456	 * We may come here right after calling schedule_user()
1457	 * or do_notify_resume(), in which case we can be in RCU
1458	 * user mode.
1459	 */
1460	user_exit();
1461
1462	audit_syscall_exit(regs);
1463
1464	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1465		trace_sys_exit(regs, regs_return_value(regs));
1466
1467	if (test_thread_flag(TIF_SYSCALL_TRACE))
1468		tracehook_report_syscall_exit(regs, 0);
1469
1470	user_enter();
1471}
v5.9
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1992 Ross Biro
   7 * Copyright (C) Linus Torvalds
   8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
   9 * Copyright (C) 1996 David S. Miller
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 1999 MIPS Technologies, Inc.
  12 * Copyright (C) 2000 Ulf Carlsson
  13 *
  14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
  15 * binaries.
  16 */
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/elf.h>
  20#include <linux/kernel.h>
  21#include <linux/sched.h>
  22#include <linux/sched/task_stack.h>
  23#include <linux/mm.h>
  24#include <linux/errno.h>
  25#include <linux/ptrace.h>
  26#include <linux/regset.h>
  27#include <linux/smp.h>
  28#include <linux/security.h>
  29#include <linux/stddef.h>
  30#include <linux/tracehook.h>
  31#include <linux/audit.h>
  32#include <linux/seccomp.h>
  33#include <linux/ftrace.h>
  34
  35#include <asm/byteorder.h>
  36#include <asm/cpu.h>
  37#include <asm/cpu-info.h>
  38#include <asm/dsp.h>
  39#include <asm/fpu.h>
  40#include <asm/mipsregs.h>
  41#include <asm/mipsmtregs.h>
  42#include <asm/page.h>
  43#include <asm/processor.h>
  44#include <asm/syscall.h>
  45#include <linux/uaccess.h>
  46#include <asm/bootinfo.h>
  47#include <asm/reg.h>
  48
  49#define CREATE_TRACE_POINTS
  50#include <trace/events/syscalls.h>
  51
  52/*
   53 * Called by kernel/ptrace.c when detaching.
  54 *
  55 * Make sure single step bits etc are not set.
  56 */
  57void ptrace_disable(struct task_struct *child)
  58{
  59	/* Don't load the watchpoint registers for the ex-child. */
  60	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
  61}
  62
  63/*
  64 * Read a general register set.	 We always use the 64-bit format, even
  65 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
  66 * Registers are sign extended to fill the available space.
  67 */
  68int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
  69{
  70	struct pt_regs *regs;
  71	int i;
  72
  73	if (!access_ok(data, 38 * 8))
  74		return -EIO;
  75
  76	regs = task_pt_regs(child);
  77
  78	for (i = 0; i < 32; i++)
  79		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
  80	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
  81	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
  82	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
  83	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
  84	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
  85	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
  86
  87	return 0;
  88}
  89
  90/*
  91 * Write a general register set.  As for PTRACE_GETREGS, we always use
  92 * the 64-bit format.  On a 32-bit kernel only the lower order half
  93 * (according to endianness) will be used.
  94 */
  95int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
  96{
  97	struct pt_regs *regs;
  98	int i;
  99
 100	if (!access_ok(data, 38 * 8))
 101		return -EIO;
 102
 103	regs = task_pt_regs(child);
 104
 105	for (i = 0; i < 32; i++)
 106		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
 107	__get_user(regs->lo, (__s64 __user *)&data->lo);
 108	__get_user(regs->hi, (__s64 __user *)&data->hi);
 109	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
 110
 111	/* badvaddr, status, and cause may not be written.  */
 112
 113	/* System call number may have been changed */
 114	mips_syscall_update_nr(child, regs);
 115
 116	return 0;
 117}
 118
 119int ptrace_get_watch_regs(struct task_struct *child,
 120			  struct pt_watch_regs __user *addr)
 121{
 122	enum pt_watch_style style;
 123	int i;
 124
 125	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 126		return -EIO;
 127	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
 128		return -EIO;
 129
 130#ifdef CONFIG_32BIT
 131	style = pt_watch_style_mips32;
 132#define WATCH_STYLE mips32
 133#else
 134	style = pt_watch_style_mips64;
 135#define WATCH_STYLE mips64
 136#endif
 137
 138	__put_user(style, &addr->style);
 139	__put_user(boot_cpu_data.watch_reg_use_cnt,
 140		   &addr->WATCH_STYLE.num_valid);
 141	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 142		__put_user(child->thread.watch.mips3264.watchlo[i],
 143			   &addr->WATCH_STYLE.watchlo[i]);
 144		__put_user(child->thread.watch.mips3264.watchhi[i] &
 145				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
 146			   &addr->WATCH_STYLE.watchhi[i]);
 147		__put_user(boot_cpu_data.watch_reg_masks[i],
 148			   &addr->WATCH_STYLE.watch_masks[i]);
 149	}
 150	for (; i < 8; i++) {
 151		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
 152		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
 153		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
 154	}
 155
 156	return 0;
 157}
 158
 159int ptrace_set_watch_regs(struct task_struct *child,
 160			  struct pt_watch_regs __user *addr)
 161{
 162	int i;
 163	int watch_active = 0;
 164	unsigned long lt[NUM_WATCH_REGS];
 165	u16 ht[NUM_WATCH_REGS];
 166
 167	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 168		return -EIO;
 169	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
 170		return -EIO;
 171	/* Check the values. */
 172	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 173		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
 174#ifdef CONFIG_32BIT
 175		if (lt[i] & __UA_LIMIT)
 176			return -EINVAL;
 177#else
 178		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
 179			if (lt[i] & 0xffffffff80000000UL)
 180				return -EINVAL;
 181		} else {
 182			if (lt[i] & __UA_LIMIT)
 183				return -EINVAL;
 184		}
 185#endif
 186		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
 187		if (ht[i] & ~MIPS_WATCHHI_MASK)
 188			return -EINVAL;
 189	}
 190	/* Install them. */
 191	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 192		if (lt[i] & MIPS_WATCHLO_IRW)
 193			watch_active = 1;
 194		child->thread.watch.mips3264.watchlo[i] = lt[i];
 195		/* Set the G bit. */
 196		child->thread.watch.mips3264.watchhi[i] = ht[i];
 197	}
 198
 199	if (watch_active)
 200		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
 201	else
 202		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
 203
 204	return 0;
 205}
 206
 207/* regset get/set implementations */
 208
 209#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 210
 211static int gpr32_get(struct task_struct *target,
 212		     const struct user_regset *regset,
 213		     struct membuf to)
 214{
 215	struct pt_regs *regs = task_pt_regs(target);
 216	u32 uregs[ELF_NGREG] = {};
 217
 218	mips_dump_regs32(uregs, regs);
 219	return membuf_write(&to, uregs, sizeof(uregs));
 220}
 221
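/*
 * In this newer regset API the ->get() handlers receive a struct membuf
 * cursor instead of the pos/count/kbuf/ubuf quadruple; membuf_write() and
 * membuf_store() copy into the caller-sized buffer and advance the cursor,
 * which is why the per-chunk error plumbing needed with
 * user_regset_copyout() in the v5.4 listing above is gone here.
 */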
 222static int gpr32_set(struct task_struct *target,
 223		     const struct user_regset *regset,
 224		     unsigned int pos, unsigned int count,
 225		     const void *kbuf, const void __user *ubuf)
 226{
 227	struct pt_regs *regs = task_pt_regs(target);
 228	u32 uregs[ELF_NGREG];
 229	unsigned start, num_regs, i;
 230	int err;
 231
 232	start = pos / sizeof(u32);
 233	num_regs = count / sizeof(u32);
 234
 235	if (start + num_regs > ELF_NGREG)
 236		return -EIO;
 237
 238	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 239				 sizeof(uregs));
 240	if (err)
 241		return err;
 242
 243	for (i = start; i < num_regs; i++) {
 244		/*
 245		 * Cast all values to signed here so that if this is a 64-bit
 246		 * kernel, the supplied 32-bit values will be sign extended.
 247		 */
 248		switch (i) {
 249		case MIPS32_EF_R1 ... MIPS32_EF_R25:
 250			/* k0/k1 are ignored. */
 251		case MIPS32_EF_R28 ... MIPS32_EF_R31:
 252			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
 253			break;
 254		case MIPS32_EF_LO:
 255			regs->lo = (s32)uregs[i];
 256			break;
 257		case MIPS32_EF_HI:
 258			regs->hi = (s32)uregs[i];
 259			break;
 260		case MIPS32_EF_CP0_EPC:
 261			regs->cp0_epc = (s32)uregs[i];
 262			break;
 263		}
 264	}
 265
 266	/* System call number may have been changed */
 267	mips_syscall_update_nr(target, regs);
 268
 269	return 0;
 270}
 271
 272#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 273
 274#ifdef CONFIG_64BIT
 275
 276static int gpr64_get(struct task_struct *target,
 277		     const struct user_regset *regset,
 278		     struct membuf to)
 279{
 280	struct pt_regs *regs = task_pt_regs(target);
 281	u64 uregs[ELF_NGREG] = {};
 282
 283	mips_dump_regs64(uregs, regs);
 284	return membuf_write(&to, uregs, sizeof(uregs));
 285}
 286
 287static int gpr64_set(struct task_struct *target,
 288		     const struct user_regset *regset,
 289		     unsigned int pos, unsigned int count,
 290		     const void *kbuf, const void __user *ubuf)
 291{
 292	struct pt_regs *regs = task_pt_regs(target);
 293	u64 uregs[ELF_NGREG];
 294	unsigned start, num_regs, i;
 295	int err;
 296
 297	start = pos / sizeof(u64);
 298	num_regs = count / sizeof(u64);
 299
 300	if (start + num_regs > ELF_NGREG)
 301		return -EIO;
 302
 303	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 304				 sizeof(uregs));
 305	if (err)
 306		return err;
 307
 308	for (i = start; i < num_regs; i++) {
 309		switch (i) {
 310		case MIPS64_EF_R1 ... MIPS64_EF_R25:
 311			/* k0/k1 are ignored. */
 312		case MIPS64_EF_R28 ... MIPS64_EF_R31:
 313			regs->regs[i - MIPS64_EF_R0] = uregs[i];
 314			break;
 315		case MIPS64_EF_LO:
 316			regs->lo = uregs[i];
 317			break;
 318		case MIPS64_EF_HI:
 319			regs->hi = uregs[i];
 320			break;
 321		case MIPS64_EF_CP0_EPC:
 322			regs->cp0_epc = uregs[i];
 323			break;
 324		}
 325	}
 326
 327	/* System call number may have been changed */
 328	mips_syscall_update_nr(target, regs);
 329
 330	return 0;
 331}
 332
 333#endif /* CONFIG_64BIT */
 334
 335
 336#ifdef CONFIG_MIPS_FP_SUPPORT
 337
 338/*
 339 * Poke at FCSR according to its mask.  Set the Cause bits even
 340 * if a corresponding Enable bit is set.  This will be noticed at
 341 * the time the thread is switched to and SIGFPE thrown accordingly.
 342 */
 343static void ptrace_setfcr31(struct task_struct *child, u32 value)
 344{
 345	u32 fcr31;
 346	u32 mask;
 347
 348	fcr31 = child->thread.fpu.fcr31;
 349	mask = boot_cpu_data.fpu_msk31;
 350	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
 351}
 352
 353int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
 354{
 355	int i;
 356
 357	if (!access_ok(data, 33 * 8))
 358		return -EIO;
 359
 360	if (tsk_used_math(child)) {
 361		union fpureg *fregs = get_fpu_regs(child);
 362		for (i = 0; i < 32; i++)
 363			__put_user(get_fpr64(&fregs[i], 0),
 364				   i + (__u64 __user *)data);
 365	} else {
 366		for (i = 0; i < 32; i++)
 367			__put_user((__u64) -1, i + (__u64 __user *) data);
 368	}
 369
 370	__put_user(child->thread.fpu.fcr31, data + 64);
 371	__put_user(boot_cpu_data.fpu_id, data + 65);
 372
 373	return 0;
 374}
 375
 376int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
 377{
 378	union fpureg *fregs;
 379	u64 fpr_val;
 380	u32 value;
 381	int i;
 382
 383	if (!access_ok(data, 33 * 8))
 384		return -EIO;
 385
 386	init_fp_ctx(child);
 387	fregs = get_fpu_regs(child);
 388
 389	for (i = 0; i < 32; i++) {
 390		__get_user(fpr_val, i + (__u64 __user *)data);
 391		set_fpr64(&fregs[i], 0, fpr_val);
 392	}
 393
 394	__get_user(value, data + 64);
 395	ptrace_setfcr31(child, value);
 396
 397	/* FIR may not be written.  */
 398
 399	return 0;
 400}
 401
 402/*
 403 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 404 * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
 405 * correspond 1:1 to buffer slots.  Only general registers are copied.
 406 */
 407static void fpr_get_fpa(struct task_struct *target,
 408		       struct membuf *to)
 409{
 410	membuf_write(to, &target->thread.fpu,
 411			NUM_FPU_REGS * sizeof(elf_fpreg_t));
 412}
 413
 414/*
 415 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 416 * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
 417 * general register slots are copied to buffer slots.  Only general
 418 * registers are copied.
 419 */
 420static void fpr_get_msa(struct task_struct *target, struct membuf *to)
 421{
 422	unsigned int i;
 423
 424	BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));
 425	for (i = 0; i < NUM_FPU_REGS; i++)
 426		membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
 427}
 428
 429/*
 430 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
 431 * Choose the appropriate helper for general registers, and then copy
 432 * the FCSR and FIR registers separately.
 433 */
 434static int fpr_get(struct task_struct *target,
 435		   const struct user_regset *regset,
 436		   struct membuf to)
 437{
 438	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 439		fpr_get_fpa(target, &to);
 440	else
 441		fpr_get_msa(target, &to);
 442
 443	membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
 444	membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32));
 445	return 0;
 446}
 447
 448/*
 449 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 450 * !CONFIG_CPU_HAS_MSA variant.   Buffer slots correspond 1:1 to FP
 451 * context's general register slots.  Only general registers are copied.
 452 */
 453static int fpr_set_fpa(struct task_struct *target,
 454		       unsigned int *pos, unsigned int *count,
 455		       const void **kbuf, const void __user **ubuf)
 456{
 457	return user_regset_copyin(pos, count, kbuf, ubuf,
 458				  &target->thread.fpu,
 459				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
 460}
 461
 462/*
 463 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 464 * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
 465 * bits only of FP context's general register slots.  Only general
 466 * registers are copied.
 467 */
 468static int fpr_set_msa(struct task_struct *target,
 469		       unsigned int *pos, unsigned int *count,
 470		       const void **kbuf, const void __user **ubuf)
 471{
 472	unsigned int i;
 473	u64 fpr_val;
 474	int err;
 475
 476	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 477	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
 478		err = user_regset_copyin(pos, count, kbuf, ubuf,
 479					 &fpr_val, i * sizeof(elf_fpreg_t),
 480					 (i + 1) * sizeof(elf_fpreg_t));
 481		if (err)
 482			return err;
 483		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
 484	}
 485
 486	return 0;
 487}
 488
 489/*
 490 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 491 * Choose the appropriate helper for general registers, and then copy
 492 * the FCSR register separately.  Ignore the incoming FIR register
 493 * contents though, as the register is read-only.
 494 *
 495 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 496 * which is supposed to have been guaranteed by the kernel before
 497 * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
 498 * so that we can safely avoid preinitializing temporaries for
 499 * partial register writes.
 500 */
 501static int fpr_set(struct task_struct *target,
 502		   const struct user_regset *regset,
 503		   unsigned int pos, unsigned int count,
 504		   const void *kbuf, const void __user *ubuf)
 505{
 506	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 507	const int fir_pos = fcr31_pos + sizeof(u32);
 508	u32 fcr31;
 509	int err;
 510
 511	BUG_ON(count % sizeof(elf_fpreg_t));
 512
 513	if (pos + count > sizeof(elf_fpregset_t))
 514		return -EIO;
 515
 516	init_fp_ctx(target);
 517
 518	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 519		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
 520	else
 521		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
 522	if (err)
 523		return err;
 524
 525	if (count > 0) {
 526		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 527					 &fcr31,
 528					 fcr31_pos, fcr31_pos + sizeof(u32));
 529		if (err)
 530			return err;
 531
 532		ptrace_setfcr31(target, fcr31);
 533	}
 534
 535	if (count > 0)
 536		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
 537						fir_pos,
 538						fir_pos + sizeof(u32));
 539
 540	return err;
 541}
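/*
 * Illustrative sketch, not part of this file: fpr_get()/fpr_set() above
 * serve the same 32 x 64-bit FPR + FCSR + FIR layout through the regset
 * interface.  Assuming the glibc <sys/ptrace.h> and <elf.h> definitions
 * of PTRACE_GETREGSET and NT_PRFPREG, a tracer might read it with:
 *
 *	#include <elf.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *
 *	static int read_prfpreg(pid_t pid, uint64_t fpr[32], uint32_t *fcsr)
 *	{
 *		uint64_t buf[33];
 *		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *
 *		if (ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov) < 0)
 *			return -1;
 *		memcpy(fpr, buf, 32 * sizeof(uint64_t));
 *		*fcsr = ((uint32_t *)buf)[64];	/* FIR follows at word 65 */
 *		return 0;
 *	}
 */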
 542
 543/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer.  */
 544static int fp_mode_get(struct task_struct *target,
 545		       const struct user_regset *regset,
 546		       struct membuf to)
 547{
 548	return membuf_store(&to, (int)mips_get_process_fp_mode(target));
 549}
 550
 551/*
 552 * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
 553 *
 554 * We optimize for the case where `count % sizeof(int) == 0', which
 555 * is supposed to have been guaranteed by the kernel before calling
 556 * us, e.g. in `ptrace_regset'.  We enforce that requirement, so
 557 * that we can safely avoid preinitializing temporaries for partial
 558 * mode writes.
 559 */
 560static int fp_mode_set(struct task_struct *target,
 561		       const struct user_regset *regset,
 562		       unsigned int pos, unsigned int count,
 563		       const void *kbuf, const void __user *ubuf)
 564{
 565	int fp_mode;
 566	int err;
 567
 568	BUG_ON(count % sizeof(int));
 569
 570	if (pos + count > sizeof(fp_mode))
 571		return -EIO;
 572
 573	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
 574				 sizeof(fp_mode));
 575	if (err)
 576		return err;
 577
 578	if (count > 0)
 579		err = mips_set_process_fp_mode(target, fp_mode);
 580
 581	return err;
 582}
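/*
 * Illustrative sketch, not part of this file: the NT_MIPS_FP_MODE regset
 * is a single int carrying the same PR_FP_MODE_* bits as the
 * PR_GET_FP_MODE/PR_SET_FP_MODE prctls.  Assuming NT_MIPS_FP_MODE from
 * <linux/elf.h> and PR_FP_MODE_FR/PR_FP_MODE_FRE from <linux/prctl.h>,
 * a debugger could inspect the tracee's FP mode with:
 *
 *	int mode;
 *	struct iovec iov = { .iov_base = &mode, .iov_len = sizeof(mode) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_MIPS_FP_MODE, &iov) == 0)
 *		printf("FR=%d FRE=%d\n",
 *		       !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));
 */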
 583
 584#endif /* CONFIG_MIPS_FP_SUPPORT */
 585
 586#ifdef CONFIG_CPU_HAS_MSA
 587
 588struct msa_control_regs {
 589	unsigned int fir;
 590	unsigned int fcsr;
 591	unsigned int msair;
 592	unsigned int msacsr;
 593};
 594
 595static void copy_pad_fprs(struct task_struct *target,
 596			 const struct user_regset *regset,
 597			 struct membuf *to,
 598			 unsigned int live_sz)
 599{
 600	int i, j;
 601	unsigned long long fill = ~0ull;
 602	unsigned int cp_sz, pad_sz;
 603
 604	cp_sz = min(regset->size, live_sz);
 605	pad_sz = regset->size - cp_sz;
 606	WARN_ON(pad_sz % sizeof(fill));
 607
 608	for (i = 0; i < NUM_FPU_REGS; i++) {
 609		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
 610		for (j = 0; j < (pad_sz / sizeof(fill)); j++)
 611			membuf_store(to, fill);
 612	}
 613}
 614
 615static int msa_get(struct task_struct *target,
 616		   const struct user_regset *regset,
 617		   struct membuf to)
 618{
 619	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
 620	const struct msa_control_regs ctrl_regs = {
 621		.fir = boot_cpu_data.fpu_id,
 622		.fcsr = target->thread.fpu.fcr31,
 623		.msair = boot_cpu_data.msa_id,
 624		.msacsr = target->thread.fpu.msacsr,
 625	};
 626
 627	if (!tsk_used_math(target)) {
 628		/* The task hasn't used FP or MSA, fill with 0xff */
 629		copy_pad_fprs(target, regset, &to, 0);
 630	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
 631		/* Copy scalar FP context, fill the rest with 0xff */
 632		copy_pad_fprs(target, regset, &to, 8);
 633	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
 634		/* Trivially copy the vector registers */
 635		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
 636	} else {
 637		/* Copy as much context as possible, fill the rest with 0xff */
 638		copy_pad_fprs(target, regset, &to,
 639				sizeof(target->thread.fpu.fpr[0]));
 640	}
 641
 642	return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs));
 643}
 644
 645static int msa_set(struct task_struct *target,
 646		   const struct user_regset *regset,
 647		   unsigned int pos, unsigned int count,
 648		   const void *kbuf, const void __user *ubuf)
 649{
 650	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
 651	struct msa_control_regs ctrl_regs;
 652	unsigned int cp_sz;
 653	int i, err, start;
 654
 655	init_fp_ctx(target);
 656
 657	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
 658		/* Trivially copy the vector registers */
 659		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 660					 &target->thread.fpu.fpr,
 661					 0, wr_size);
 662	} else {
 663		/* Copy as much context as possible */
 664		cp_sz = min_t(unsigned int, regset->size,
 665			      sizeof(target->thread.fpu.fpr[0]));
 666
 667		i = start = err = 0;
 668		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
 669			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 670						  &target->thread.fpu.fpr[i],
 671						  start, start + cp_sz);
 672		}
 673	}
 674
 675	if (!err)
 676		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
 677					 wr_size, wr_size + sizeof(ctrl_regs));
 678	if (!err) {
 679		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
 680		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
 681	}
 682
 683	return err;
 684}
 685
 686#endif /* CONFIG_CPU_HAS_MSA */
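/*
 * Illustrative sketch, not part of this file: the NT_MIPS_MSA regset
 * served by msa_get()/msa_set() is NUM_FPU_REGS 16-byte vector slots
 * followed by the msa_control_regs block above.  Assuming NT_MIPS_MSA
 * from <linux/elf.h>, a matching user-space view could be declared as
 * (the struct name here is only for illustration):
 *
 *	struct mips_msa_state {
 *		uint8_t  wr[32][16];	/* 32 vector registers	*/
 *		uint32_t fir;
 *		uint32_t fcsr;
 *		uint32_t msair;
 *		uint32_t msacsr;
 *	};
 *
 * and fetched with PTRACE_GETREGSET and a struct iovec covering the
 * whole structure, as for NT_PRFPREG above.
 */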
 687
 688#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 689
 690/*
 691 * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
 692 */
 693static int dsp32_get(struct task_struct *target,
 694		     const struct user_regset *regset,
 695		     struct membuf to)
 696{
 697	u32 dspregs[NUM_DSP_REGS + 1];
 698	unsigned int i;
 699
 700	BUG_ON(to.left % sizeof(u32));
 701
 702	if (!cpu_has_dsp)
 703		return -EIO;
 704
 705	for (i = 0; i < NUM_DSP_REGS; i++)
 706		dspregs[i] = target->thread.dsp.dspr[i];
 707	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
 708	return membuf_write(&to, dspregs, sizeof(dspregs));
 709}
 710
 711/*
 712 * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
 713 */
 714static int dsp32_set(struct task_struct *target,
 715		     const struct user_regset *regset,
 716		     unsigned int pos, unsigned int count,
 717		     const void *kbuf, const void __user *ubuf)
 718{
 719	unsigned int start, num_regs, i;
 720	u32 dspregs[NUM_DSP_REGS + 1];
 721	int err;
 722
 723	BUG_ON(count % sizeof(u32));
 724
 725	if (!cpu_has_dsp)
 726		return -EIO;
 727
 728	start = pos / sizeof(u32);
 729	num_regs = count / sizeof(u32);
 730
 731	if (start + num_regs > NUM_DSP_REGS + 1)
 732		return -EIO;
 733
 734	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 735				 sizeof(dspregs));
 736	if (err)
 737		return err;
 738
 739	for (i = start; i < num_regs; i++)
 740		switch (i) {
 741		case 0 ... NUM_DSP_REGS - 1:
 742			target->thread.dsp.dspr[i] = (s32)dspregs[i];
 743			break;
 744		case NUM_DSP_REGS:
 745			target->thread.dsp.dspcontrol = (s32)dspregs[i];
 746			break;
 747		}
 748
 749	return 0;
 750}
 751
 752#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 753
 754#ifdef CONFIG_64BIT
 755
 756/*
 757 * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
 758 */
 759static int dsp64_get(struct task_struct *target,
 760		     const struct user_regset *regset,
 761		     struct membuf to)
 762{
 763	u64 dspregs[NUM_DSP_REGS + 1];
 764	unsigned int i;
 765
 766	BUG_ON(to.left % sizeof(u64));
 767
 768	if (!cpu_has_dsp)
 769		return -EIO;
 770
 771	for (i = 0; i < NUM_DSP_REGS; i++)
 772		dspregs[i] = target->thread.dsp.dspr[i];
 773	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
 774	return membuf_write(&to, dspregs, sizeof(dspregs));
 775}
 776
 777/*
 778 * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
 779 */
 780static int dsp64_set(struct task_struct *target,
 781		     const struct user_regset *regset,
 782		     unsigned int pos, unsigned int count,
 783		     const void *kbuf, const void __user *ubuf)
 784{
 785	unsigned int start, num_regs, i;
 786	u64 dspregs[NUM_DSP_REGS + 1];
 787	int err;
 788
 789	BUG_ON(count % sizeof(u64));
 790
 791	if (!cpu_has_dsp)
 792		return -EIO;
 793
 794	start = pos / sizeof(u64);
 795	num_regs = count / sizeof(u64);
 796
 797	if (start + num_regs > NUM_DSP_REGS + 1)
 798		return -EIO;
 799
 800	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 801				 sizeof(dspregs));
 802	if (err)
 803		return err;
 804
 805	for (i = start; i < num_regs; i++)
 806		switch (i) {
 807		case 0 ... NUM_DSP_REGS - 1:
 808			target->thread.dsp.dspr[i] = dspregs[i];
 809			break;
 810		case NUM_DSP_REGS:
 811			target->thread.dsp.dspcontrol = dspregs[i];
 812			break;
 813		}
 814
 815	return 0;
 816}
 817
 818#endif /* CONFIG_64BIT */
 819
 820/*
 821 * Determine whether the DSP context is present.
 822 */
 823static int dsp_active(struct task_struct *target,
 824		      const struct user_regset *regset)
 825{
 826	return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
 827}
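/*
 * Illustrative sketch, not part of this file: the NT_MIPS_DSP regset is
 * NUM_DSP_REGS accumulator slots followed by DSPControl, each slot sized
 * to the view in use (u32 for the 32-bit view, u64 for the 64-bit one),
 * and dsp_active() hides the note when the CPU has no DSP ASE.  Assuming
 * NT_MIPS_DSP from <linux/elf.h>, a 64-bit tracer could read it with:
 *
 *	uint64_t dsp[6 + 1];	/* NUM_DSP_REGS accumulators + DSPControl */
 *	struct iovec iov = { .iov_base = dsp, .iov_len = sizeof(dsp) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_MIPS_DSP, &iov) < 0)
 *		perror("PTRACE_GETREGSET");	/* e.g. no DSP ASE */
 */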
 828
 829enum mips_regset {
 830	REGSET_GPR,
 831	REGSET_DSP,
 832#ifdef CONFIG_MIPS_FP_SUPPORT
 833	REGSET_FPR,
 834	REGSET_FP_MODE,
 835#endif
 836#ifdef CONFIG_CPU_HAS_MSA
 837	REGSET_MSA,
 838#endif
 839};
 840
 841struct pt_regs_offset {
 842	const char *name;
 843	int offset;
 844};
 845
 846#define REG_OFFSET_NAME(reg, r) {					\
 847	.name = #reg,							\
 848	.offset = offsetof(struct pt_regs, r)				\
 849}
 850
 851#define REG_OFFSET_END {						\
 852	.name = NULL,							\
 853	.offset = 0							\
 854}
 855
 856static const struct pt_regs_offset regoffset_table[] = {
 857	REG_OFFSET_NAME(r0, regs[0]),
 858	REG_OFFSET_NAME(r1, regs[1]),
 859	REG_OFFSET_NAME(r2, regs[2]),
 860	REG_OFFSET_NAME(r3, regs[3]),
 861	REG_OFFSET_NAME(r4, regs[4]),
 862	REG_OFFSET_NAME(r5, regs[5]),
 863	REG_OFFSET_NAME(r6, regs[6]),
 864	REG_OFFSET_NAME(r7, regs[7]),
 865	REG_OFFSET_NAME(r8, regs[8]),
 866	REG_OFFSET_NAME(r9, regs[9]),
 867	REG_OFFSET_NAME(r10, regs[10]),
 868	REG_OFFSET_NAME(r11, regs[11]),
 869	REG_OFFSET_NAME(r12, regs[12]),
 870	REG_OFFSET_NAME(r13, regs[13]),
 871	REG_OFFSET_NAME(r14, regs[14]),
 872	REG_OFFSET_NAME(r15, regs[15]),
 873	REG_OFFSET_NAME(r16, regs[16]),
 874	REG_OFFSET_NAME(r17, regs[17]),
 875	REG_OFFSET_NAME(r18, regs[18]),
 876	REG_OFFSET_NAME(r19, regs[19]),
 877	REG_OFFSET_NAME(r20, regs[20]),
 878	REG_OFFSET_NAME(r21, regs[21]),
 879	REG_OFFSET_NAME(r22, regs[22]),
 880	REG_OFFSET_NAME(r23, regs[23]),
 881	REG_OFFSET_NAME(r24, regs[24]),
 882	REG_OFFSET_NAME(r25, regs[25]),
 883	REG_OFFSET_NAME(r26, regs[26]),
 884	REG_OFFSET_NAME(r27, regs[27]),
 885	REG_OFFSET_NAME(r28, regs[28]),
 886	REG_OFFSET_NAME(r29, regs[29]),
 887	REG_OFFSET_NAME(r30, regs[30]),
 888	REG_OFFSET_NAME(r31, regs[31]),
 889	REG_OFFSET_NAME(c0_status, cp0_status),
 890	REG_OFFSET_NAME(hi, hi),
 891	REG_OFFSET_NAME(lo, lo),
 892#ifdef CONFIG_CPU_HAS_SMARTMIPS
 893	REG_OFFSET_NAME(acx, acx),
 894#endif
 895	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
 896	REG_OFFSET_NAME(c0_cause, cp0_cause),
 897	REG_OFFSET_NAME(c0_epc, cp0_epc),
 898#ifdef CONFIG_CPU_CAVIUM_OCTEON
 899	REG_OFFSET_NAME(mpl0, mpl[0]),
 900	REG_OFFSET_NAME(mpl1, mpl[1]),
 901	REG_OFFSET_NAME(mpl2, mpl[2]),
 902	REG_OFFSET_NAME(mtp0, mtp[0]),
 903	REG_OFFSET_NAME(mtp1, mtp[1]),
 904	REG_OFFSET_NAME(mtp2, mtp[2]),
 905#endif
 906	REG_OFFSET_END,
 907};
 908
 909/**
 910 * regs_query_register_offset() - query register offset from its name
 911 * @name:       the name of a register
 912 *
 913 * regs_query_register_offset() returns the offset of a register in struct
 914 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 915 */
 916int regs_query_register_offset(const char *name)
 917{
 918	const struct pt_regs_offset *roff;
 919	for (roff = regoffset_table; roff->name != NULL; roff++)
 920		if (!strcmp(roff->name, name))
 921			return roff->offset;
 922	return -EINVAL;
 923}
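/*
 * Illustrative sketch, not part of this file: this lookup is the name
 * half of the regs-and-stack access API used by kprobe-based event
 * tracing.  Paired with regs_get_register() (assumed here to be provided
 * by <asm/ptrace.h>, as on other HAVE_REGS_AND_STACK_ACCESS_API
 * architectures), a register can be fetched by name:
 *
 *	int off = regs_query_register_offset("c0_epc");
 *
 *	if (off >= 0)
 *		pr_info("epc = %lx\n", regs_get_register(regs, off));
 */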
 924
 925#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 926
 927static const struct user_regset mips_regsets[] = {
 928	[REGSET_GPR] = {
 929		.core_note_type	= NT_PRSTATUS,
 930		.n		= ELF_NGREG,
 931		.size		= sizeof(unsigned int),
 932		.align		= sizeof(unsigned int),
 933		.regset_get		= gpr32_get,
 934		.set		= gpr32_set,
 935	},
 936	[REGSET_DSP] = {
 937		.core_note_type	= NT_MIPS_DSP,
 938		.n		= NUM_DSP_REGS + 1,
 939		.size		= sizeof(u32),
 940		.align		= sizeof(u32),
 941		.regset_get		= dsp32_get,
 942		.set		= dsp32_set,
 943		.active		= dsp_active,
 944	},
 945#ifdef CONFIG_MIPS_FP_SUPPORT
 946	[REGSET_FPR] = {
 947		.core_note_type	= NT_PRFPREG,
 948		.n		= ELF_NFPREG,
 949		.size		= sizeof(elf_fpreg_t),
 950		.align		= sizeof(elf_fpreg_t),
 951		.regset_get		= fpr_get,
 952		.set		= fpr_set,
 953	},
 954	[REGSET_FP_MODE] = {
 955		.core_note_type	= NT_MIPS_FP_MODE,
 956		.n		= 1,
 957		.size		= sizeof(int),
 958		.align		= sizeof(int),
 959		.regset_get		= fp_mode_get,
 960		.set		= fp_mode_set,
 961	},
 962#endif
 963#ifdef CONFIG_CPU_HAS_MSA
 964	[REGSET_MSA] = {
 965		.core_note_type	= NT_MIPS_MSA,
 966		.n		= NUM_FPU_REGS + 1,
 967		.size		= 16,
 968		.align		= 16,
 969		.regset_get		= msa_get,
 970		.set		= msa_set,
 971	},
 972#endif
 973};
 974
 975static const struct user_regset_view user_mips_view = {
 976	.name		= "mips",
 977	.e_machine	= ELF_ARCH,
 978	.ei_osabi	= ELF_OSABI,
 979	.regsets	= mips_regsets,
 980	.n		= ARRAY_SIZE(mips_regsets),
 981};
 982
 983#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 984
 985#ifdef CONFIG_64BIT
 986
 987static const struct user_regset mips64_regsets[] = {
 988	[REGSET_GPR] = {
 989		.core_note_type	= NT_PRSTATUS,
 990		.n		= ELF_NGREG,
 991		.size		= sizeof(unsigned long),
 992		.align		= sizeof(unsigned long),
 993		.regset_get		= gpr64_get,
 994		.set		= gpr64_set,
 995	},
 996	[REGSET_DSP] = {
 997		.core_note_type	= NT_MIPS_DSP,
 998		.n		= NUM_DSP_REGS + 1,
 999		.size		= sizeof(u64),
1000		.align		= sizeof(u64),
1001		.regset_get		= dsp64_get,
1002		.set		= dsp64_set,
1003		.active		= dsp_active,
1004	},
1005#ifdef CONFIG_MIPS_FP_SUPPORT
1006	[REGSET_FP_MODE] = {
1007		.core_note_type	= NT_MIPS_FP_MODE,
1008		.n		= 1,
1009		.size		= sizeof(int),
1010		.align		= sizeof(int),
1011		.regset_get		= fp_mode_get,
1012		.set		= fp_mode_set,
1013	},
1014	[REGSET_FPR] = {
1015		.core_note_type	= NT_PRFPREG,
1016		.n		= ELF_NFPREG,
1017		.size		= sizeof(elf_fpreg_t),
1018		.align		= sizeof(elf_fpreg_t),
1019		.regset_get		= fpr_get,
1020		.set		= fpr_set,
1021	},
1022#endif
1023#ifdef CONFIG_CPU_HAS_MSA
1024	[REGSET_MSA] = {
1025		.core_note_type	= NT_MIPS_MSA,
1026		.n		= NUM_FPU_REGS + 1,
1027		.size		= 16,
1028		.align		= 16,
1029		.regset_get		= msa_get,
1030		.set		= msa_set,
1031	},
1032#endif
1033};
1034
1035static const struct user_regset_view user_mips64_view = {
1036	.name		= "mips64",
1037	.e_machine	= ELF_ARCH,
1038	.ei_osabi	= ELF_OSABI,
1039	.regsets	= mips64_regsets,
1040	.n		= ARRAY_SIZE(mips64_regsets),
1041};
1042
1043#ifdef CONFIG_MIPS32_N32
1044
1045static const struct user_regset_view user_mipsn32_view = {
1046	.name		= "mipsn32",
1047	.e_flags	= EF_MIPS_ABI2,
1048	.e_machine	= ELF_ARCH,
1049	.ei_osabi	= ELF_OSABI,
1050	.regsets	= mips64_regsets,
1051	.n		= ARRAY_SIZE(mips64_regsets),
1052};
1053
1054#endif /* CONFIG_MIPS32_N32 */
1055
1056#endif /* CONFIG_64BIT */
1057
1058const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1059{
1060#ifdef CONFIG_32BIT
1061	return &user_mips_view;
1062#else
1063#ifdef CONFIG_MIPS32_O32
1064	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
1065		return &user_mips_view;
1066#endif
1067#ifdef CONFIG_MIPS32_N32
1068	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
1069		return &user_mipsn32_view;
1070#endif
1071	return &user_mips64_view;
1072#endif
1073}
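/*
 * Illustrative sketch, not part of this file: the view chosen above
 * decides which regset table PTRACE_GETREGSET/PTRACE_SETREGSET (and core
 * dumps) use, so the NT_PRSTATUS slots are 32 bits wide for o32 tracees
 * and 64 bits wide for n32/n64 ones.  A tracer that does not know the
 * tracee's ABI can oversize the buffer and let the kernel report the
 * actual length:
 *
 *	uint64_t buf[64];	/* comfortably larger than either layout */
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == 0)
 *		printf("%zu-byte GPR set\n", iov.iov_len);
 */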
1074
1075long arch_ptrace(struct task_struct *child, long request,
1076		 unsigned long addr, unsigned long data)
1077{
1078	int ret;
1079	void __user *addrp = (void __user *) addr;
1080	void __user *datavp = (void __user *) data;
1081	unsigned long __user *datalp = (void __user *) data;
1082
1083	switch (request) {
1084	/* when I and D space are separate, these will need to be fixed. */
1085	case PTRACE_PEEKTEXT: /* read word at location addr. */
1086	case PTRACE_PEEKDATA:
1087		ret = generic_ptrace_peekdata(child, addr, data);
1088		break;
1089
1090	/* Read the word at location addr in the USER area. */
1091	case PTRACE_PEEKUSR: {
1092		struct pt_regs *regs;
1093		unsigned long tmp = 0;
1094
1095		regs = task_pt_regs(child);
1096		ret = 0;  /* Default return value. */
1097
1098		switch (addr) {
1099		case 0 ... 31:
1100			tmp = regs->regs[addr];
1101			break;
1102#ifdef CONFIG_MIPS_FP_SUPPORT
1103		case FPR_BASE ... FPR_BASE + 31: {
1104			union fpureg *fregs;
1105
1106			if (!tsk_used_math(child)) {
1107				/* FP not yet used */
1108				tmp = -1;
1109				break;
1110			}
1111			fregs = get_fpu_regs(child);
1112
1113#ifdef CONFIG_32BIT
1114			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1115				/*
1116				 * The odd registers are actually the high
1117				 * order bits of the values stored in the even
1118				 * registers.
1119				 */
1120				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1121						addr & 1);
1122				break;
1123			}
1124#endif
1125			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
1126			break;
1127		}
1128		case FPC_CSR:
1129			tmp = child->thread.fpu.fcr31;
1130			break;
1131		case FPC_EIR:
1132			/* implementation / version register */
1133			tmp = boot_cpu_data.fpu_id;
1134			break;
1135#endif
1136		case PC:
1137			tmp = regs->cp0_epc;
1138			break;
1139		case CAUSE:
1140			tmp = regs->cp0_cause;
1141			break;
1142		case BADVADDR:
1143			tmp = regs->cp0_badvaddr;
1144			break;
1145		case MMHI:
1146			tmp = regs->hi;
1147			break;
1148		case MMLO:
1149			tmp = regs->lo;
1150			break;
1151#ifdef CONFIG_CPU_HAS_SMARTMIPS
1152		case ACX:
1153			tmp = regs->acx;
1154			break;
1155#endif
1156		case DSP_BASE ... DSP_BASE + 5: {
1157			dspreg_t *dregs;
1158
1159			if (!cpu_has_dsp) {
1160				tmp = 0;
1161				ret = -EIO;
1162				goto out;
1163			}
1164			dregs = __get_dsp_regs(child);
1165			tmp = dregs[addr - DSP_BASE];
1166			break;
1167		}
1168		case DSP_CONTROL:
1169			if (!cpu_has_dsp) {
1170				tmp = 0;
1171				ret = -EIO;
1172				goto out;
1173			}
1174			tmp = child->thread.dsp.dspcontrol;
1175			break;
1176		default:
1177			tmp = 0;
1178			ret = -EIO;
1179			goto out;
1180		}
1181		ret = put_user(tmp, datalp);
1182		break;
1183	}
1184
1185	/* when I and D space are separate, this will have to be fixed. */
1186	case PTRACE_POKETEXT: /* write the word at location addr. */
1187	case PTRACE_POKEDATA:
1188		ret = generic_ptrace_pokedata(child, addr, data);
1189		break;
1190
1191	case PTRACE_POKEUSR: {
1192		struct pt_regs *regs;
1193		ret = 0;
1194		regs = task_pt_regs(child);
1195
1196		switch (addr) {
1197		case 0 ... 31:
1198			regs->regs[addr] = data;
1199			/* System call number may have been changed */
1200			if (addr == 2)
1201				mips_syscall_update_nr(child, regs);
1202			else if (addr == 4 &&
1203				 mips_syscall_is_indirect(child, regs))
1204				mips_syscall_update_nr(child, regs);
1205			break;
1206#ifdef CONFIG_MIPS_FP_SUPPORT
1207		case FPR_BASE ... FPR_BASE + 31: {
1208			union fpureg *fregs = get_fpu_regs(child);
1209
1210			init_fp_ctx(child);
1211#ifdef CONFIG_32BIT
1212			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1213				/*
1214				 * The odd registers are actually the high
1215				 * order bits of the values stored in the even
1216				 * registers.
1217				 */
1218				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1219					  addr & 1, data);
1220				break;
1221			}
1222#endif
1223			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
1224			break;
1225		}
1226		case FPC_CSR:
1227			init_fp_ctx(child);
1228			ptrace_setfcr31(child, data);
1229			break;
1230#endif
1231		case PC:
1232			regs->cp0_epc = data;
1233			break;
1234		case MMHI:
1235			regs->hi = data;
1236			break;
1237		case MMLO:
1238			regs->lo = data;
1239			break;
1240#ifdef CONFIG_CPU_HAS_SMARTMIPS
1241		case ACX:
1242			regs->acx = data;
1243			break;
1244#endif
1245		case DSP_BASE ... DSP_BASE + 5: {
1246			dspreg_t *dregs;
1247
1248			if (!cpu_has_dsp) {
1249				ret = -EIO;
1250				break;
1251			}
1252
1253			dregs = __get_dsp_regs(child);
1254			dregs[addr - DSP_BASE] = data;
1255			break;
1256		}
1257		case DSP_CONTROL:
1258			if (!cpu_has_dsp) {
1259				ret = -EIO;
1260				break;
1261			}
1262			child->thread.dsp.dspcontrol = data;
1263			break;
1264		default:
1265			/* The rest are not allowed. */
1266			ret = -EIO;
1267			break;
1268		}
1269		break;
1270		}
1271
1272	case PTRACE_GETREGS:
1273		ret = ptrace_getregs(child, datavp);
1274		break;
1275
1276	case PTRACE_SETREGS:
1277		ret = ptrace_setregs(child, datavp);
1278		break;
1279
1280#ifdef CONFIG_MIPS_FP_SUPPORT
1281	case PTRACE_GETFPREGS:
1282		ret = ptrace_getfpregs(child, datavp);
1283		break;
1284
1285	case PTRACE_SETFPREGS:
1286		ret = ptrace_setfpregs(child, datavp);
1287		break;
1288#endif
1289	case PTRACE_GET_THREAD_AREA:
1290		ret = put_user(task_thread_info(child)->tp_value, datalp);
1291		break;
1292
1293	case PTRACE_GET_WATCH_REGS:
1294		ret = ptrace_get_watch_regs(child, addrp);
1295		break;
1296
1297	case PTRACE_SET_WATCH_REGS:
1298		ret = ptrace_set_watch_regs(child, addrp);
1299		break;
1300
1301	default:
1302		ret = ptrace_request(child, request, addr, data);
1303		break;
1304	}
1305 out:
1306	return ret;
1307}
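/*
 * Illustrative sketch, not part of this file: the PTRACE_PEEKUSR/POKEUSR
 * "addresses" decoded above are the symbolic register indices (0..31,
 * FPR_BASE, PC, FPC_CSR, DSP_BASE, ...), not byte offsets into a user
 * area.  Assuming those constants come from the MIPS uapi <asm/ptrace.h>
 * and using the glibc spelling of the request, reading the program
 * counter and FCSR of a stopped tracee looks like:
 *
 *	#include <asm/ptrace.h>		/* PC, FPC_CSR, ...	*/
 *	#include <errno.h>
 *	#include <sys/ptrace.h>
 *
 *	errno = 0;
 *	long pc   = ptrace(PTRACE_PEEKUSER, pid, PC, 0);
 *	long fcsr = ptrace(PTRACE_PEEKUSER, pid, FPC_CSR, 0);
 *	/* check errno: -1 is also a valid register value */
 */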
1308
1309/*
1310 * Notification of system call entry/exit
1311 * - triggered by current->work.syscall_trace
1312 */
1313asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
1314{
1315	user_exit();
1316
1317	current_thread_info()->syscall = syscall;
1318
1319	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
1320		if (tracehook_report_syscall_entry(regs))
1321			return -1;
1322		syscall = current_thread_info()->syscall;
1323	}
1324
1325#ifdef CONFIG_SECCOMP
1326	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
1327		int ret, i;
1328		struct seccomp_data sd;
1329		unsigned long args[6];
1330
1331		sd.nr = syscall;
1332		sd.arch = syscall_get_arch(current);
1333		syscall_get_arguments(current, regs, args);
1334		for (i = 0; i < 6; i++)
1335			sd.args[i] = args[i];
1336		sd.instruction_pointer = KSTK_EIP(current);
1337
1338		ret = __secure_computing(&sd);
1339		if (ret == -1)
1340			return ret;
1341		syscall = current_thread_info()->syscall;
1342	}
1343#endif
1344
1345	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1346		trace_sys_enter(regs, regs->regs[2]);
1347
1348	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
1349			    regs->regs[6], regs->regs[7]);
1350
1351	/*
1352	 * Negative syscall numbers are mistaken for rejected syscalls, but
1353	 * won't have had the return value set appropriately, so we do so now.
1354	 */
1355	if (syscall < 0)
1356		syscall_set_return_value(current, regs, -ENOSYS, 0);
1357	return syscall;
1358}
1359
1360/*
1361 * Notification of system call entry/exit
1362 * - triggered by current->work.syscall_trace
1363 */
1364asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1365{
1366	/*
1367	 * We may come here right after calling schedule_user()
1368	 * or do_notify_resume(), in which case we can be in RCU
1369	 * user mode.
1370	 */
1371	user_exit();
1372
1373	audit_syscall_exit(regs);
1374
1375	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1376		trace_sys_exit(regs, regs_return_value(regs));
1377
1378	if (test_thread_flag(TIF_SYSCALL_TRACE))
1379		tracehook_report_syscall_exit(regs, 0);
1380
1381	user_enter();
1382}