arch/mips/kernel/ptrace.c (v5.4)
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1992 Ross Biro
   7 * Copyright (C) Linus Torvalds
   8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
   9 * Copyright (C) 1996 David S. Miller
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 1999 MIPS Technologies, Inc.
  12 * Copyright (C) 2000 Ulf Carlsson
  13 *
  14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
  15 * binaries.
  16 */
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/elf.h>
  20#include <linux/kernel.h>
  21#include <linux/sched.h>
  22#include <linux/sched/task_stack.h>
  23#include <linux/mm.h>
  24#include <linux/errno.h>
  25#include <linux/ptrace.h>
  26#include <linux/regset.h>
  27#include <linux/smp.h>
  28#include <linux/security.h>
  29#include <linux/stddef.h>
  30#include <linux/tracehook.h>
  31#include <linux/audit.h>
  32#include <linux/seccomp.h>
  33#include <linux/ftrace.h>
  34
  35#include <asm/byteorder.h>
  36#include <asm/cpu.h>
  37#include <asm/cpu-info.h>
  38#include <asm/dsp.h>
  39#include <asm/fpu.h>
  40#include <asm/mipsregs.h>
  41#include <asm/mipsmtregs.h>
  42#include <asm/pgtable.h>
  43#include <asm/page.h>
  44#include <asm/processor.h>
  45#include <asm/syscall.h>
  46#include <linux/uaccess.h>
  47#include <asm/bootinfo.h>
  48#include <asm/reg.h>
  49
  50#define CREATE_TRACE_POINTS
  51#include <trace/events/syscalls.h>
  52
  53/*
  54 * Called by kernel/ptrace.c when detaching..
  55 *
  56 * Make sure single step bits etc are not set.
  57 */
  58void ptrace_disable(struct task_struct *child)
  59{
  60	/* Don't load the watchpoint registers for the ex-child. */
  61	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
  62}
  63
  64/*
  65 * Read a general register set.	 We always use the 64-bit format, even
  66 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
  67 * Registers are sign extended to fill the available space.
  68 */
  69int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
  70{
  71	struct pt_regs *regs;
  72	int i;
  73
  74	if (!access_ok(data, 38 * 8))
  75		return -EIO;
  76
  77	regs = task_pt_regs(child);
  78
  79	for (i = 0; i < 32; i++)
  80		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
  81	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
  82	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
  83	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
  84	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
  85	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
  86	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
  87
  88	return 0;
  89}
  90
  91/*
  92 * Write a general register set.  As for PTRACE_GETREGS, we always use
  93 * the 64-bit format.  On a 32-bit kernel only the lower order half
  94 * (according to endianness) will be used.
  95 */
  96int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
  97{
  98	struct pt_regs *regs;
  99	int i;
 100
 101	if (!access_ok(data, 38 * 8))
 102		return -EIO;
 103
 104	regs = task_pt_regs(child);
 105
 106	for (i = 0; i < 32; i++)
 107		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
 108	__get_user(regs->lo, (__s64 __user *)&data->lo);
 109	__get_user(regs->hi, (__s64 __user *)&data->hi);
 110	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
 111
 112	/* badvaddr, status, and cause may not be written.  */
 113
 114	/* System call number may have been changed */
 115	mips_syscall_update_nr(child, regs);
 116
 117	return 0;
 118}
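
The 38 * 8 size checked in ptrace_getregs()/ptrace_setregs() above is the fixed block that the old-style PTRACE_GETREGS/PTRACE_SETREGS requests exchange with user space: 32 general registers followed by lo, hi, cp0_epc, cp0_badvaddr, cp0_status and cp0_cause, each widened to 64 bits. A hedged userspace sketch of the reader side (not part of this file; the struct name is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Mirrors the 38 * 8 byte block ptrace_getregs() fills in. */
struct mips_gregs {
	uint64_t regs[32];
	uint64_t lo, hi;
	uint64_t cp0_epc, cp0_badvaddr, cp0_status, cp0_cause;
};

/* Read the stopped tracee's program counter; returns 0, or -1 with errno set. */
static int read_pc(pid_t pid, uint64_t *pc)
{
	struct mips_gregs g;

	if (ptrace(PTRACE_GETREGS, pid, NULL, &g) == -1)
		return -1;
	*pc = g.cp0_epc;
	return 0;
}
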
 119
 120int ptrace_get_watch_regs(struct task_struct *child,
 121			  struct pt_watch_regs __user *addr)
 122{
 123	enum pt_watch_style style;
 124	int i;
 125
 126	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 127		return -EIO;
 128	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
 129		return -EIO;
 130
 131#ifdef CONFIG_32BIT
 132	style = pt_watch_style_mips32;
 133#define WATCH_STYLE mips32
 134#else
 135	style = pt_watch_style_mips64;
 136#define WATCH_STYLE mips64
 137#endif
 138
 139	__put_user(style, &addr->style);
 140	__put_user(boot_cpu_data.watch_reg_use_cnt,
 141		   &addr->WATCH_STYLE.num_valid);
 142	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 143		__put_user(child->thread.watch.mips3264.watchlo[i],
 144			   &addr->WATCH_STYLE.watchlo[i]);
 145		__put_user(child->thread.watch.mips3264.watchhi[i] &
 146				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
 147			   &addr->WATCH_STYLE.watchhi[i]);
 148		__put_user(boot_cpu_data.watch_reg_masks[i],
 149			   &addr->WATCH_STYLE.watch_masks[i]);
 150	}
 151	for (; i < 8; i++) {
 152		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
 153		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
 154		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
 155	}
 156
 157	return 0;
 158}
 159
 160int ptrace_set_watch_regs(struct task_struct *child,
 161			  struct pt_watch_regs __user *addr)
 162{
 163	int i;
 164	int watch_active = 0;
 165	unsigned long lt[NUM_WATCH_REGS];
 166	u16 ht[NUM_WATCH_REGS];
 167
 168	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 169		return -EIO;
 170	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
 171		return -EIO;
 172	/* Check the values. */
 173	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 174		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
 175#ifdef CONFIG_32BIT
 176		if (lt[i] & __UA_LIMIT)
 177			return -EINVAL;
 178#else
 179		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
 180			if (lt[i] & 0xffffffff80000000UL)
 181				return -EINVAL;
 182		} else {
 183			if (lt[i] & __UA_LIMIT)
 184				return -EINVAL;
 185		}
 186#endif
 187		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
 188		if (ht[i] & ~MIPS_WATCHHI_MASK)
 189			return -EINVAL;
 190	}
 191	/* Install them. */
 192	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 193		if (lt[i] & MIPS_WATCHLO_IRW)
 194			watch_active = 1;
 195		child->thread.watch.mips3264.watchlo[i] = lt[i];
 196		/* Set the G bit. */
 197		child->thread.watch.mips3264.watchhi[i] = ht[i];
 198	}
 199
 200	if (watch_active)
 201		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
 202	else
 203		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
 204
 205	return 0;
 206}
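
PTRACE_GET_WATCH_REGS and PTRACE_SET_WATCH_REGS pass struct pt_watch_regs (from the MIPS uapi asm/ptrace.h) in the addr argument, as arch_ptrace() further down shows. A hedged sketch of a tracer arming a single write watchpoint, assuming a 64-bit-address tracee; the header mix and the bare 0x1 write-enable bit are indicative only:

#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* struct pt_watch_regs, PTRACE_*_WATCH_REGS */

/* Arm a write watchpoint on vaddr in the stopped tracee. */
static int set_write_watch(pid_t pid, unsigned long vaddr)
{
	struct pt_watch_regs w;

	if (ptrace(PTRACE_GET_WATCH_REGS, pid, &w, NULL) == -1)
		return -1;
	if (w.mips64.num_valid == 0)
		return -1;				/* no watch registers */
	w.mips64.watchlo[0] = (vaddr & ~7UL) | 0x1;	/* dword-aligned address + W bit */
	w.mips64.watchhi[0] = 0;			/* no address mask bits */
	return (int)ptrace(PTRACE_SET_WATCH_REGS, pid, &w, NULL);
}
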
 207
 208/* regset get/set implementations */
 209
 210#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 211
 212static int gpr32_get(struct task_struct *target,
 213		     const struct user_regset *regset,
 214		     unsigned int pos, unsigned int count,
 215		     void *kbuf, void __user *ubuf)
 216{
 217	struct pt_regs *regs = task_pt_regs(target);
 218	u32 uregs[ELF_NGREG] = {};
 219
 220	mips_dump_regs32(uregs, regs);
 221	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
 222				   sizeof(uregs));
 223}
 224
 225static int gpr32_set(struct task_struct *target,
 226		     const struct user_regset *regset,
 227		     unsigned int pos, unsigned int count,
 228		     const void *kbuf, const void __user *ubuf)
 229{
 230	struct pt_regs *regs = task_pt_regs(target);
 231	u32 uregs[ELF_NGREG];
 232	unsigned start, num_regs, i;
 233	int err;
 234
 235	start = pos / sizeof(u32);
 236	num_regs = count / sizeof(u32);
 237
 238	if (start + num_regs > ELF_NGREG)
 239		return -EIO;
 240
 241	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 242				 sizeof(uregs));
 243	if (err)
 244		return err;
 245
 246	for (i = start; i < num_regs; i++) {
 247		/*
 248		 * Cast all values to signed here so that if this is a 64-bit
 249		 * kernel, the supplied 32-bit values will be sign extended.
 250		 */
 251		switch (i) {
 252		case MIPS32_EF_R1 ... MIPS32_EF_R25:
 253			/* k0/k1 are ignored. */
 254		case MIPS32_EF_R28 ... MIPS32_EF_R31:
 255			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
 256			break;
 257		case MIPS32_EF_LO:
 258			regs->lo = (s32)uregs[i];
 259			break;
 260		case MIPS32_EF_HI:
 261			regs->hi = (s32)uregs[i];
 262			break;
 263		case MIPS32_EF_CP0_EPC:
 264			regs->cp0_epc = (s32)uregs[i];
 265			break;
 266		}
 267	}
 268
 269	/* System call number may have been changed */
 270	mips_syscall_update_nr(target, regs);
 271
 272	return 0;
 273}
 274
 275#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 276
 277#ifdef CONFIG_64BIT
 278
 279static int gpr64_get(struct task_struct *target,
 280		     const struct user_regset *regset,
 281		     unsigned int pos, unsigned int count,
 282		     void *kbuf, void __user *ubuf)
 283{
 284	struct pt_regs *regs = task_pt_regs(target);
 285	u64 uregs[ELF_NGREG] = {};
 286
 287	mips_dump_regs64(uregs, regs);
 288	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
 289				   sizeof(uregs));
 290}
 291
 292static int gpr64_set(struct task_struct *target,
 293		     const struct user_regset *regset,
 294		     unsigned int pos, unsigned int count,
 295		     const void *kbuf, const void __user *ubuf)
 296{
 297	struct pt_regs *regs = task_pt_regs(target);
 298	u64 uregs[ELF_NGREG];
 299	unsigned start, num_regs, i;
 300	int err;
 301
 302	start = pos / sizeof(u64);
 303	num_regs = count / sizeof(u64);
 304
 305	if (start + num_regs > ELF_NGREG)
 306		return -EIO;
 307
 308	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 309				 sizeof(uregs));
 310	if (err)
 311		return err;
 312
 313	for (i = start; i < num_regs; i++) {
 314		switch (i) {
 315		case MIPS64_EF_R1 ... MIPS64_EF_R25:
 316			/* k0/k1 are ignored. */
 317		case MIPS64_EF_R28 ... MIPS64_EF_R31:
 318			regs->regs[i - MIPS64_EF_R0] = uregs[i];
 319			break;
 320		case MIPS64_EF_LO:
 321			regs->lo = uregs[i];
 322			break;
 323		case MIPS64_EF_HI:
 324			regs->hi = uregs[i];
 325			break;
 326		case MIPS64_EF_CP0_EPC:
 327			regs->cp0_epc = uregs[i];
 328			break;
 329		}
 330	}
 331
 332	/* System call number may have been changed */
 333	mips_syscall_update_nr(target, regs);
 334
 335	return 0;
 336}
 337
 338#endif /* CONFIG_64BIT */
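
The gpr32_get()/gpr64_get() handlers and their set counterparts above are not ptrace requests themselves; the regset core calls them when a tracer issues PTRACE_GETREGSET/PTRACE_SETREGSET with NT_PRSTATUS and an iovec. A hedged userspace sketch (not part of this file):

#include <elf.h>		/* NT_PRSTATUS */
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>		/* struct iovec */

/* Read the general-register regset; the kernel truncates iov_len to the
 * regset size and dispatches to gpr64_get() (or gpr32_get() for o32). */
static long read_gregset(pid_t pid, uint64_t *buf, size_t bufsize)
{
	struct iovec iov = { .iov_base = buf, .iov_len = bufsize };

	return ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
}
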
 339
 340
 341#ifdef CONFIG_MIPS_FP_SUPPORT
 342
 343/*
 344 * Poke at FCSR according to its mask.  Set the Cause bits even
 345 * if a corresponding Enable bit is set.  This will be noticed at
 346 * the time the thread is switched to and SIGFPE thrown accordingly.
 347 */
 348static void ptrace_setfcr31(struct task_struct *child, u32 value)
 349{
 350	u32 fcr31;
 351	u32 mask;
 352
 353	fcr31 = child->thread.fpu.fcr31;
 354	mask = boot_cpu_data.fpu_msk31;
 355	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
 356}
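
The merge in ptrace_setfcr31() keeps the read-only FCSR bits named by fpu_msk31 from the saved context and takes everything else from the requested value. An open-coded equivalent with a made-up mask, for illustration only:

/*
 * With ro_mask = 0x01000000, old fcr31 = 0x01000002, value = 0x00800001:
 *   value & ~ro_mask = 0x00800001
 *   fcr31 &  ro_mask = 0x01000000
 *   merged fcr31     = 0x01800001
 */
static inline u32 fcr31_merge(u32 old_fcr31, u32 value, u32 ro_mask)
{
	return (value & ~ro_mask) | (old_fcr31 & ro_mask);
}
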
 357
 358int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
 359{
 360	int i;
 361
 362	if (!access_ok(data, 33 * 8))
 363		return -EIO;
 364
 365	if (tsk_used_math(child)) {
 366		union fpureg *fregs = get_fpu_regs(child);
 367		for (i = 0; i < 32; i++)
 368			__put_user(get_fpr64(&fregs[i], 0),
 369				   i + (__u64 __user *)data);
 370	} else {
 371		for (i = 0; i < 32; i++)
 372			__put_user((__u64) -1, i + (__u64 __user *) data);
 373	}
 374
 375	__put_user(child->thread.fpu.fcr31, data + 64);
 376	__put_user(boot_cpu_data.fpu_id, data + 65);
 377
 378	return 0;
 379}
 380
 381int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
 382{
 383	union fpureg *fregs;
 384	u64 fpr_val;
 385	u32 value;
 386	int i;
 387
 388	if (!access_ok(data, 33 * 8))
 389		return -EIO;
 390
 391	init_fp_ctx(child);
 392	fregs = get_fpu_regs(child);
 393
 394	for (i = 0; i < 32; i++) {
 395		__get_user(fpr_val, i + (__u64 __user *)data);
 396		set_fpr64(&fregs[i], 0, fpr_val);
 397	}
 398
 399	__get_user(value, data + 64);
 400	ptrace_setfcr31(child, value);
 401
 402	/* FIR may not be written.  */
 403
 404	return 0;
 405}
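
ptrace_getfpregs()/ptrace_setfpregs() exchange a fixed 33 * 8 byte block: 32 doubleword FP registers, then FCSR and the read-only FIR (fpu_id) packed as two 32-bit words. A hedged userspace mirror of that layout (names are illustrative):

#include <stdint.h>

/* Layout written by ptrace_getfpregs() above; 33 * 8 bytes in total. */
struct mips_fpregs {
	uint64_t fp_r[32];	/* $f0..$f31, or -1 if the task never used FP */
	uint32_t fp_csr;	/* data[64] above: FCSR */
	uint32_t fp_fir;	/* data[65] above: FIR, read-only */
};
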
 406
 407/*
 408 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 409 * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
 410 * correspond 1:1 to buffer slots.  Only general registers are copied.
 411 */
 412static int fpr_get_fpa(struct task_struct *target,
 413		       unsigned int *pos, unsigned int *count,
 414		       void **kbuf, void __user **ubuf)
 415{
 416	return user_regset_copyout(pos, count, kbuf, ubuf,
 417				   &target->thread.fpu,
 418				   0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
 419}
 420
 421/*
 422 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 423 * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
 424 * general register slots are copied to buffer slots.  Only general
 425 * registers are copied.
 426 */
 427static int fpr_get_msa(struct task_struct *target,
 428		       unsigned int *pos, unsigned int *count,
 429		       void **kbuf, void __user **ubuf)
 430{
 431	unsigned int i;
 432	u64 fpr_val;
 433	int err;
 434
 435	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 436	for (i = 0; i < NUM_FPU_REGS; i++) {
 437		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
 438		err = user_regset_copyout(pos, count, kbuf, ubuf,
 439					  &fpr_val, i * sizeof(elf_fpreg_t),
 440					  (i + 1) * sizeof(elf_fpreg_t));
 441		if (err)
 442			return err;
 443	}
 444
 445	return 0;
 446}
 447
 448/*
 449 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
 450 * Choose the appropriate helper for general registers, and then copy
 451 * the FCSR and FIR registers separately.
 452 */
 453static int fpr_get(struct task_struct *target,
 454		   const struct user_regset *regset,
 455		   unsigned int pos, unsigned int count,
 456		   void *kbuf, void __user *ubuf)
 457{
 458	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 459	const int fir_pos = fcr31_pos + sizeof(u32);
 460	int err;
 461
 462	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 463		err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
 464	else
 465		err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
 466	if (err)
 467		return err;
 468
 469	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 470				  &target->thread.fpu.fcr31,
 471				  fcr31_pos, fcr31_pos + sizeof(u32));
 472	if (err)
 473		return err;
 474
 475	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 476				  &boot_cpu_data.fpu_id,
 477				  fir_pos, fir_pos + sizeof(u32));
 478
 479	return err;
 480}
 481
 482/*
 483 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 484 * !CONFIG_CPU_HAS_MSA variant.   Buffer slots correspond 1:1 to FP
 485 * context's general register slots.  Only general registers are copied.
 486 */
 487static int fpr_set_fpa(struct task_struct *target,
 488		       unsigned int *pos, unsigned int *count,
 489		       const void **kbuf, const void __user **ubuf)
 490{
 491	return user_regset_copyin(pos, count, kbuf, ubuf,
 492				  &target->thread.fpu,
 493				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
 494}
 495
 496/*
 497 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 498 * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
 499 * bits only of FP context's general register slots.  Only general
 500 * registers are copied.
 501 */
 502static int fpr_set_msa(struct task_struct *target,
 503		       unsigned int *pos, unsigned int *count,
 504		       const void **kbuf, const void __user **ubuf)
 505{
 506	unsigned int i;
 507	u64 fpr_val;
 508	int err;
 509
 510	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 511	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
 512		err = user_regset_copyin(pos, count, kbuf, ubuf,
 513					 &fpr_val, i * sizeof(elf_fpreg_t),
 514					 (i + 1) * sizeof(elf_fpreg_t));
 515		if (err)
 516			return err;
 517		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
 518	}
 519
 520	return 0;
 521}
 522
 523/*
 524 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 525 * Choose the appropriate helper for general registers, and then copy
 526 * the FCSR register separately.  Ignore the incoming FIR register
 527 * contents though, as the register is read-only.
 528 *
 529 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 530 * which is supposed to have been guaranteed by the kernel before
 531 * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
 532 * so that we can safely avoid preinitializing temporaries for
 533 * partial register writes.
 534 */
 535static int fpr_set(struct task_struct *target,
 536		   const struct user_regset *regset,
 537		   unsigned int pos, unsigned int count,
 538		   const void *kbuf, const void __user *ubuf)
 539{
 540	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 541	const int fir_pos = fcr31_pos + sizeof(u32);
 542	u32 fcr31;
 543	int err;
 544
 545	BUG_ON(count % sizeof(elf_fpreg_t));
 546
 547	if (pos + count > sizeof(elf_fpregset_t))
 548		return -EIO;
 549
 550	init_fp_ctx(target);
 551
 552	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 553		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
 554	else
 555		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
 556	if (err)
 557		return err;
 558
 559	if (count > 0) {
 560		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 561					 &fcr31,
 562					 fcr31_pos, fcr31_pos + sizeof(u32));
 563		if (err)
 564			return err;
 565
 566		ptrace_setfcr31(target, fcr31);
 567	}
 568
 569	if (count > 0)
 570		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
 571						fir_pos,
 572						fir_pos + sizeof(u32));
 573
 574	return err;
 575}
 576
 577/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer.  */
 578static int fp_mode_get(struct task_struct *target,
 579		       const struct user_regset *regset,
 580		       unsigned int pos, unsigned int count,
 581		       void *kbuf, void __user *ubuf)
 582{
 583	int fp_mode;
 584
 585	fp_mode = mips_get_process_fp_mode(target);
 586	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
 587				   sizeof(fp_mode));
 588}
 589
 590/*
 591 * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
 592 *
 593 * We optimize for the case where `count % sizeof(int) == 0', which
 594 * is supposed to have been guaranteed by the kernel before calling
 595 * us, e.g. in `ptrace_regset'.  We enforce that requirement, so
 596 * that we can safely avoid preinitializing temporaries for partial
 597 * mode writes.
 598 */
 599static int fp_mode_set(struct task_struct *target,
 600		       const struct user_regset *regset,
 601		       unsigned int pos, unsigned int count,
 602		       const void *kbuf, const void __user *ubuf)
 603{
 604	int fp_mode;
 605	int err;
 606
 607	BUG_ON(count % sizeof(int));
 608
 609	if (pos + count > sizeof(fp_mode))
 610		return -EIO;
 611
 612	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
 613				 sizeof(fp_mode));
 614	if (err)
 615		return err;
 616
 617	if (count > 0)
 618		err = mips_set_process_fp_mode(target, fp_mode);
 619
 620	return err;
 621}
 622
 623#endif /* CONFIG_MIPS_FP_SUPPORT */
 624
 625#ifdef CONFIG_CPU_HAS_MSA
 626
 627struct msa_control_regs {
 628	unsigned int fir;
 629	unsigned int fcsr;
 630	unsigned int msair;
 631	unsigned int msacsr;
 632};
 633
 634static int copy_pad_fprs(struct task_struct *target,
 635			 const struct user_regset *regset,
 636			 unsigned int *ppos, unsigned int *pcount,
 637			 void **pkbuf, void __user **pubuf,
 638			 unsigned int live_sz)
 639{
 640	int i, j, start, start_pad, err;
 641	unsigned long long fill = ~0ull;
 642	unsigned int cp_sz, pad_sz;
 643
 644	cp_sz = min(regset->size, live_sz);
 645	pad_sz = regset->size - cp_sz;
 646	WARN_ON(pad_sz % sizeof(fill));
 647
 648	i = start = err = 0;
 649	for (; i < NUM_FPU_REGS; i++, start += regset->size) {
 650		err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf,
 651					   &target->thread.fpu.fpr[i],
 652					   start, start + cp_sz);
 653
 654		start_pad = start + cp_sz;
 655		for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
 656			err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf,
 657						   &fill, start_pad,
 658						   start_pad + sizeof(fill));
 659			start_pad += sizeof(fill);
 660		}
 661	}
 662
 663	return err;
 664}
 665
 666static int msa_get(struct task_struct *target,
 667		   const struct user_regset *regset,
 668		   unsigned int pos, unsigned int count,
 669		   void *kbuf, void __user *ubuf)
 670{
 671	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
 672	const struct msa_control_regs ctrl_regs = {
 673		.fir = boot_cpu_data.fpu_id,
 674		.fcsr = target->thread.fpu.fcr31,
 675		.msair = boot_cpu_data.msa_id,
 676		.msacsr = target->thread.fpu.msacsr,
 677	};
 678	int err;
 679
 680	if (!tsk_used_math(target)) {
 681		/* The task hasn't used FP or MSA, fill with 0xff */
 682		err = copy_pad_fprs(target, regset, &pos, &count,
 683				    &kbuf, &ubuf, 0);
 684	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
 685		/* Copy scalar FP context, fill the rest with 0xff */
 686		err = copy_pad_fprs(target, regset, &pos, &count,
 687				    &kbuf, &ubuf, 8);
 688	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
 689		/* Trivially copy the vector registers */
 690		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 691					  &target->thread.fpu.fpr,
 692					  0, wr_size);
 693	} else {
 694		/* Copy as much context as possible, fill the rest with 0xff */
 695		err = copy_pad_fprs(target, regset, &pos, &count,
 696				    &kbuf, &ubuf,
 697				    sizeof(target->thread.fpu.fpr[0]));
 698	}
 699
 700	err |= user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 701				   &ctrl_regs, wr_size,
 702				   wr_size + sizeof(ctrl_regs));
 703	return err;
 704}
 705
 706static int msa_set(struct task_struct *target,
 707		   const struct user_regset *regset,
 708		   unsigned int pos, unsigned int count,
 709		   const void *kbuf, const void __user *ubuf)
 710{
 711	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
 712	struct msa_control_regs ctrl_regs;
 713	unsigned int cp_sz;
 714	int i, err, start;
 715
 716	init_fp_ctx(target);
 717
 718	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
 719		/* Trivially copy the vector registers */
 720		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 721					 &target->thread.fpu.fpr,
 722					 0, wr_size);
 723	} else {
 724		/* Copy as much context as possible */
 725		cp_sz = min_t(unsigned int, regset->size,
 726			      sizeof(target->thread.fpu.fpr[0]));
 727
 728		i = start = err = 0;
 729		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
 730			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 731						  &target->thread.fpu.fpr[i],
 732						  start, start + cp_sz);
 733		}
 734	}
 735
 736	if (!err)
 737		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
 738					 wr_size, wr_size + sizeof(ctrl_regs));
 739	if (!err) {
 740		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
 741		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
 742	}
 743
 744	return err;
 745}
 746
 747#endif /* CONFIG_CPU_HAS_MSA */
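
msa_get()/msa_set() lay the NT_MIPS_MSA regset out as 32 vector registers of regset->size (16 bytes each, padded with 0xff where no live context exists) followed by the struct msa_control_regs defined above. A hedged userspace mirror (illustrative name):

#include <stdint.h>

/* Payload produced by msa_get() for PTRACE_GETREGSET with NT_MIPS_MSA. */
struct mips_msa_regs {
	uint8_t  wr[32][16];	/* $w0..$w31 */
	uint32_t fir;
	uint32_t fcsr;
	uint32_t msair;
	uint32_t msacsr;
};
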
 748
 749#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 750
 751/*
 752 * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
 753 */
 754static int dsp32_get(struct task_struct *target,
 755		     const struct user_regset *regset,
 756		     unsigned int pos, unsigned int count,
 757		     void *kbuf, void __user *ubuf)
 758{
 759	unsigned int start, num_regs, i;
 760	u32 dspregs[NUM_DSP_REGS + 1];
 761
 762	BUG_ON(count % sizeof(u32));
 763
 764	if (!cpu_has_dsp)
 765		return -EIO;
 766
 767	start = pos / sizeof(u32);
 768	num_regs = count / sizeof(u32);
 769
 770	if (start + num_regs > NUM_DSP_REGS + 1)
 771		return -EIO;
 772
 773	for (i = start; i < num_regs; i++)
 774		switch (i) {
 775		case 0 ... NUM_DSP_REGS - 1:
 776			dspregs[i] = target->thread.dsp.dspr[i];
 777			break;
 778		case NUM_DSP_REGS:
 779			dspregs[i] = target->thread.dsp.dspcontrol;
 780			break;
 781		}
 782	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 783				   sizeof(dspregs));
 784}
 785
 786/*
 787 * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
 788 */
 789static int dsp32_set(struct task_struct *target,
 790		     const struct user_regset *regset,
 791		     unsigned int pos, unsigned int count,
 792		     const void *kbuf, const void __user *ubuf)
 793{
 794	unsigned int start, num_regs, i;
 795	u32 dspregs[NUM_DSP_REGS + 1];
 796	int err;
 797
 798	BUG_ON(count % sizeof(u32));
 799
 800	if (!cpu_has_dsp)
 801		return -EIO;
 802
 803	start = pos / sizeof(u32);
 804	num_regs = count / sizeof(u32);
 805
 806	if (start + num_regs > NUM_DSP_REGS + 1)
 807		return -EIO;
 808
 809	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 810				 sizeof(dspregs));
 811	if (err)
 812		return err;
 813
 814	for (i = start; i < num_regs; i++)
 815		switch (i) {
 816		case 0 ... NUM_DSP_REGS - 1:
 817			target->thread.dsp.dspr[i] = (s32)dspregs[i];
 818			break;
 819		case NUM_DSP_REGS:
 820			target->thread.dsp.dspcontrol = (s32)dspregs[i];
 821			break;
 822		}
 823
 824	return 0;
 825}
 826
 827#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 828
 829#ifdef CONFIG_64BIT
 830
 831/*
 832 * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
 833 */
 834static int dsp64_get(struct task_struct *target,
 835		     const struct user_regset *regset,
 836		     unsigned int pos, unsigned int count,
 837		     void *kbuf, void __user *ubuf)
 838{
 839	unsigned int start, num_regs, i;
 840	u64 dspregs[NUM_DSP_REGS + 1];
 841
 842	BUG_ON(count % sizeof(u64));
 843
 844	if (!cpu_has_dsp)
 845		return -EIO;
 846
 847	start = pos / sizeof(u64);
 848	num_regs = count / sizeof(u64);
 849
 850	if (start + num_regs > NUM_DSP_REGS + 1)
 851		return -EIO;
 852
 853	for (i = start; i < num_regs; i++)
 854		switch (i) {
 855		case 0 ... NUM_DSP_REGS - 1:
 856			dspregs[i] = target->thread.dsp.dspr[i];
 857			break;
 858		case NUM_DSP_REGS:
 859			dspregs[i] = target->thread.dsp.dspcontrol;
 860			break;
 861		}
 862	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 863				   sizeof(dspregs));
 864}
 865
 866/*
 867 * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
 868 */
 869static int dsp64_set(struct task_struct *target,
 870		     const struct user_regset *regset,
 871		     unsigned int pos, unsigned int count,
 872		     const void *kbuf, const void __user *ubuf)
 873{
 874	unsigned int start, num_regs, i;
 875	u64 dspregs[NUM_DSP_REGS + 1];
 876	int err;
 877
 878	BUG_ON(count % sizeof(u64));
 879
 880	if (!cpu_has_dsp)
 881		return -EIO;
 882
 883	start = pos / sizeof(u64);
 884	num_regs = count / sizeof(u64);
 885
 886	if (start + num_regs > NUM_DSP_REGS + 1)
 887		return -EIO;
 888
 889	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 890				 sizeof(dspregs));
 891	if (err)
 892		return err;
 893
 894	for (i = start; i < num_regs; i++)
 895		switch (i) {
 896		case 0 ... NUM_DSP_REGS - 1:
 897			target->thread.dsp.dspr[i] = dspregs[i];
 898			break;
 899		case NUM_DSP_REGS:
 900			target->thread.dsp.dspcontrol = dspregs[i];
 901			break;
 902		}
 903
 904	return 0;
 905}
 906
 907#endif /* CONFIG_64BIT */
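
The DSP regset handled by dsp32_get()/dsp64_get() and their set counterparts holds NUM_DSP_REGS (6 on MIPS) accumulator values followed by DSPControl, in the native word size. A hedged mirror of the 64-bit view (illustrative name):

#include <stdint.h>

/* Payload of the 64-bit NT_MIPS_DSP regset, as filled in by dsp64_get(). */
struct mips_dsp_regs64 {
	uint64_t dspr[6];	/* the three extra DSP ASE accumulator pairs */
	uint64_t dspcontrol;
};
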
 908
 909/*
 910 * Determine whether the DSP context is present.
 911 */
 912static int dsp_active(struct task_struct *target,
 913		      const struct user_regset *regset)
 914{
 915	return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
 916}
 917
 918enum mips_regset {
 919	REGSET_GPR,
 920	REGSET_DSP,
 921#ifdef CONFIG_MIPS_FP_SUPPORT
 922	REGSET_FPR,
 923	REGSET_FP_MODE,
 924#endif
 925#ifdef CONFIG_CPU_HAS_MSA
 926	REGSET_MSA,
 927#endif
 928};
 929
 930struct pt_regs_offset {
 931	const char *name;
 932	int offset;
 933};
 934
 935#define REG_OFFSET_NAME(reg, r) {					\
 936	.name = #reg,							\
 937	.offset = offsetof(struct pt_regs, r)				\
 938}
 939
 940#define REG_OFFSET_END {						\
 941	.name = NULL,							\
 942	.offset = 0							\
 943}
 944
 945static const struct pt_regs_offset regoffset_table[] = {
 946	REG_OFFSET_NAME(r0, regs[0]),
 947	REG_OFFSET_NAME(r1, regs[1]),
 948	REG_OFFSET_NAME(r2, regs[2]),
 949	REG_OFFSET_NAME(r3, regs[3]),
 950	REG_OFFSET_NAME(r4, regs[4]),
 951	REG_OFFSET_NAME(r5, regs[5]),
 952	REG_OFFSET_NAME(r6, regs[6]),
 953	REG_OFFSET_NAME(r7, regs[7]),
 954	REG_OFFSET_NAME(r8, regs[8]),
 955	REG_OFFSET_NAME(r9, regs[9]),
 956	REG_OFFSET_NAME(r10, regs[10]),
 957	REG_OFFSET_NAME(r11, regs[11]),
 958	REG_OFFSET_NAME(r12, regs[12]),
 959	REG_OFFSET_NAME(r13, regs[13]),
 960	REG_OFFSET_NAME(r14, regs[14]),
 961	REG_OFFSET_NAME(r15, regs[15]),
 962	REG_OFFSET_NAME(r16, regs[16]),
 963	REG_OFFSET_NAME(r17, regs[17]),
 964	REG_OFFSET_NAME(r18, regs[18]),
 965	REG_OFFSET_NAME(r19, regs[19]),
 966	REG_OFFSET_NAME(r20, regs[20]),
 967	REG_OFFSET_NAME(r21, regs[21]),
 968	REG_OFFSET_NAME(r22, regs[22]),
 969	REG_OFFSET_NAME(r23, regs[23]),
 970	REG_OFFSET_NAME(r24, regs[24]),
 971	REG_OFFSET_NAME(r25, regs[25]),
 972	REG_OFFSET_NAME(r26, regs[26]),
 973	REG_OFFSET_NAME(r27, regs[27]),
 974	REG_OFFSET_NAME(r28, regs[28]),
 975	REG_OFFSET_NAME(r29, regs[29]),
 976	REG_OFFSET_NAME(r30, regs[30]),
 977	REG_OFFSET_NAME(r31, regs[31]),
 978	REG_OFFSET_NAME(c0_status, cp0_status),
 979	REG_OFFSET_NAME(hi, hi),
 980	REG_OFFSET_NAME(lo, lo),
 981#ifdef CONFIG_CPU_HAS_SMARTMIPS
 982	REG_OFFSET_NAME(acx, acx),
 983#endif
 984	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
 985	REG_OFFSET_NAME(c0_cause, cp0_cause),
 986	REG_OFFSET_NAME(c0_epc, cp0_epc),
 987#ifdef CONFIG_CPU_CAVIUM_OCTEON
 988	REG_OFFSET_NAME(mpl0, mpl[0]),
 989	REG_OFFSET_NAME(mpl1, mpl[1]),
 990	REG_OFFSET_NAME(mpl2, mpl[2]),
 991	REG_OFFSET_NAME(mtp0, mtp[0]),
 992	REG_OFFSET_NAME(mtp1, mtp[1]),
 993	REG_OFFSET_NAME(mtp2, mtp[2]),
 994#endif
 995	REG_OFFSET_END,
 996};
 997
 998/**
 999 * regs_query_register_offset() - query register offset from its name
1000 * @name:       the name of a register
1001 *
1002 * regs_query_register_offset() returns the offset of a register in struct
1003 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
1004 */
1005int regs_query_register_offset(const char *name)
1006{
1007        const struct pt_regs_offset *roff;
1008        for (roff = regoffset_table; roff->name != NULL; roff++)
1009                if (!strcmp(roff->name, name))
1010                        return roff->offset;
1011        return -EINVAL;
1012}
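
regs_query_register_offset() backs the register-by-name lookups used by kprobe event arguments. Paired with regs_get_register() from asm/ptrace.h (assumed available here, as on other architectures with HAVE_REGS_AND_STACK_ACCESS_API), it turns a name into a value. A hedged in-kernel sketch:

/* Fetch a register from a struct pt_regs by its table name, e.g. "c0_epc". */
static unsigned long fetch_reg_by_name(struct pt_regs *regs, const char *name)
{
	int off = regs_query_register_offset(name);

	if (off < 0)
		return 0;
	return regs_get_register(regs, off);
}
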
1013
1014#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
1015
1016static const struct user_regset mips_regsets[] = {
1017	[REGSET_GPR] = {
1018		.core_note_type	= NT_PRSTATUS,
1019		.n		= ELF_NGREG,
1020		.size		= sizeof(unsigned int),
1021		.align		= sizeof(unsigned int),
1022		.get		= gpr32_get,
1023		.set		= gpr32_set,
1024	},
1025	[REGSET_DSP] = {
1026		.core_note_type	= NT_MIPS_DSP,
1027		.n		= NUM_DSP_REGS + 1,
1028		.size		= sizeof(u32),
1029		.align		= sizeof(u32),
1030		.get		= dsp32_get,
1031		.set		= dsp32_set,
1032		.active		= dsp_active,
1033	},
1034#ifdef CONFIG_MIPS_FP_SUPPORT
1035	[REGSET_FPR] = {
1036		.core_note_type	= NT_PRFPREG,
1037		.n		= ELF_NFPREG,
1038		.size		= sizeof(elf_fpreg_t),
1039		.align		= sizeof(elf_fpreg_t),
1040		.get		= fpr_get,
1041		.set		= fpr_set,
1042	},
1043	[REGSET_FP_MODE] = {
1044		.core_note_type	= NT_MIPS_FP_MODE,
1045		.n		= 1,
1046		.size		= sizeof(int),
1047		.align		= sizeof(int),
1048		.get		= fp_mode_get,
1049		.set		= fp_mode_set,
1050	},
1051#endif
1052#ifdef CONFIG_CPU_HAS_MSA
1053	[REGSET_MSA] = {
1054		.core_note_type	= NT_MIPS_MSA,
1055		.n		= NUM_FPU_REGS + 1,
1056		.size		= 16,
1057		.align		= 16,
1058		.get		= msa_get,
1059		.set		= msa_set,
1060	},
1061#endif
1062};
1063
1064static const struct user_regset_view user_mips_view = {
1065	.name		= "mips",
1066	.e_machine	= ELF_ARCH,
1067	.ei_osabi	= ELF_OSABI,
1068	.regsets	= mips_regsets,
1069	.n		= ARRAY_SIZE(mips_regsets),
1070};
1071
1072#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
1073
1074#ifdef CONFIG_64BIT
1075
1076static const struct user_regset mips64_regsets[] = {
1077	[REGSET_GPR] = {
1078		.core_note_type	= NT_PRSTATUS,
1079		.n		= ELF_NGREG,
1080		.size		= sizeof(unsigned long),
1081		.align		= sizeof(unsigned long),
1082		.get		= gpr64_get,
1083		.set		= gpr64_set,
1084	},
1085	[REGSET_DSP] = {
1086		.core_note_type	= NT_MIPS_DSP,
1087		.n		= NUM_DSP_REGS + 1,
1088		.size		= sizeof(u64),
1089		.align		= sizeof(u64),
1090		.get		= dsp64_get,
1091		.set		= dsp64_set,
1092		.active		= dsp_active,
1093	},
1094#ifdef CONFIG_MIPS_FP_SUPPORT
1095	[REGSET_FP_MODE] = {
1096		.core_note_type	= NT_MIPS_FP_MODE,
1097		.n		= 1,
1098		.size		= sizeof(int),
1099		.align		= sizeof(int),
1100		.get		= fp_mode_get,
1101		.set		= fp_mode_set,
1102	},
1103	[REGSET_FPR] = {
1104		.core_note_type	= NT_PRFPREG,
1105		.n		= ELF_NFPREG,
1106		.size		= sizeof(elf_fpreg_t),
1107		.align		= sizeof(elf_fpreg_t),
1108		.get		= fpr_get,
1109		.set		= fpr_set,
1110	},
1111#endif
1112#ifdef CONFIG_CPU_HAS_MSA
1113	[REGSET_MSA] = {
1114		.core_note_type	= NT_MIPS_MSA,
1115		.n		= NUM_FPU_REGS + 1,
1116		.size		= 16,
1117		.align		= 16,
1118		.get		= msa_get,
1119		.set		= msa_set,
1120	},
1121#endif
1122};
1123
1124static const struct user_regset_view user_mips64_view = {
1125	.name		= "mips64",
1126	.e_machine	= ELF_ARCH,
1127	.ei_osabi	= ELF_OSABI,
1128	.regsets	= mips64_regsets,
1129	.n		= ARRAY_SIZE(mips64_regsets),
1130};
1131
1132#ifdef CONFIG_MIPS32_N32
1133
1134static const struct user_regset_view user_mipsn32_view = {
1135	.name		= "mipsn32",
1136	.e_flags	= EF_MIPS_ABI2,
1137	.e_machine	= ELF_ARCH,
1138	.ei_osabi	= ELF_OSABI,
1139	.regsets	= mips64_regsets,
1140	.n		= ARRAY_SIZE(mips64_regsets),
1141};
1142
1143#endif /* CONFIG_MIPS32_N32 */
1144
1145#endif /* CONFIG_64BIT */
1146
1147const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1148{
1149#ifdef CONFIG_32BIT
1150	return &user_mips_view;
1151#else
1152#ifdef CONFIG_MIPS32_O32
1153	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
1154		return &user_mips_view;
1155#endif
1156#ifdef CONFIG_MIPS32_N32
1157	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
1158		return &user_mipsn32_view;
1159#endif
1160	return &user_mips64_view;
1161#endif
1162}
1163
1164long arch_ptrace(struct task_struct *child, long request,
1165		 unsigned long addr, unsigned long data)
1166{
1167	int ret;
1168	void __user *addrp = (void __user *) addr;
1169	void __user *datavp = (void __user *) data;
1170	unsigned long __user *datalp = (void __user *) data;
1171
1172	switch (request) {
1173	/* when I and D space are separate, these will need to be fixed. */
1174	case PTRACE_PEEKTEXT: /* read word at location addr. */
1175	case PTRACE_PEEKDATA:
1176		ret = generic_ptrace_peekdata(child, addr, data);
1177		break;
1178
1179	/* Read the word at location addr in the USER area. */
1180	case PTRACE_PEEKUSR: {
1181		struct pt_regs *regs;
1182		unsigned long tmp = 0;
1183
1184		regs = task_pt_regs(child);
1185		ret = 0;  /* Default return value. */
1186
1187		switch (addr) {
1188		case 0 ... 31:
1189			tmp = regs->regs[addr];
1190			break;
1191#ifdef CONFIG_MIPS_FP_SUPPORT
1192		case FPR_BASE ... FPR_BASE + 31: {
1193			union fpureg *fregs;
1194
1195			if (!tsk_used_math(child)) {
1196				/* FP not yet used */
1197				tmp = -1;
1198				break;
1199			}
1200			fregs = get_fpu_regs(child);
1201
1202#ifdef CONFIG_32BIT
1203			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1204				/*
1205				 * The odd registers are actually the high
1206				 * order bits of the values stored in the even
1207				 * registers.
1208				 */
1209				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1210						addr & 1);
1211				break;
1212			}
1213#endif
1214			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
1215			break;
1216		}
1217		case FPC_CSR:
1218			tmp = child->thread.fpu.fcr31;
1219			break;
1220		case FPC_EIR:
1221			/* implementation / version register */
1222			tmp = boot_cpu_data.fpu_id;
1223			break;
1224#endif
1225		case PC:
1226			tmp = regs->cp0_epc;
1227			break;
1228		case CAUSE:
1229			tmp = regs->cp0_cause;
1230			break;
1231		case BADVADDR:
1232			tmp = regs->cp0_badvaddr;
1233			break;
1234		case MMHI:
1235			tmp = regs->hi;
1236			break;
1237		case MMLO:
1238			tmp = regs->lo;
1239			break;
1240#ifdef CONFIG_CPU_HAS_SMARTMIPS
1241		case ACX:
1242			tmp = regs->acx;
1243			break;
1244#endif
1245		case DSP_BASE ... DSP_BASE + 5: {
1246			dspreg_t *dregs;
1247
1248			if (!cpu_has_dsp) {
1249				tmp = 0;
1250				ret = -EIO;
1251				goto out;
1252			}
1253			dregs = __get_dsp_regs(child);
1254			tmp = dregs[addr - DSP_BASE];
1255			break;
1256		}
1257		case DSP_CONTROL:
1258			if (!cpu_has_dsp) {
1259				tmp = 0;
1260				ret = -EIO;
1261				goto out;
1262			}
1263			tmp = child->thread.dsp.dspcontrol;
1264			break;
1265		default:
1266			tmp = 0;
1267			ret = -EIO;
1268			goto out;
1269		}
1270		ret = put_user(tmp, datalp);
1271		break;
1272	}
1273
1274	/* when I and D space are separate, this will have to be fixed. */
1275	case PTRACE_POKETEXT: /* write the word at location addr. */
1276	case PTRACE_POKEDATA:
1277		ret = generic_ptrace_pokedata(child, addr, data);
1278		break;
1279
1280	case PTRACE_POKEUSR: {
1281		struct pt_regs *regs;
1282		ret = 0;
1283		regs = task_pt_regs(child);
1284
1285		switch (addr) {
1286		case 0 ... 31:
1287			regs->regs[addr] = data;
1288			/* System call number may have been changed */
1289			if (addr == 2)
1290				mips_syscall_update_nr(child, regs);
1291			else if (addr == 4 &&
1292				 mips_syscall_is_indirect(child, regs))
1293				mips_syscall_update_nr(child, regs);
1294			break;
1295#ifdef CONFIG_MIPS_FP_SUPPORT
1296		case FPR_BASE ... FPR_BASE + 31: {
1297			union fpureg *fregs = get_fpu_regs(child);
1298
1299			init_fp_ctx(child);
1300#ifdef CONFIG_32BIT
1301			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1302				/*
1303				 * The odd registers are actually the high
1304				 * order bits of the values stored in the even
1305				 * registers.
1306				 */
1307				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1308					  addr & 1, data);
1309				break;
1310			}
1311#endif
1312			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
1313			break;
1314		}
1315		case FPC_CSR:
1316			init_fp_ctx(child);
1317			ptrace_setfcr31(child, data);
1318			break;
1319#endif
1320		case PC:
1321			regs->cp0_epc = data;
1322			break;
1323		case MMHI:
1324			regs->hi = data;
1325			break;
1326		case MMLO:
1327			regs->lo = data;
1328			break;
1329#ifdef CONFIG_CPU_HAS_SMARTMIPS
1330		case ACX:
1331			regs->acx = data;
1332			break;
1333#endif
1334		case DSP_BASE ... DSP_BASE + 5: {
1335			dspreg_t *dregs;
1336
1337			if (!cpu_has_dsp) {
1338				ret = -EIO;
1339				break;
1340			}
1341
1342			dregs = __get_dsp_regs(child);
1343			dregs[addr - DSP_BASE] = data;
1344			break;
1345		}
1346		case DSP_CONTROL:
1347			if (!cpu_has_dsp) {
1348				ret = -EIO;
1349				break;
1350			}
1351			child->thread.dsp.dspcontrol = data;
1352			break;
1353		default:
1354			/* The rest are not allowed. */
1355			ret = -EIO;
1356			break;
1357		}
1358		break;
1359		}
1360
1361	case PTRACE_GETREGS:
1362		ret = ptrace_getregs(child, datavp);
1363		break;
1364
1365	case PTRACE_SETREGS:
1366		ret = ptrace_setregs(child, datavp);
1367		break;
1368
1369#ifdef CONFIG_MIPS_FP_SUPPORT
1370	case PTRACE_GETFPREGS:
1371		ret = ptrace_getfpregs(child, datavp);
1372		break;
1373
1374	case PTRACE_SETFPREGS:
1375		ret = ptrace_setfpregs(child, datavp);
1376		break;
1377#endif
1378	case PTRACE_GET_THREAD_AREA:
1379		ret = put_user(task_thread_info(child)->tp_value, datalp);
1380		break;
1381
1382	case PTRACE_GET_WATCH_REGS:
1383		ret = ptrace_get_watch_regs(child, addrp);
1384		break;
1385
1386	case PTRACE_SET_WATCH_REGS:
1387		ret = ptrace_set_watch_regs(child, addrp);
1388		break;
1389
1390	default:
1391		ret = ptrace_request(child, request, addr, data);
1392		break;
1393	}
1394 out:
1395	return ret;
1396}
1397
1398/*
1399 * Notification of system call entry/exit
1400 * - triggered by current->work.syscall_trace
1401 */
1402asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
1403{
1404	user_exit();
1405
1406	current_thread_info()->syscall = syscall;
1407
1408	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
1409		if (tracehook_report_syscall_entry(regs))
1410			return -1;
1411		syscall = current_thread_info()->syscall;
1412	}
1413
1414#ifdef CONFIG_SECCOMP
1415	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
1416		int ret, i;
1417		struct seccomp_data sd;
1418		unsigned long args[6];
1419
1420		sd.nr = syscall;
1421		sd.arch = syscall_get_arch(current);
1422		syscall_get_arguments(current, regs, args);
1423		for (i = 0; i < 6; i++)
1424			sd.args[i] = args[i];
1425		sd.instruction_pointer = KSTK_EIP(current);
1426
1427		ret = __secure_computing(&sd);
1428		if (ret == -1)
1429			return ret;
1430		syscall = current_thread_info()->syscall;
1431	}
1432#endif
1433
1434	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1435		trace_sys_enter(regs, regs->regs[2]);
1436
1437	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
1438			    regs->regs[6], regs->regs[7]);
1439
1440	/*
1441	 * Negative syscall numbers are mistaken for rejected syscalls, but
1442	 * won't have had the return value set appropriately, so we do so now.
1443	 */
1444	if (syscall < 0)
1445		syscall_set_return_value(current, regs, -ENOSYS, 0);
1446	return syscall;
1447}
1448
1449/*
1450 * Notification of system call entry/exit
1451 * - triggered by current->work.syscall_trace
1452 */
1453asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1454{
1455        /*
1456	 * We may come here right after calling schedule_user()
1457	 * or do_notify_resume(), in which case we can be in RCU
1458	 * user mode.
1459	 */
1460	user_exit();
1461
1462	audit_syscall_exit(regs);
1463
1464	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1465		trace_sys_exit(regs, regs_return_value(regs));
1466
1467	if (test_thread_flag(TIF_SYSCALL_TRACE))
1468		tracehook_report_syscall_exit(regs, 0);
1469
1470	user_enter();
1471}
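
syscall_trace_enter() and syscall_trace_leave() above run only when the tracee has TIF_SYSCALL_TRACE set, which is what a PTRACE_SYSCALL-driven tracer toggles. A hedged userspace sketch of that driving loop (error handling kept minimal; not part of this file):

#include <stddef.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Stop the already-attached, stopped tracee at every syscall entry and exit. */
static void trace_syscalls(pid_t pid)
{
	int status;

	for (;;) {
		/* Resume the child; it stops again at the next syscall boundary. */
		if (ptrace(PTRACE_SYSCALL, pid, NULL, NULL) == -1)
			break;
		if (waitpid(pid, &status, 0) == -1 || WIFEXITED(status))
			break;
		/* Inspect registers here, e.g. with PTRACE_GETREGS. */
	}
}
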
arch/mips/kernel/ptrace.c (v6.13.7)
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1992 Ross Biro
   7 * Copyright (C) Linus Torvalds
   8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
   9 * Copyright (C) 1996 David S. Miller
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 1999 MIPS Technologies, Inc.
  12 * Copyright (C) 2000 Ulf Carlsson
  13 *
  14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
  15 * binaries.
  16 */
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/elf.h>
  20#include <linux/kernel.h>
  21#include <linux/sched.h>
  22#include <linux/sched/task_stack.h>
  23#include <linux/mm.h>
  24#include <linux/errno.h>
  25#include <linux/ptrace.h>
  26#include <linux/regset.h>
  27#include <linux/smp.h>
  28#include <linux/security.h>
  29#include <linux/stddef.h>
  30#include <linux/audit.h>
  31#include <linux/seccomp.h>
  32#include <linux/ftrace.h>
  33
  34#include <asm/branch.h>
  35#include <asm/byteorder.h>
  36#include <asm/cpu.h>
  37#include <asm/cpu-info.h>
  38#include <asm/dsp.h>
  39#include <asm/fpu.h>
  40#include <asm/mipsregs.h>
  41#include <asm/mipsmtregs.h>
  42#include <asm/page.h>
  43#include <asm/processor.h>
  44#include <asm/syscall.h>
  45#include <linux/uaccess.h>
  46#include <asm/bootinfo.h>
  47#include <asm/reg.h>
  48
  49#define CREATE_TRACE_POINTS
  50#include <trace/events/syscalls.h>
  51
  52unsigned long exception_ip(struct pt_regs *regs)
  53{
  54	return exception_epc(regs);
  55}
  56EXPORT_SYMBOL(exception_ip);
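/*
 * Annotation (not in the upstream file): exception_epc(), from asm/branch.h,
 * compensates for the MIPS branch delay slot.  When Cause.BD is set, cp0_epc
 * points at the branch instruction, while the instruction that actually took
 * the exception sits in the following delay slot, so the reported instruction
 * pointer is advanced past the branch accordingly.
 */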
  57
  58/*
  59 * Called by kernel/ptrace.c when detaching..
  60 *
  61 * Make sure single step bits etc are not set.
  62 */
  63void ptrace_disable(struct task_struct *child)
  64{
  65	/* Don't load the watchpoint registers for the ex-child. */
  66	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
  67}
  68
  69/*
  70 * Read a general register set.	 We always use the 64-bit format, even
  71 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
  72 * Registers are sign extended to fill the available space.
  73 */
  74int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
  75{
  76	struct pt_regs *regs;
  77	int i;
  78
  79	if (!access_ok(data, 38 * 8))
  80		return -EIO;
  81
  82	regs = task_pt_regs(child);
  83
  84	for (i = 0; i < 32; i++)
  85		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
  86	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
  87	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
  88	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
  89	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
  90	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
  91	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
  92
  93	return 0;
  94}
  95
  96/*
  97 * Write a general register set.  As for PTRACE_GETREGS, we always use
  98 * the 64-bit format.  On a 32-bit kernel only the lower order half
  99 * (according to endianness) will be used.
 100 */
 101int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
 102{
 103	struct pt_regs *regs;
 104	int i;
 105
 106	if (!access_ok(data, 38 * 8))
 107		return -EIO;
 108
 109	regs = task_pt_regs(child);
 110
 111	for (i = 0; i < 32; i++)
 112		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
 113	__get_user(regs->lo, (__s64 __user *)&data->lo);
 114	__get_user(regs->hi, (__s64 __user *)&data->hi);
 115	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
 116
 117	/* badvaddr, status, and cause may not be written.  */
 118
 119	/* System call number may have been changed */
 120	mips_syscall_update_nr(child, regs);
 121
 122	return 0;
 123}
 124
 125int ptrace_get_watch_regs(struct task_struct *child,
 126			  struct pt_watch_regs __user *addr)
 127{
 128	enum pt_watch_style style;
 129	int i;
 130
 131	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 132		return -EIO;
 133	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
 134		return -EIO;
 135
 136#ifdef CONFIG_32BIT
 137	style = pt_watch_style_mips32;
 138#define WATCH_STYLE mips32
 139#else
 140	style = pt_watch_style_mips64;
 141#define WATCH_STYLE mips64
 142#endif
 143
 144	__put_user(style, &addr->style);
 145	__put_user(boot_cpu_data.watch_reg_use_cnt,
 146		   &addr->WATCH_STYLE.num_valid);
 147	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 148		__put_user(child->thread.watch.mips3264.watchlo[i],
 149			   &addr->WATCH_STYLE.watchlo[i]);
 150		__put_user(child->thread.watch.mips3264.watchhi[i] &
 151				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
 152			   &addr->WATCH_STYLE.watchhi[i]);
 153		__put_user(boot_cpu_data.watch_reg_masks[i],
 154			   &addr->WATCH_STYLE.watch_masks[i]);
 155	}
 156	for (; i < 8; i++) {
 157		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
 158		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
 159		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
 160	}
 161
 162	return 0;
 163}
 164
 165int ptrace_set_watch_regs(struct task_struct *child,
 166			  struct pt_watch_regs __user *addr)
 167{
 168	int i;
 169	int watch_active = 0;
 170	unsigned long lt[NUM_WATCH_REGS];
 171	u16 ht[NUM_WATCH_REGS];
 172
 173	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 174		return -EIO;
 175	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
 176		return -EIO;
 177	/* Check the values. */
 178	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 179		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
 180#ifdef CONFIG_32BIT
 181		if (lt[i] & __UA_LIMIT)
 182			return -EINVAL;
 183#else
 184		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
 185			if (lt[i] & 0xffffffff80000000UL)
 186				return -EINVAL;
 187		} else {
 188			if (lt[i] & __UA_LIMIT)
 189				return -EINVAL;
 190		}
 191#endif
 192		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
 193		if (ht[i] & ~MIPS_WATCHHI_MASK)
 194			return -EINVAL;
 195	}
 196	/* Install them. */
 197	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 198		if (lt[i] & MIPS_WATCHLO_IRW)
 199			watch_active = 1;
 200		child->thread.watch.mips3264.watchlo[i] = lt[i];
 201		/* Set the G bit. */
 202		child->thread.watch.mips3264.watchhi[i] = ht[i];
 203	}
 204
 205	if (watch_active)
 206		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
 207	else
 208		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
 209
 210	return 0;
 211}
 212
 213/* regset get/set implementations */
 214
 215#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 216
 217static int gpr32_get(struct task_struct *target,
 218		     const struct user_regset *regset,
 219		     struct membuf to)
 220{
 221	struct pt_regs *regs = task_pt_regs(target);
 222	u32 uregs[ELF_NGREG] = {};
 223
 224	mips_dump_regs32(uregs, regs);
 225	return membuf_write(&to, uregs, sizeof(uregs));
 226}
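
Relative to the v5.4 listing above, the regset "get" side in this version uses the struct membuf interface from linux/regset.h: the core hands the handler a cursor holding a buffer pointer plus remaining length, and membuf_write()/membuf_store() copy data, advance the cursor and return the space still left, which is why the pos/count/kbuf/ubuf plumbing is gone. A minimal hedged sketch of the idiom (illustrative only):

/* Skeleton of a membuf-style regset getter. */
static int example_regset_get(struct task_struct *target,
			      const struct user_regset *regset,
			      struct membuf to)
{
	u32 header = 0;

	membuf_write(&to, &header, sizeof(header));	/* copy a block */
	return membuf_store(&to, (u64)0);		/* copy one scalar value */
}
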
 227
 228static int gpr32_set(struct task_struct *target,
 229		     const struct user_regset *regset,
 230		     unsigned int pos, unsigned int count,
 231		     const void *kbuf, const void __user *ubuf)
 232{
 233	struct pt_regs *regs = task_pt_regs(target);
 234	u32 uregs[ELF_NGREG];
 235	unsigned start, num_regs, i;
 236	int err;
 237
 238	start = pos / sizeof(u32);
 239	num_regs = count / sizeof(u32);
 240
 241	if (start + num_regs > ELF_NGREG)
 242		return -EIO;
 243
 244	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 245				 sizeof(uregs));
 246	if (err)
 247		return err;
 248
 249	for (i = start; i < num_regs; i++) {
 250		/*
 251		 * Cast all values to signed here so that if this is a 64-bit
 252		 * kernel, the supplied 32-bit values will be sign extended.
 253		 */
 254		switch (i) {
 255		case MIPS32_EF_R1 ... MIPS32_EF_R25:
 256			/* k0/k1 are ignored. */
 257		case MIPS32_EF_R28 ... MIPS32_EF_R31:
 258			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
 259			break;
 260		case MIPS32_EF_LO:
 261			regs->lo = (s32)uregs[i];
 262			break;
 263		case MIPS32_EF_HI:
 264			regs->hi = (s32)uregs[i];
 265			break;
 266		case MIPS32_EF_CP0_EPC:
 267			regs->cp0_epc = (s32)uregs[i];
 268			break;
 269		}
 270	}
 271
 272	/* System call number may have been changed */
 273	mips_syscall_update_nr(target, regs);
 274
 275	return 0;
 276}
 277
 278#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 279
 280#ifdef CONFIG_64BIT
 281
 282static int gpr64_get(struct task_struct *target,
 283		     const struct user_regset *regset,
 284		     struct membuf to)
 285{
 286	struct pt_regs *regs = task_pt_regs(target);
 287	u64 uregs[ELF_NGREG] = {};
 288
 289	mips_dump_regs64(uregs, regs);
 290	return membuf_write(&to, uregs, sizeof(uregs));
 291}
 292
 293static int gpr64_set(struct task_struct *target,
 294		     const struct user_regset *regset,
 295		     unsigned int pos, unsigned int count,
 296		     const void *kbuf, const void __user *ubuf)
 297{
 298	struct pt_regs *regs = task_pt_regs(target);
 299	u64 uregs[ELF_NGREG];
 300	unsigned start, num_regs, i;
 301	int err;
 302
 303	start = pos / sizeof(u64);
 304	num_regs = count / sizeof(u64);
 305
 306	if (start + num_regs > ELF_NGREG)
 307		return -EIO;
 308
 309	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 310				 sizeof(uregs));
 311	if (err)
 312		return err;
 313
 314	for (i = start; i < num_regs; i++) {
 315		switch (i) {
 316		case MIPS64_EF_R1 ... MIPS64_EF_R25:
 317			/* k0/k1 are ignored. */
 318		case MIPS64_EF_R28 ... MIPS64_EF_R31:
 319			regs->regs[i - MIPS64_EF_R0] = uregs[i];
 320			break;
 321		case MIPS64_EF_LO:
 322			regs->lo = uregs[i];
 323			break;
 324		case MIPS64_EF_HI:
 325			regs->hi = uregs[i];
 326			break;
 327		case MIPS64_EF_CP0_EPC:
 328			regs->cp0_epc = uregs[i];
 329			break;
 330		}
 331	}
 332
 333	/* System call number may have been changed */
 334	mips_syscall_update_nr(target, regs);
 335
 336	return 0;
 337}
 338
 339#endif /* CONFIG_64BIT */
 340
 341
 342#ifdef CONFIG_MIPS_FP_SUPPORT
 343
 344/*
 345 * Poke at FCSR according to its mask.  Set the Cause bits even
 346 * if a corresponding Enable bit is set.  This will be noticed at
 347 * the time the thread is switched to and SIGFPE thrown accordingly.
 348 */
 349static void ptrace_setfcr31(struct task_struct *child, u32 value)
 350{
 351	u32 fcr31;
 352	u32 mask;
 353
 354	fcr31 = child->thread.fpu.fcr31;
 355	mask = boot_cpu_data.fpu_msk31;
 356	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
 357}
 358
 359int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
 360{
 361	int i;
 362
 363	if (!access_ok(data, 33 * 8))
 364		return -EIO;
 365
 366	if (tsk_used_math(child)) {
 367		union fpureg *fregs = get_fpu_regs(child);
 368		for (i = 0; i < 32; i++)
 369			__put_user(get_fpr64(&fregs[i], 0),
 370				   i + (__u64 __user *)data);
 371	} else {
 372		for (i = 0; i < 32; i++)
 373			__put_user((__u64) -1, i + (__u64 __user *) data);
 374	}
 375
 376	__put_user(child->thread.fpu.fcr31, data + 64);
 377	__put_user(boot_cpu_data.fpu_id, data + 65);
 378
 379	return 0;
 380}
 381
 382int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
 383{
 384	union fpureg *fregs;
 385	u64 fpr_val;
 386	u32 value;
 387	int i;
 388
 389	if (!access_ok(data, 33 * 8))
 390		return -EIO;
 391
 392	init_fp_ctx(child);
 393	fregs = get_fpu_regs(child);
 394
 395	for (i = 0; i < 32; i++) {
 396		__get_user(fpr_val, i + (__u64 __user *)data);
 397		set_fpr64(&fregs[i], 0, fpr_val);
 398	}
 399
 400	__get_user(value, data + 64);
 401	ptrace_setfcr31(child, value);
 402
 403	/* FIR may not be written.  */
 404
 405	return 0;
 406}
 407
 408/*
 409 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 410 * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
 411 * correspond 1:1 to buffer slots.  Only general registers are copied.
 412 */
 413static void fpr_get_fpa(struct task_struct *target,
 414		       struct membuf *to)
 415{
 416	membuf_write(to, &target->thread.fpu,
 417			NUM_FPU_REGS * sizeof(elf_fpreg_t));
 418}
 419
 420/*
 421 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 422 * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
 423 * general register slots are copied to buffer slots.  Only general
 424 * registers are copied.
 425 */
 426static void fpr_get_msa(struct task_struct *target, struct membuf *to)
 427{
 428	unsigned int i;
 429
 430	BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));
 431	for (i = 0; i < NUM_FPU_REGS; i++)
 432		membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
 433}
 434
 435/*
 436 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
 437 * Choose the appropriate helper for general registers, and then copy
 438 * the FCSR and FIR registers separately.
 439 */
 440static int fpr_get(struct task_struct *target,
 441		   const struct user_regset *regset,
 442		   struct membuf to)
 443{
 444	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 445		fpr_get_fpa(target, &to);
 446	else
 447		fpr_get_msa(target, &to);
 448
 449	membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
 450	membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32));
 451	return 0;
 452}
 453
 454/*
 455 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 456 * !CONFIG_CPU_HAS_MSA variant.   Buffer slots correspond 1:1 to FP
 457 * context's general register slots.  Only general registers are copied.
 458 */
 459static int fpr_set_fpa(struct task_struct *target,
 460		       unsigned int *pos, unsigned int *count,
 461		       const void **kbuf, const void __user **ubuf)
 462{
 463	return user_regset_copyin(pos, count, kbuf, ubuf,
 464				  &target->thread.fpu,
 465				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
 466}
 467
 468/*
 469 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 470 * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
 471 * bits only of FP context's general register slots.  Only general
 472 * registers are copied.
 473 */
 474static int fpr_set_msa(struct task_struct *target,
 475		       unsigned int *pos, unsigned int *count,
 476		       const void **kbuf, const void __user **ubuf)
 477{
 478	unsigned int i;
 479	u64 fpr_val;
 480	int err;
 481
 482	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 483	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
 484		err = user_regset_copyin(pos, count, kbuf, ubuf,
 485					 &fpr_val, i * sizeof(elf_fpreg_t),
 486					 (i + 1) * sizeof(elf_fpreg_t));
 487		if (err)
 488			return err;
 489		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
 490	}
 491
 492	return 0;
 493}
 494
 495/*
 496 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 497 * Choose the appropriate helper for general registers, and then copy
 498 * the FCSR register separately.  Ignore the incoming FIR register
 499 * contents though, as the register is read-only.
 500 *
 501 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 502 * which is supposed to have been guaranteed by the kernel before
 503 * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
 504 * so that we can safely avoid preinitializing temporaries for
 505 * partial register writes.
 506 */
 507static int fpr_set(struct task_struct *target,
 508		   const struct user_regset *regset,
 509		   unsigned int pos, unsigned int count,
 510		   const void *kbuf, const void __user *ubuf)
 511{
 512	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 513	const int fir_pos = fcr31_pos + sizeof(u32);
 514	u32 fcr31;
 515	int err;
 516
 517	BUG_ON(count % sizeof(elf_fpreg_t));
 518
 519	if (pos + count > sizeof(elf_fpregset_t))
 520		return -EIO;
 521
 522	init_fp_ctx(target);
 523
 524	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 525		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
 526	else
 527		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
 528	if (err)
 529		return err;
 530
 531	if (count > 0) {
 532		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 533					 &fcr31,
 534					 fcr31_pos, fcr31_pos + sizeof(u32));
 535		if (err)
 536			return err;
 537
 538		ptrace_setfcr31(target, fcr31);
 539	}
 540
 541	if (count > 0) {
 542		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
 543					  fir_pos, fir_pos + sizeof(u32));
 544		return 0;
 545	}
 546
 547	return err;
 548}
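/*
 * Illustrative tracer-side sketch (not part of this file) of fetching the
 * NT_PRFPREG regset served by fpr_get() above via PTRACE_GETREGSET: 32
 * 64-bit FP register slots followed by the 32-bit FCSR and FIR words.
 * The struct and helper names are made up for the example; NT_PRFPREG and
 * struct iovec come from the usual userspace headers.
 *
 *	#include <elf.h>		// NT_PRFPREG
 *	#include <stdint.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>		// struct iovec
 *
 *	struct mips_nt_prfpreg {
 *		uint64_t fpr[32];	// lower 64 bits of each FP register
 *		uint32_t fcsr;		// FP control/status register
 *		uint32_t fir;		// FP implementation register
 *	};
 *
 *	static int read_prfpreg(pid_t pid, struct mips_nt_prfpreg *out)
 *	{
 *		struct iovec iov = { .iov_base = out, .iov_len = sizeof(*out) };
 *
 *		return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
 *	}
 */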
 549
 550/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer.  */
 551static int fp_mode_get(struct task_struct *target,
 552		       const struct user_regset *regset,
 553		       struct membuf to)
 554{
 555	return membuf_store(&to, (int)mips_get_process_fp_mode(target));
 556}
 557
 558/*
 559 * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
 560 *
 561 * We optimize for the case where `count % sizeof(int) == 0', which
 562 * is supposed to have been guaranteed by the kernel before calling
 563 * us, e.g. in `ptrace_regset'.  We enforce that requirement, so
 564 * that we can safely avoid preinitializing temporaries for partial
 565 * mode writes.
 566 */
 567static int fp_mode_set(struct task_struct *target,
 568		       const struct user_regset *regset,
 569		       unsigned int pos, unsigned int count,
 570		       const void *kbuf, const void __user *ubuf)
 571{
 572	int fp_mode;
 573	int err;
 574
 575	BUG_ON(count % sizeof(int));
 576
 577	if (pos + count > sizeof(fp_mode))
 578		return -EIO;
 579
 580	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
 581				 sizeof(fp_mode));
 582	if (err)
 583		return err;
 584
 585	if (count > 0)
 586		err = mips_set_process_fp_mode(target, fp_mode);
 587
 588	return err;
 589}
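/*
 * Illustrative tracer-side sketch (not part of this file) of reading the
 * NT_MIPS_FP_MODE regset handled above.  The single int holds the same
 * PR_FP_MODE_FR/PR_FP_MODE_FRE bitmask that prctl(PR_GET_FP_MODE) reports
 * for the task.  NT_MIPS_FP_MODE is assumed to come from a sufficiently
 * recent <linux/elf.h>; the helper name is made up.
 *
 *	#include <linux/elf.h>		// NT_MIPS_FP_MODE
 *	#include <linux/prctl.h>	// PR_FP_MODE_FR, PR_FP_MODE_FRE
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *
 *	static int tracee_uses_64bit_fpregs(pid_t pid)
 *	{
 *		int mode;
 *		struct iovec iov = { .iov_base = &mode, .iov_len = sizeof(mode) };
 *
 *		if (ptrace(PTRACE_GETREGSET, pid, NT_MIPS_FP_MODE, &iov) == -1)
 *			return -1;
 *		return !!(mode & PR_FP_MODE_FR);	// FR=1: 64-bit FP registers
 *	}
 */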
 590
 591#endif /* CONFIG_MIPS_FP_SUPPORT */
 592
 593#ifdef CONFIG_CPU_HAS_MSA
 594
 595struct msa_control_regs {
 596	unsigned int fir;
 597	unsigned int fcsr;
 598	unsigned int msair;
 599	unsigned int msacsr;
 600};
 601
 602static void copy_pad_fprs(struct task_struct *target,
 603			 const struct user_regset *regset,
 604			 struct membuf *to,
 605			 unsigned int live_sz)
 606{
 607	int i, j;
 608	unsigned long long fill = ~0ull;
 609	unsigned int cp_sz, pad_sz;
 610
 611	cp_sz = min(regset->size, live_sz);
 612	pad_sz = regset->size - cp_sz;
 613	WARN_ON(pad_sz % sizeof(fill));
 614
 615	for (i = 0; i < NUM_FPU_REGS; i++) {
 616		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
 617		for (j = 0; j < (pad_sz / sizeof(fill)); j++)
 618			membuf_store(to, fill);
 619	}
 620}
 621
 622static int msa_get(struct task_struct *target,
 623		   const struct user_regset *regset,
 624		   struct membuf to)
 625{
 626	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
 627	const struct msa_control_regs ctrl_regs = {
 628		.fir = boot_cpu_data.fpu_id,
 629		.fcsr = target->thread.fpu.fcr31,
 630		.msair = boot_cpu_data.msa_id,
 631		.msacsr = target->thread.fpu.msacsr,
 632	};
 633
 634	if (!tsk_used_math(target)) {
 635		/* The task hasn't used FP or MSA, fill with 0xff */
 636		copy_pad_fprs(target, regset, &to, 0);
 637	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
 638		/* Copy scalar FP context, fill the rest with 0xff */
 639		copy_pad_fprs(target, regset, &to, 8);
 640	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
 641		/* Trivially copy the vector registers */
 642		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
 643	} else {
 644		/* Copy as much context as possible, fill the rest with 0xff */
 645		copy_pad_fprs(target, regset, &to,
 646				sizeof(target->thread.fpu.fpr[0]));
 647	}
 648
 649	return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs));
 650}
 651
 652static int msa_set(struct task_struct *target,
 653		   const struct user_regset *regset,
 654		   unsigned int pos, unsigned int count,
 655		   const void *kbuf, const void __user *ubuf)
 656{
 657	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
 658	struct msa_control_regs ctrl_regs;
 659	unsigned int cp_sz;
 660	int i, err, start;
 661
 662	init_fp_ctx(target);
 663
 664	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
 665		/* Trivially copy the vector registers */
 666		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 667					 &target->thread.fpu.fpr,
 668					 0, wr_size);
 669	} else {
 670		/* Copy as much context as possible */
 671		cp_sz = min_t(unsigned int, regset->size,
 672			      sizeof(target->thread.fpu.fpr[0]));
 673
 674		i = start = err = 0;
 675		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
 676			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 677						  &target->thread.fpu.fpr[i],
 678						  start, start + cp_sz);
 679		}
 680	}
 681
 682	if (!err)
 683		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
 684					 wr_size, wr_size + sizeof(ctrl_regs));
 685	if (!err) {
 686		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
 687		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
 688	}
 689
 690	return err;
 691}
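/*
 * Illustrative layout sketch (not part of this file) of the NT_MIPS_MSA
 * regset produced by msa_get() and consumed by msa_set() above, as a
 * tracer sees it through PTRACE_GETREGSET/PTRACE_SETREGSET: 32 128-bit
 * vector register slots followed by the four 32-bit control words of
 * struct msa_control_regs.  Registers the task has never written are
 * reported as all-ones padding by copy_pad_fprs().  The struct name is
 * made up for the example.
 *
 *	#include <stdint.h>
 *
 *	struct mips_nt_msa {
 *		uint64_t wr[32][2];	// 32 x 128-bit MSA vector registers
 *		uint32_t fir;		// FP implementation register
 *		uint32_t fcsr;		// FP control/status register
 *		uint32_t msair;		// MSA implementation register
 *		uint32_t msacsr;	// MSA control/status register
 *	};
 */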
 692
 693#endif /* CONFIG_CPU_HAS_MSA */
 694
 695#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 696
 697/*
 698 * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
 699 */
 700static int dsp32_get(struct task_struct *target,
 701		     const struct user_regset *regset,
 702		     struct membuf to)
 703{
 704	u32 dspregs[NUM_DSP_REGS + 1];
 705	unsigned int i;
 706
 707	BUG_ON(to.left % sizeof(u32));
 708
 709	if (!cpu_has_dsp)
 710		return -EIO;
 711
 712	for (i = 0; i < NUM_DSP_REGS; i++)
 713		dspregs[i] = target->thread.dsp.dspr[i];
 714	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
 715	return membuf_write(&to, dspregs, sizeof(dspregs));
 716}
 717
 718/*
 719 * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
 720 */
 721static int dsp32_set(struct task_struct *target,
 722		     const struct user_regset *regset,
 723		     unsigned int pos, unsigned int count,
 724		     const void *kbuf, const void __user *ubuf)
 725{
 726	unsigned int start, num_regs, i;
 727	u32 dspregs[NUM_DSP_REGS + 1];
 728	int err;
 729
 730	BUG_ON(count % sizeof(u32));
 731
 732	if (!cpu_has_dsp)
 733		return -EIO;
 734
 735	start = pos / sizeof(u32);
 736	num_regs = count / sizeof(u32);
 737
 738	if (start + num_regs > NUM_DSP_REGS + 1)
 739		return -EIO;
 740
 741	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 742				 sizeof(dspregs));
 743	if (err)
 744		return err;
 745
 746	for (i = start; i < start + num_regs; i++)
 747		switch (i) {
 748		case 0 ... NUM_DSP_REGS - 1:
 749			target->thread.dsp.dspr[i] = (s32)dspregs[i];
 750			break;
 751		case NUM_DSP_REGS:
 752			target->thread.dsp.dspcontrol = (s32)dspregs[i];
 753			break;
 754		}
 755
 756	return 0;
 757}
 758
 759#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 760
 761#ifdef CONFIG_64BIT
 762
 763/*
 764 * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
 765 */
 766static int dsp64_get(struct task_struct *target,
 767		     const struct user_regset *regset,
 768		     struct membuf to)
 769{
 770	u64 dspregs[NUM_DSP_REGS + 1];
 771	unsigned int i;
 772
 773	BUG_ON(to.left % sizeof(u64));
 774
 775	if (!cpu_has_dsp)
 776		return -EIO;
 777
 778	for (i = 0; i < NUM_DSP_REGS; i++)
 779		dspregs[i] = target->thread.dsp.dspr[i];
 780	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
 781	return membuf_write(&to, dspregs, sizeof(dspregs));
 782}
 783
 784/*
 785 * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
 786 */
 787static int dsp64_set(struct task_struct *target,
 788		     const struct user_regset *regset,
 789		     unsigned int pos, unsigned int count,
 790		     const void *kbuf, const void __user *ubuf)
 791{
 792	unsigned int start, num_regs, i;
 793	u64 dspregs[NUM_DSP_REGS + 1];
 794	int err;
 795
 796	BUG_ON(count % sizeof(u64));
 797
 798	if (!cpu_has_dsp)
 799		return -EIO;
 800
 801	start = pos / sizeof(u64);
 802	num_regs = count / sizeof(u64);
 803
 804	if (start + num_regs > NUM_DSP_REGS + 1)
 805		return -EIO;
 806
 807	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 808				 sizeof(dspregs));
 809	if (err)
 810		return err;
 811
 812	for (i = start; i < start + num_regs; i++)
 813		switch (i) {
 814		case 0 ... NUM_DSP_REGS - 1:
 815			target->thread.dsp.dspr[i] = dspregs[i];
 816			break;
 817		case NUM_DSP_REGS:
 818			target->thread.dsp.dspcontrol = dspregs[i];
 819			break;
 820		}
 821
 822	return 0;
 823}
 824
 825#endif /* CONFIG_64BIT */
 826
 827/*
 828 * Determine whether the DSP context is present.
 829 */
 830static int dsp_active(struct task_struct *target,
 831		      const struct user_regset *regset)
 832{
 833	return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
 834}
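/*
 * Illustrative tracer-side sketch (not part of this file) of reading the
 * NT_MIPS_DSP regset served by dsp32_get()/dsp64_get() above: the six
 * DSP accumulator registers followed by DSPControl, with the element
 * size (u32 or u64) chosen by the regset view for the tracee's ABI.
 * dsp_active() hides the regset on CPUs without the DSP ASE, so the call
 * below simply fails there.  NT_MIPS_DSP is assumed to come from a
 * sufficiently recent <linux/elf.h>; the helper assumes the 64-bit view.
 *
 *	#include <linux/elf.h>		// NT_MIPS_DSP
 *	#include <stdint.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *
 *	static int read_dspcontrol(pid_t pid, uint64_t *dspc)
 *	{
 *		uint64_t dsp[7];	// dspr[0..5] then DSPControl
 *		struct iovec iov = { .iov_base = dsp, .iov_len = sizeof(dsp) };
 *
 *		if (ptrace(PTRACE_GETREGSET, pid, NT_MIPS_DSP, &iov) == -1)
 *			return -1;
 *		*dspc = dsp[6];
 *		return 0;
 *	}
 */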
 835
 836enum mips_regset {
 837	REGSET_GPR,
 838	REGSET_DSP,
 839#ifdef CONFIG_MIPS_FP_SUPPORT
 840	REGSET_FPR,
 841	REGSET_FP_MODE,
 842#endif
 843#ifdef CONFIG_CPU_HAS_MSA
 844	REGSET_MSA,
 845#endif
 846};
 847
 848struct pt_regs_offset {
 849	const char *name;
 850	int offset;
 851};
 852
 853#define REG_OFFSET_NAME(reg, r) {					\
 854	.name = #reg,							\
 855	.offset = offsetof(struct pt_regs, r)				\
 856}
 857
 858#define REG_OFFSET_END {						\
 859	.name = NULL,							\
 860	.offset = 0							\
 861}
 862
 863static const struct pt_regs_offset regoffset_table[] = {
 864	REG_OFFSET_NAME(r0, regs[0]),
 865	REG_OFFSET_NAME(r1, regs[1]),
 866	REG_OFFSET_NAME(r2, regs[2]),
 867	REG_OFFSET_NAME(r3, regs[3]),
 868	REG_OFFSET_NAME(r4, regs[4]),
 869	REG_OFFSET_NAME(r5, regs[5]),
 870	REG_OFFSET_NAME(r6, regs[6]),
 871	REG_OFFSET_NAME(r7, regs[7]),
 872	REG_OFFSET_NAME(r8, regs[8]),
 873	REG_OFFSET_NAME(r9, regs[9]),
 874	REG_OFFSET_NAME(r10, regs[10]),
 875	REG_OFFSET_NAME(r11, regs[11]),
 876	REG_OFFSET_NAME(r12, regs[12]),
 877	REG_OFFSET_NAME(r13, regs[13]),
 878	REG_OFFSET_NAME(r14, regs[14]),
 879	REG_OFFSET_NAME(r15, regs[15]),
 880	REG_OFFSET_NAME(r16, regs[16]),
 881	REG_OFFSET_NAME(r17, regs[17]),
 882	REG_OFFSET_NAME(r18, regs[18]),
 883	REG_OFFSET_NAME(r19, regs[19]),
 884	REG_OFFSET_NAME(r20, regs[20]),
 885	REG_OFFSET_NAME(r21, regs[21]),
 886	REG_OFFSET_NAME(r22, regs[22]),
 887	REG_OFFSET_NAME(r23, regs[23]),
 888	REG_OFFSET_NAME(r24, regs[24]),
 889	REG_OFFSET_NAME(r25, regs[25]),
 890	REG_OFFSET_NAME(r26, regs[26]),
 891	REG_OFFSET_NAME(r27, regs[27]),
 892	REG_OFFSET_NAME(r28, regs[28]),
 893	REG_OFFSET_NAME(r29, regs[29]),
 894	REG_OFFSET_NAME(r30, regs[30]),
 895	REG_OFFSET_NAME(r31, regs[31]),
 896	REG_OFFSET_NAME(c0_status, cp0_status),
 897	REG_OFFSET_NAME(hi, hi),
 898	REG_OFFSET_NAME(lo, lo),
 899#ifdef CONFIG_CPU_HAS_SMARTMIPS
 900	REG_OFFSET_NAME(acx, acx),
 901#endif
 902	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
 903	REG_OFFSET_NAME(c0_cause, cp0_cause),
 904	REG_OFFSET_NAME(c0_epc, cp0_epc),
 905#ifdef CONFIG_CPU_CAVIUM_OCTEON
 906	REG_OFFSET_NAME(mpl0, mpl[0]),
 907	REG_OFFSET_NAME(mpl1, mpl[1]),
 908	REG_OFFSET_NAME(mpl2, mpl[2]),
 909	REG_OFFSET_NAME(mtp0, mtp[0]),
 910	REG_OFFSET_NAME(mtp1, mtp[1]),
 911	REG_OFFSET_NAME(mtp2, mtp[2]),
 912#endif
 913	REG_OFFSET_END,
 914};
 915
 916/**
 917 * regs_query_register_offset() - query register offset from its name
 918 * @name:       the name of a register
 919 *
 920 * regs_query_register_offset() returns the offset of a register in struct
 921 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 922 */
 923int regs_query_register_offset(const char *name)
 924{
 925	const struct pt_regs_offset *roff;
 926	for (roff = regoffset_table; roff->name != NULL; roff++)
 927		if (!strcmp(roff->name, name))
 928			return roff->offset;
 929	return -EINVAL;
 930}
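/*
 * Minimal in-kernel usage sketch (not part of this file): the offset
 * returned above is intended to be passed to the regs_get_register()
 * accessor from <asm/ptrace.h>, for instance by the kprobes/tracing
 * fetch-argument code, to read a register from struct pt_regs by name.
 * The helper below is hypothetical.
 *
 *	static unsigned long read_named_reg(struct pt_regs *regs,
 *					    const char *name)
 *	{
 *		int offset = regs_query_register_offset(name);
 *
 *		if (offset < 0)
 *			return 0;	// unknown register name
 *		return regs_get_register(regs, offset);
 *	}
 */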
 931
 932#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 933
 934static const struct user_regset mips_regsets[] = {
 935	[REGSET_GPR] = {
 936		.core_note_type	= NT_PRSTATUS,
 937		.n		= ELF_NGREG,
 938		.size		= sizeof(unsigned int),
 939		.align		= sizeof(unsigned int),
 940		.regset_get	= gpr32_get,
 941		.set		= gpr32_set,
 942	},
 943	[REGSET_DSP] = {
 944		.core_note_type	= NT_MIPS_DSP,
 945		.n		= NUM_DSP_REGS + 1,
 946		.size		= sizeof(u32),
 947		.align		= sizeof(u32),
 948		.regset_get	= dsp32_get,
 949		.set		= dsp32_set,
 950		.active		= dsp_active,
 951	},
 952#ifdef CONFIG_MIPS_FP_SUPPORT
 953	[REGSET_FPR] = {
 954		.core_note_type	= NT_PRFPREG,
 955		.n		= ELF_NFPREG,
 956		.size		= sizeof(elf_fpreg_t),
 957		.align		= sizeof(elf_fpreg_t),
 958		.regset_get	= fpr_get,
 959		.set		= fpr_set,
 960	},
 961	[REGSET_FP_MODE] = {
 962		.core_note_type	= NT_MIPS_FP_MODE,
 963		.n		= 1,
 964		.size		= sizeof(int),
 965		.align		= sizeof(int),
 966		.regset_get	= fp_mode_get,
 967		.set		= fp_mode_set,
 968	},
 969#endif
 970#ifdef CONFIG_CPU_HAS_MSA
 971	[REGSET_MSA] = {
 972		.core_note_type	= NT_MIPS_MSA,
 973		.n		= NUM_FPU_REGS + 1,
 974		.size		= 16,
 975		.align		= 16,
 976		.regset_get	= msa_get,
 977		.set		= msa_set,
 978	},
 979#endif
 980};
 981
 982static const struct user_regset_view user_mips_view = {
 983	.name		= "mips",
 984	.e_machine	= ELF_ARCH,
 985	.ei_osabi	= ELF_OSABI,
 986	.regsets	= mips_regsets,
 987	.n		= ARRAY_SIZE(mips_regsets),
 988};
 989
 990#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 991
 992#ifdef CONFIG_64BIT
 993
 994static const struct user_regset mips64_regsets[] = {
 995	[REGSET_GPR] = {
 996		.core_note_type	= NT_PRSTATUS,
 997		.n		= ELF_NGREG,
 998		.size		= sizeof(unsigned long),
 999		.align		= sizeof(unsigned long),
1000		.regset_get	= gpr64_get,
1001		.set		= gpr64_set,
1002	},
1003	[REGSET_DSP] = {
1004		.core_note_type	= NT_MIPS_DSP,
1005		.n		= NUM_DSP_REGS + 1,
1006		.size		= sizeof(u64),
1007		.align		= sizeof(u64),
1008		.regset_get	= dsp64_get,
1009		.set		= dsp64_set,
1010		.active		= dsp_active,
1011	},
1012#ifdef CONFIG_MIPS_FP_SUPPORT
1013	[REGSET_FP_MODE] = {
1014		.core_note_type	= NT_MIPS_FP_MODE,
1015		.n		= 1,
1016		.size		= sizeof(int),
1017		.align		= sizeof(int),
1018		.regset_get	= fp_mode_get,
1019		.set		= fp_mode_set,
1020	},
1021	[REGSET_FPR] = {
1022		.core_note_type	= NT_PRFPREG,
1023		.n		= ELF_NFPREG,
1024		.size		= sizeof(elf_fpreg_t),
1025		.align		= sizeof(elf_fpreg_t),
1026		.regset_get	= fpr_get,
1027		.set		= fpr_set,
1028	},
1029#endif
1030#ifdef CONFIG_CPU_HAS_MSA
1031	[REGSET_MSA] = {
1032		.core_note_type	= NT_MIPS_MSA,
1033		.n		= NUM_FPU_REGS + 1,
1034		.size		= 16,
1035		.align		= 16,
1036		.regset_get	= msa_get,
1037		.set		= msa_set,
1038	},
1039#endif
1040};
1041
1042static const struct user_regset_view user_mips64_view = {
1043	.name		= "mips64",
1044	.e_machine	= ELF_ARCH,
1045	.ei_osabi	= ELF_OSABI,
1046	.regsets	= mips64_regsets,
1047	.n		= ARRAY_SIZE(mips64_regsets),
1048};
1049
1050#ifdef CONFIG_MIPS32_N32
1051
1052static const struct user_regset_view user_mipsn32_view = {
1053	.name		= "mipsn32",
1054	.e_flags	= EF_MIPS_ABI2,
1055	.e_machine	= ELF_ARCH,
1056	.ei_osabi	= ELF_OSABI,
1057	.regsets	= mips64_regsets,
1058	.n		= ARRAY_SIZE(mips64_regsets),
1059};
1060
1061#endif /* CONFIG_MIPS32_N32 */
1062
1063#endif /* CONFIG_64BIT */
1064
1065const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1066{
1067#ifdef CONFIG_32BIT
1068	return &user_mips_view;
1069#else
1070#ifdef CONFIG_MIPS32_O32
1071	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
1072		return &user_mips_view;
1073#endif
1074#ifdef CONFIG_MIPS32_N32
1075	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
1076		return &user_mipsn32_view;
1077#endif
1078	return &user_mips64_view;
1079#endif
1080}
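/*
 * Illustrative tracer-side sketch (not part of this file): for
 * PTRACE_GETREGSET the kernel serves the regsets from whichever view
 * task_user_regset_view() selected above, so the NT_PRSTATUS element
 * size follows the tracee's ABI (32- or 64-bit), not the tracer's.
 * Sizing the iovec for the larger layout and checking the iov_len the
 * kernel writes back is one way for a tracer to cope with either case.
 *
 *	#include <elf.h>		// NT_PRSTATUS
 *	#include <stdint.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *
 *	static ssize_t read_gprs(pid_t pid, void *buf, size_t len)
 *	{
 *		struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *		if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == -1)
 *			return -1;
 *		return iov.iov_len;	// bytes the kernel actually filled in
 *	}
 */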
1081
1082long arch_ptrace(struct task_struct *child, long request,
1083		 unsigned long addr, unsigned long data)
1084{
1085	int ret;
1086	void __user *addrp = (void __user *) addr;
1087	void __user *datavp = (void __user *) data;
1088	unsigned long __user *datalp = (void __user *) data;
1089
1090	switch (request) {
1091	/* when I and D space are separate, these will need to be fixed. */
1092	case PTRACE_PEEKTEXT: /* read word at location addr. */
1093	case PTRACE_PEEKDATA:
1094		ret = generic_ptrace_peekdata(child, addr, data);
1095		break;
1096
1097	/* Read the word at location addr in the USER area. */
1098	case PTRACE_PEEKUSR: {
1099		struct pt_regs *regs;
1100		unsigned long tmp = 0;
1101
1102		regs = task_pt_regs(child);
1103		ret = 0;  /* Default return value. */
1104
1105		switch (addr) {
1106		case 0 ... 31:
1107			tmp = regs->regs[addr];
1108			break;
1109#ifdef CONFIG_MIPS_FP_SUPPORT
1110		case FPR_BASE ... FPR_BASE + 31: {
1111			union fpureg *fregs;
1112
1113			if (!tsk_used_math(child)) {
1114				/* FP not yet used */
1115				tmp = -1;
1116				break;
1117			}
1118			fregs = get_fpu_regs(child);
1119
1120#ifdef CONFIG_32BIT
1121			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1122				/*
1123				 * The odd registers are actually the high
1124				 * order bits of the values stored in the even
1125				 * registers.
1126				 */
1127				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1128						addr & 1);
1129				break;
1130			}
1131#endif
1132			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
1133			break;
1134		}
1135		case FPC_CSR:
1136			tmp = child->thread.fpu.fcr31;
1137			break;
1138		case FPC_EIR:
1139			/* implementation / version register */
1140			tmp = boot_cpu_data.fpu_id;
1141			break;
1142#endif
1143		case PC:
1144			tmp = regs->cp0_epc;
1145			break;
1146		case CAUSE:
1147			tmp = regs->cp0_cause;
1148			break;
1149		case BADVADDR:
1150			tmp = regs->cp0_badvaddr;
1151			break;
1152		case MMHI:
1153			tmp = regs->hi;
1154			break;
1155		case MMLO:
1156			tmp = regs->lo;
1157			break;
1158#ifdef CONFIG_CPU_HAS_SMARTMIPS
1159		case ACX:
1160			tmp = regs->acx;
1161			break;
1162#endif
1163		case DSP_BASE ... DSP_BASE + 5: {
1164			dspreg_t *dregs;
1165
1166			if (!cpu_has_dsp) {
1167				tmp = 0;
1168				ret = -EIO;
1169				goto out;
1170			}
1171			dregs = __get_dsp_regs(child);
1172			tmp = dregs[addr - DSP_BASE];
1173			break;
1174		}
1175		case DSP_CONTROL:
1176			if (!cpu_has_dsp) {
1177				tmp = 0;
1178				ret = -EIO;
1179				goto out;
1180			}
1181			tmp = child->thread.dsp.dspcontrol;
1182			break;
1183		default:
1184			tmp = 0;
1185			ret = -EIO;
1186			goto out;
1187		}
1188		ret = put_user(tmp, datalp);
1189		break;
1190	}
1191
1192	/* when I and D space are separate, this will have to be fixed. */
1193	case PTRACE_POKETEXT: /* write the word at location addr. */
1194	case PTRACE_POKEDATA:
1195		ret = generic_ptrace_pokedata(child, addr, data);
1196		break;
1197
1198	case PTRACE_POKEUSR: {
1199		struct pt_regs *regs;
1200		ret = 0;
1201		regs = task_pt_regs(child);
1202
1203		switch (addr) {
1204		case 0 ... 31:
1205			regs->regs[addr] = data;
1206			/* System call number may have been changed */
1207			if (addr == 2)
1208				mips_syscall_update_nr(child, regs);
1209			else if (addr == 4 &&
1210				 mips_syscall_is_indirect(child, regs))
1211				mips_syscall_update_nr(child, regs);
1212			break;
1213#ifdef CONFIG_MIPS_FP_SUPPORT
1214		case FPR_BASE ... FPR_BASE + 31: {
1215			union fpureg *fregs = get_fpu_regs(child);
1216
1217			init_fp_ctx(child);
1218#ifdef CONFIG_32BIT
1219			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1220				/*
1221				 * The odd registers are actually the high
1222				 * order bits of the values stored in the even
1223				 * registers.
1224				 */
1225				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1226					  addr & 1, data);
1227				break;
1228			}
1229#endif
1230			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
1231			break;
1232		}
1233		case FPC_CSR:
1234			init_fp_ctx(child);
1235			ptrace_setfcr31(child, data);
1236			break;
1237#endif
1238		case PC:
1239			regs->cp0_epc = data;
1240			break;
1241		case MMHI:
1242			regs->hi = data;
1243			break;
1244		case MMLO:
1245			regs->lo = data;
1246			break;
1247#ifdef CONFIG_CPU_HAS_SMARTMIPS
1248		case ACX:
1249			regs->acx = data;
1250			break;
1251#endif
1252		case DSP_BASE ... DSP_BASE + 5: {
1253			dspreg_t *dregs;
1254
1255			if (!cpu_has_dsp) {
1256				ret = -EIO;
1257				break;
1258			}
1259
1260			dregs = __get_dsp_regs(child);
1261			dregs[addr - DSP_BASE] = data;
1262			break;
1263		}
1264		case DSP_CONTROL:
1265			if (!cpu_has_dsp) {
1266				ret = -EIO;
1267				break;
1268			}
1269			child->thread.dsp.dspcontrol = data;
1270			break;
1271		default:
1272			/* The rest are not allowed. */
1273			ret = -EIO;
1274			break;
1275		}
1276		break;
1277	}
1278
1279	case PTRACE_GETREGS:
1280		ret = ptrace_getregs(child, datavp);
1281		break;
1282
1283	case PTRACE_SETREGS:
1284		ret = ptrace_setregs(child, datavp);
1285		break;
1286
1287#ifdef CONFIG_MIPS_FP_SUPPORT
1288	case PTRACE_GETFPREGS:
1289		ret = ptrace_getfpregs(child, datavp);
1290		break;
1291
1292	case PTRACE_SETFPREGS:
1293		ret = ptrace_setfpregs(child, datavp);
1294		break;
1295#endif
1296	case PTRACE_GET_THREAD_AREA:
1297		ret = put_user(task_thread_info(child)->tp_value, datalp);
1298		break;
1299
1300	case PTRACE_GET_WATCH_REGS:
1301		ret = ptrace_get_watch_regs(child, addrp);
1302		break;
1303
1304	case PTRACE_SET_WATCH_REGS:
1305		ret = ptrace_set_watch_regs(child, addrp);
1306		break;
1307
1308	default:
1309		ret = ptrace_request(child, request, addr, data);
1310		break;
1311	}
1312 out:
1313	return ret;
1314}
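/*
 * Illustrative tracer-side sketch (not part of this file) of the
 * PTRACE_PEEKUSR interface implemented above: the "addr" argument is a
 * MIPS register index (0..31 for GPRs, then FPR_BASE + n, PC, CAUSE,
 * BADVADDR, MMHI, MMLO, FPC_CSR, DSP_BASE + n, ...).  The index macros
 * are assumed to come from the MIPS UAPI <asm/ptrace.h>; glibc spells
 * the request PTRACE_PEEKUSER.
 *
 *	#include <errno.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <asm/ptrace.h>		// PC, FPR_BASE, FPC_CSR, ...
 *
 *	static long peek_epc(pid_t pid)
 *	{
 *		long epc;
 *
 *		errno = 0;
 *		epc = ptrace(PTRACE_PEEKUSER, pid, (void *)PC, NULL);
 *		// ptrace() returns -1 with errno set on failure; -1 is also
 *		// a valid register value, so errno must be checked.
 *		return errno ? -1 : epc;
 *	}
 */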
1315
1316/*
1317 * Notification of system call entry/exit
1318 * - triggered by syscall-tracing thread flags such as TIF_SYSCALL_TRACE
1319 */
1320asmlinkage long syscall_trace_enter(struct pt_regs *regs)
1321{
1322	user_exit();
1323
1324	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
1325		if (ptrace_report_syscall_entry(regs))
1326			return -1;
1327	}
1328
1329#ifdef CONFIG_SECCOMP
1330	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
1331		int ret, i;
1332		struct seccomp_data sd;
1333		unsigned long args[6];
1334
1335		sd.nr = current_thread_info()->syscall;
1336		sd.arch = syscall_get_arch(current);
1337		syscall_get_arguments(current, regs, args);
1338		for (i = 0; i < 6; i++)
1339			sd.args[i] = args[i];
1340		sd.instruction_pointer = KSTK_EIP(current);
1341
1342		ret = __secure_computing(&sd);
1343		if (ret == -1)
1344			return ret;
1345	}
1346#endif
1347
1348	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1349		trace_sys_enter(regs, regs->regs[2]);
1350
1351	audit_syscall_entry(current_thread_info()->syscall,
1352			    regs->regs[4], regs->regs[5],
1353			    regs->regs[6], regs->regs[7]);
1354
1355	/*
1356	 * Negative syscall numbers are mistaken for rejected syscalls, but
1357	 * won't have had the return value set appropriately, so we do so now.
1358	 */
1359	if (current_thread_info()->syscall < 0)
1360		syscall_set_return_value(current, regs, -ENOSYS, 0);
1361	return current_thread_info()->syscall;
1362}
1363
1364/*
1365 * Notification of system call entry/exit
1366 * - triggered by syscall-tracing thread flags such as TIF_SYSCALL_TRACE
1367 */
1368asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1369{
1370	/*
1371	 * We may come here right after calling schedule_user()
1372	 * or do_notify_resume(), in which case we can be in RCU
1373	 * user mode.
1374	 */
1375	user_exit();
1376
1377	audit_syscall_exit(regs);
1378
1379	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1380		trace_sys_exit(regs, regs_return_value(regs));
1381
1382	if (test_thread_flag(TIF_SYSCALL_TRACE))
1383		ptrace_report_syscall_exit(regs, 0);
1384
1385	user_enter();
1386}