arch/mips/kernel/ptrace.c (Linux v5.4)
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1992 Ross Biro
   7 * Copyright (C) Linus Torvalds
   8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
   9 * Copyright (C) 1996 David S. Miller
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 1999 MIPS Technologies, Inc.
  12 * Copyright (C) 2000 Ulf Carlsson
  13 *
  14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
  15 * binaries.
  16 */
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/elf.h>
  20#include <linux/kernel.h>
  21#include <linux/sched.h>
  22#include <linux/sched/task_stack.h>
  23#include <linux/mm.h>
  24#include <linux/errno.h>
  25#include <linux/ptrace.h>
  26#include <linux/regset.h>
  27#include <linux/smp.h>
  28#include <linux/security.h>
  29#include <linux/stddef.h>
  30#include <linux/tracehook.h>
  31#include <linux/audit.h>
  32#include <linux/seccomp.h>
  33#include <linux/ftrace.h>
  34
  35#include <asm/byteorder.h>
  36#include <asm/cpu.h>
  37#include <asm/cpu-info.h>
  38#include <asm/dsp.h>
  39#include <asm/fpu.h>
  40#include <asm/mipsregs.h>
  41#include <asm/mipsmtregs.h>
  42#include <asm/pgtable.h>
  43#include <asm/page.h>
  44#include <asm/processor.h>
  45#include <asm/syscall.h>
  46#include <linux/uaccess.h>
  47#include <asm/bootinfo.h>
  48#include <asm/reg.h>
  49
  50#define CREATE_TRACE_POINTS
  51#include <trace/events/syscalls.h>
  52
  53/*
  54 * Called by kernel/ptrace.c when detaching..
  55 *
  56 * Make sure single step bits etc are not set.
  57 */
  58void ptrace_disable(struct task_struct *child)
  59{
  60	/* Don't load the watchpoint registers for the ex-child. */
  61	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
  62}
  63
  64/*
  65 * Read a general register set.	 We always use the 64-bit format, even
  66 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
  67 * Registers are sign extended to fill the available space.
  68 */
  69int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
  70{
  71	struct pt_regs *regs;
  72	int i;
  73
  74	if (!access_ok(data, 38 * 8))
  75		return -EIO;
  76
  77	regs = task_pt_regs(child);
  78
  79	for (i = 0; i < 32; i++)
  80		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
  81	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
  82	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
  83	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
  84	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
  85	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
  86	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
  87
  88	return 0;
  89}
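/*
 * Illustrative sketch, not part of this file: how a tracer might consume
 * the 38 x 8-byte block that ptrace_getregs() fills in.  The index layout
 * (32 GPRs, then lo, hi, cp0_epc, cp0_badvaddr, cp0_status, cp0_cause)
 * mirrors the __put_user() sequence above; everything else here is an
 * assumption about a plain glibc environment.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *
 *	static void dump_gpregs(pid_t child)
 *	{
 *		uint64_t regs[38];
 *
 *		if (ptrace(PTRACE_GETREGS, child, NULL, regs) == -1) {
 *			perror("PTRACE_GETREGS");
 *			return;
 *		}
 *		printf("epc=%#llx sp=%#llx\n",
 *		       (unsigned long long)regs[34],
 *		       (unsigned long long)regs[29]);
 *	}
 */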
  90
  91/*
  92 * Write a general register set.  As for PTRACE_GETREGS, we always use
  93 * the 64-bit format.  On a 32-bit kernel only the lower order half
  94 * (according to endianness) will be used.
  95 */
  96int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
  97{
  98	struct pt_regs *regs;
  99	int i;
 100
 101	if (!access_ok(data, 38 * 8))
 102		return -EIO;
 103
 104	regs = task_pt_regs(child);
 105
 106	for (i = 0; i < 32; i++)
 107		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
 108	__get_user(regs->lo, (__s64 __user *)&data->lo);
 109	__get_user(regs->hi, (__s64 __user *)&data->hi);
 110	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
 111
 112	/* badvaddr, status, and cause may not be written.  */
 113
 114	/* System call number may have been changed */
 115	mips_syscall_update_nr(child, regs);
 116
 117	return 0;
 118}
 119
 120int ptrace_get_watch_regs(struct task_struct *child,
 121			  struct pt_watch_regs __user *addr)
 122{
 123	enum pt_watch_style style;
 124	int i;
 125
 126	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 127		return -EIO;
 128	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
 129		return -EIO;
 130
 131#ifdef CONFIG_32BIT
 132	style = pt_watch_style_mips32;
 133#define WATCH_STYLE mips32
 134#else
 135	style = pt_watch_style_mips64;
 136#define WATCH_STYLE mips64
 137#endif
 138
 139	__put_user(style, &addr->style);
 140	__put_user(boot_cpu_data.watch_reg_use_cnt,
 141		   &addr->WATCH_STYLE.num_valid);
 142	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 143		__put_user(child->thread.watch.mips3264.watchlo[i],
 144			   &addr->WATCH_STYLE.watchlo[i]);
 145		__put_user(child->thread.watch.mips3264.watchhi[i] &
 146				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
 147			   &addr->WATCH_STYLE.watchhi[i]);
 148		__put_user(boot_cpu_data.watch_reg_masks[i],
 149			   &addr->WATCH_STYLE.watch_masks[i]);
 150	}
 151	for (; i < 8; i++) {
 152		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
 153		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
 154		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
 155	}
 156
 157	return 0;
 158}
 159
 160int ptrace_set_watch_regs(struct task_struct *child,
 161			  struct pt_watch_regs __user *addr)
 162{
 163	int i;
 164	int watch_active = 0;
 165	unsigned long lt[NUM_WATCH_REGS];
 166	u16 ht[NUM_WATCH_REGS];
 167
 168	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 169		return -EIO;
 170	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
 171		return -EIO;
 172	/* Check the values. */
 173	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 174		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
 175#ifdef CONFIG_32BIT
 176		if (lt[i] & __UA_LIMIT)
 177			return -EINVAL;
 178#else
 179		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
 180			if (lt[i] & 0xffffffff80000000UL)
 181				return -EINVAL;
 182		} else {
 183			if (lt[i] & __UA_LIMIT)
 184				return -EINVAL;
 185		}
 186#endif
 187		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
 188		if (ht[i] & ~MIPS_WATCHHI_MASK)
 189			return -EINVAL;
 190	}
 191	/* Install them. */
 192	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 193		if (lt[i] & MIPS_WATCHLO_IRW)
 194			watch_active = 1;
 195		child->thread.watch.mips3264.watchlo[i] = lt[i];
 196		/* Set the G bit. */
 197		child->thread.watch.mips3264.watchhi[i] = ht[i];
 198	}
 199
 200	if (watch_active)
 201		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
 202	else
 203		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
 204
 205	return 0;
 206}
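/*
 * Illustrative sketch, not part of this file: arming one hardware write
 * watchpoint through the interface above.  PTRACE_GET_WATCH_REGS,
 * PTRACE_SET_WATCH_REGS and struct pt_watch_regs come from the UAPI
 * <asm/ptrace.h>; the use of the mips64 member is an assumption for a
 * 64-bit target.
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <asm/ptrace.h>
 *
 *	static int watch_writes_at(pid_t child, unsigned long addr)
 *	{
 *		struct pt_watch_regs w;
 *
 *		if (ptrace(PTRACE_GET_WATCH_REGS, child, &w, NULL) == -1)
 *			return -1;
 *		w.mips64.watchlo[0] = (addr & ~7UL) | 1;	// bit 0 (W): trap on stores
 *		w.mips64.watchhi[0] = 0;
 *		return ptrace(PTRACE_SET_WATCH_REGS, child, &w, NULL);
 *	}
 */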
 207
 208/* regset get/set implementations */
 209
 210#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 211
 212static int gpr32_get(struct task_struct *target,
 213		     const struct user_regset *regset,
 214		     unsigned int pos, unsigned int count,
 215		     void *kbuf, void __user *ubuf)
 216{
 217	struct pt_regs *regs = task_pt_regs(target);
 218	u32 uregs[ELF_NGREG] = {};
 219
 220	mips_dump_regs32(uregs, regs);
 221	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
 222				   sizeof(uregs));
 223}
 224
 225static int gpr32_set(struct task_struct *target,
 226		     const struct user_regset *regset,
 227		     unsigned int pos, unsigned int count,
 228		     const void *kbuf, const void __user *ubuf)
 229{
 230	struct pt_regs *regs = task_pt_regs(target);
 231	u32 uregs[ELF_NGREG];
 232	unsigned start, num_regs, i;
 233	int err;
 234
 235	start = pos / sizeof(u32);
 236	num_regs = count / sizeof(u32);
 237
 238	if (start + num_regs > ELF_NGREG)
 239		return -EIO;
 240
 241	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 242				 sizeof(uregs));
 243	if (err)
 244		return err;
 245
 246	for (i = start; i < num_regs; i++) {
 247		/*
 248		 * Cast all values to signed here so that if this is a 64-bit
 249		 * kernel, the supplied 32-bit values will be sign extended.
 250		 */
 251		switch (i) {
 252		case MIPS32_EF_R1 ... MIPS32_EF_R25:
 253			/* k0/k1 are ignored. */
 254		case MIPS32_EF_R28 ... MIPS32_EF_R31:
 255			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
 256			break;
 257		case MIPS32_EF_LO:
 258			regs->lo = (s32)uregs[i];
 259			break;
 260		case MIPS32_EF_HI:
 261			regs->hi = (s32)uregs[i];
 262			break;
 263		case MIPS32_EF_CP0_EPC:
 264			regs->cp0_epc = (s32)uregs[i];
 265			break;
 266		}
 267	}
 268
 269	/* System call number may have been changed */
 270	mips_syscall_update_nr(target, regs);
 271
 272	return 0;
 273}
 274
 275#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 276
 277#ifdef CONFIG_64BIT
 278
 279static int gpr64_get(struct task_struct *target,
 280		     const struct user_regset *regset,
 281		     unsigned int pos, unsigned int count,
 282		     void *kbuf, void __user *ubuf)
 283{
 284	struct pt_regs *regs = task_pt_regs(target);
 285	u64 uregs[ELF_NGREG] = {};
 286
 287	mips_dump_regs64(uregs, regs);
 288	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
 289				   sizeof(uregs));
 290}
 291
 292static int gpr64_set(struct task_struct *target,
 293		     const struct user_regset *regset,
 294		     unsigned int pos, unsigned int count,
 295		     const void *kbuf, const void __user *ubuf)
 296{
 297	struct pt_regs *regs = task_pt_regs(target);
 298	u64 uregs[ELF_NGREG];
 299	unsigned start, num_regs, i;
 300	int err;
 301
 302	start = pos / sizeof(u64);
 303	num_regs = count / sizeof(u64);
 304
 305	if (start + num_regs > ELF_NGREG)
 306		return -EIO;
 307
 308	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 309				 sizeof(uregs));
 310	if (err)
 311		return err;
 312
 313	for (i = start; i < num_regs; i++) {
 314		switch (i) {
 315		case MIPS64_EF_R1 ... MIPS64_EF_R25:
 316			/* k0/k1 are ignored. */
 317		case MIPS64_EF_R28 ... MIPS64_EF_R31:
 318			regs->regs[i - MIPS64_EF_R0] = uregs[i];
 319			break;
 320		case MIPS64_EF_LO:
 321			regs->lo = uregs[i];
 322			break;
 323		case MIPS64_EF_HI:
 324			regs->hi = uregs[i];
 325			break;
 326		case MIPS64_EF_CP0_EPC:
 327			regs->cp0_epc = uregs[i];
 328			break;
 329		}
 330	}
 331
 332	/* System call number may have been changed */
 333	mips_syscall_update_nr(target, regs);
 334
 335	return 0;
 336}
 337
 338#endif /* CONFIG_64BIT */
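/*
 * Illustrative sketch, not part of this file: the regset accessors above
 * are reached from userspace through PTRACE_GETREGSET/PTRACE_SETREGSET
 * with an iovec.  For the general registers (NT_PRSTATUS):
 *
 *	#include <elf.h>
 *	#include <stdint.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *
 *	static int read_gprs(pid_t child, uint64_t *buf, size_t len)
 *	{
 *		struct iovec iov = { .iov_base = buf, .iov_len = len };
 *
 *		return ptrace(PTRACE_GETREGSET, child, NT_PRSTATUS, &iov);
 *	}
 *
 * Whether gpr32_get() or gpr64_get() ends up servicing the call depends
 * on the regset view picked by task_user_regset_view() further down.
 */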
 339
 340
 341#ifdef CONFIG_MIPS_FP_SUPPORT
 342
 343/*
 344 * Poke at FCSR according to its mask.  Set the Cause bits even
 345 * if a corresponding Enable bit is set.  This will be noticed at
 346 * the time the thread is switched to and SIGFPE thrown accordingly.
 347 */
 348static void ptrace_setfcr31(struct task_struct *child, u32 value)
 349{
 350	u32 fcr31;
 351	u32 mask;
 352
 353	fcr31 = child->thread.fpu.fcr31;
 354	mask = boot_cpu_data.fpu_msk31;
 355	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
 356}
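/*
 * Illustrative note: fpu_msk31 marks the FCSR bits userspace may not
 * change.  For mask m, old register f and written value v the result is
 * (v & ~m) | (f & m): masked bits keep their old contents, every other
 * bit takes the new value.  E.g. with m == 0x01000000 only bit 24 would
 * be preserved; the concrete mask is CPU-dependent.
 */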
 357
 358int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
 359{
 360	int i;
 361
 362	if (!access_ok(data, 33 * 8))
 363		return -EIO;
 364
 365	if (tsk_used_math(child)) {
 366		union fpureg *fregs = get_fpu_regs(child);
 367		for (i = 0; i < 32; i++)
 368			__put_user(get_fpr64(&fregs[i], 0),
 369				   i + (__u64 __user *)data);
 370	} else {
 371		for (i = 0; i < 32; i++)
 372			__put_user((__u64) -1, i + (__u64 __user *) data);
 373	}
 374
 375	__put_user(child->thread.fpu.fcr31, data + 64);
 376	__put_user(boot_cpu_data.fpu_id, data + 65);
 377
 378	return 0;
 379}
 380
 381int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
 382{
 383	union fpureg *fregs;
 384	u64 fpr_val;
 385	u32 value;
 386	int i;
 387
 388	if (!access_ok(data, 33 * 8))
 389		return -EIO;
 390
 391	init_fp_ctx(child);
 392	fregs = get_fpu_regs(child);
 393
 394	for (i = 0; i < 32; i++) {
 395		__get_user(fpr_val, i + (__u64 __user *)data);
 396		set_fpr64(&fregs[i], 0, fpr_val);
 397	}
 398
 399	__get_user(value, data + 64);
 400	ptrace_setfcr31(child, value);
 401
 402	/* FIR may not be written.  */
 403
 404	return 0;
 405}
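/*
 * Illustrative sketch, not part of this file: the legacy PTRACE_GETFPREGS/
 * PTRACE_SETFPREGS block is 33 x 8 bytes: 32 double-width FP registers,
 * then FCSR in 32-bit word 64 and FIR in word 65, matching the accessors
 * above.  Reading FCSR from a tracer might look like:
 *
 *	#include <stdint.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *
 *	static uint32_t read_fcsr(pid_t child)
 *	{
 *		uint32_t fp[66];	// 33 * 8 bytes
 *
 *		if (ptrace(PTRACE_GETFPREGS, child, NULL, fp) == -1)
 *			return 0;
 *		return fp[64];
 *	}
 */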
 406
 407/*
 408 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 409 * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
 410 * correspond 1:1 to buffer slots.  Only general registers are copied.
 411 */
 412static int fpr_get_fpa(struct task_struct *target,
 413		       unsigned int *pos, unsigned int *count,
 414		       void **kbuf, void __user **ubuf)
 415{
 416	return user_regset_copyout(pos, count, kbuf, ubuf,
 417				   &target->thread.fpu,
 418				   0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
 419}
 420
 421/*
 422 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 423 * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
 424 * general register slots are copied to buffer slots.  Only general
 425 * registers are copied.
 426 */
 427static int fpr_get_msa(struct task_struct *target,
 428		       unsigned int *pos, unsigned int *count,
 429		       void **kbuf, void __user **ubuf)
 430{
 431	unsigned int i;
 432	u64 fpr_val;
 433	int err;
 434
 435	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 436	for (i = 0; i < NUM_FPU_REGS; i++) {
 437		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
 438		err = user_regset_copyout(pos, count, kbuf, ubuf,
 439					  &fpr_val, i * sizeof(elf_fpreg_t),
 440					  (i + 1) * sizeof(elf_fpreg_t));
 441		if (err)
 442			return err;
 443	}
 444
 445	return 0;
 446}
 447
 448/*
 449 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
 450 * Choose the appropriate helper for general registers, and then copy
 451 * the FCSR and FIR registers separately.
 452 */
 453static int fpr_get(struct task_struct *target,
 454		   const struct user_regset *regset,
 455		   unsigned int pos, unsigned int count,
 456		   void *kbuf, void __user *ubuf)
 457{
 458	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 459	const int fir_pos = fcr31_pos + sizeof(u32);
 460	int err;
 461
 462	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 463		err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
 464	else
 465		err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
 466	if (err)
 467		return err;
 468
 469	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 470				  &target->thread.fpu.fcr31,
 471				  fcr31_pos, fcr31_pos + sizeof(u32));
 472	if (err)
 473		return err;
 474
 475	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 476				  &boot_cpu_data.fpu_id,
 477				  fir_pos, fir_pos + sizeof(u32));
 478
 479	return err;
 480}
 481
 482/*
 483 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 484 * !CONFIG_CPU_HAS_MSA variant.   Buffer slots correspond 1:1 to FP
 485 * context's general register slots.  Only general registers are copied.
 486 */
 487static int fpr_set_fpa(struct task_struct *target,
 488		       unsigned int *pos, unsigned int *count,
 489		       const void **kbuf, const void __user **ubuf)
 490{
 491	return user_regset_copyin(pos, count, kbuf, ubuf,
 492				  &target->thread.fpu,
 493				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
 494}
 495
 496/*
 497 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 498 * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
 499 * bits only of FP context's general register slots.  Only general
 500 * registers are copied.
 501 */
 502static int fpr_set_msa(struct task_struct *target,
 503		       unsigned int *pos, unsigned int *count,
 504		       const void **kbuf, const void __user **ubuf)
 505{
 506	unsigned int i;
 507	u64 fpr_val;
 508	int err;
 509
 510	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 511	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
 512		err = user_regset_copyin(pos, count, kbuf, ubuf,
 513					 &fpr_val, i * sizeof(elf_fpreg_t),
 514					 (i + 1) * sizeof(elf_fpreg_t));
 515		if (err)
 516			return err;
 517		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
 518	}
 519
 520	return 0;
 521}
 522
 523/*
 524 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 525 * Choose the appropriate helper for general registers, and then copy
 526 * the FCSR register separately.  Ignore the incoming FIR register
 527 * contents though, as the register is read-only.
 528 *
 529 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 530 * which is supposed to have been guaranteed by the kernel before
 531 * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
 532 * so that we can safely avoid preinitializing temporaries for
 533 * partial register writes.
 534 */
 535static int fpr_set(struct task_struct *target,
 536		   const struct user_regset *regset,
 537		   unsigned int pos, unsigned int count,
 538		   const void *kbuf, const void __user *ubuf)
 539{
 540	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 541	const int fir_pos = fcr31_pos + sizeof(u32);
 542	u32 fcr31;
 543	int err;
 544
 545	BUG_ON(count % sizeof(elf_fpreg_t));
 546
 547	if (pos + count > sizeof(elf_fpregset_t))
 548		return -EIO;
 549
 550	init_fp_ctx(target);
 551
 552	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 553		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
 554	else
 555		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
 556	if (err)
 557		return err;
 558
 559	if (count > 0) {
 560		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 561					 &fcr31,
 562					 fcr31_pos, fcr31_pos + sizeof(u32));
 563		if (err)
 564			return err;
 565
 566		ptrace_setfcr31(target, fcr31);
 567	}
 568
 569	if (count > 0)
 570		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
 571						fir_pos,
 572						fir_pos + sizeof(u32));
 573
 574	return err;
 575}
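/*
 * Illustrative sketch, not part of this file: an assumed user-side view of
 * the NT_PRFPREG note handled by fpr_get()/fpr_set() above -- NUM_FPU_REGS
 * elf_fpreg_t slots followed by the 32-bit FCSR and FIR words.  fpr_set()
 * ignores whatever is handed to it in the FIR slot.
 *
 *	#include <elf.h>
 *	#include <stdint.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *
 *	struct mips_fpregset {		// assumed layout
 *		uint64_t fpr[32];
 *		uint32_t fcsr;
 *		uint32_t fir;
 *	};
 *
 *	static int write_fpregs(pid_t child, struct mips_fpregset *fp)
 *	{
 *		struct iovec iov = { .iov_base = fp, .iov_len = sizeof(*fp) };
 *
 *		return ptrace(PTRACE_SETREGSET, child, NT_PRFPREG, &iov);
 *	}
 */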
 576
 577/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer.  */
 578static int fp_mode_get(struct task_struct *target,
 579		       const struct user_regset *regset,
 580		       unsigned int pos, unsigned int count,
 581		       void *kbuf, void __user *ubuf)
 582{
 583	int fp_mode;
 584
 585	fp_mode = mips_get_process_fp_mode(target);
 586	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
 587				   sizeof(fp_mode));
 588}
 589
 590/*
 591 * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
 592 *
 593 * We optimize for the case where `count % sizeof(int) == 0', which
 594 * is supposed to have been guaranteed by the kernel before calling
 595 * us, e.g. in `ptrace_regset'.  We enforce that requirement, so
 596 * that we can safely avoid preinitializing temporaries for partial
 597 * mode writes.
 598 */
 599static int fp_mode_set(struct task_struct *target,
 600		       const struct user_regset *regset,
 601		       unsigned int pos, unsigned int count,
 602		       const void *kbuf, const void __user *ubuf)
 603{
 604	int fp_mode;
 605	int err;
 606
 607	BUG_ON(count % sizeof(int));
 608
 609	if (pos + count > sizeof(fp_mode))
 610		return -EIO;
 611
 612	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
 613				 sizeof(fp_mode));
 614	if (err)
 615		return err;
 616
 617	if (count > 0)
 618		err = mips_set_process_fp_mode(target, fp_mode);
 619
 620	return err;
 621}
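/*
 * Illustrative sketch, not part of this file: NT_MIPS_FP_MODE is a single
 * int carrying the PR_FP_MODE_* bits also visible via prctl(PR_GET_FP_MODE).
 * A tracer could request FR=1 mode roughly like this (constants from the
 * UAPI <linux/elf.h> and <linux/prctl.h>):
 *
 *	#include <linux/elf.h>
 *	#include <linux/prctl.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *
 *	static int set_fr1(pid_t child)
 *	{
 *		int mode = PR_FP_MODE_FR;
 *		struct iovec iov = { .iov_base = &mode, .iov_len = sizeof(mode) };
 *
 *		return ptrace(PTRACE_SETREGSET, child, NT_MIPS_FP_MODE, &iov);
 *	}
 */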
 622
 623#endif /* CONFIG_MIPS_FP_SUPPORT */
 624
 625#ifdef CONFIG_CPU_HAS_MSA
 626
 627struct msa_control_regs {
 628	unsigned int fir;
 629	unsigned int fcsr;
 630	unsigned int msair;
 631	unsigned int msacsr;
 632};
 633
 634static int copy_pad_fprs(struct task_struct *target,
 635			 const struct user_regset *regset,
 636			 unsigned int *ppos, unsigned int *pcount,
 637			 void **pkbuf, void __user **pubuf,
 638			 unsigned int live_sz)
 639{
 640	int i, j, start, start_pad, err;
 641	unsigned long long fill = ~0ull;
 642	unsigned int cp_sz, pad_sz;
 643
 644	cp_sz = min(regset->size, live_sz);
 645	pad_sz = regset->size - cp_sz;
 646	WARN_ON(pad_sz % sizeof(fill));
 647
 648	i = start = err = 0;
 649	for (; i < NUM_FPU_REGS; i++, start += regset->size) {
 650		err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf,
 651					   &target->thread.fpu.fpr[i],
 652					   start, start + cp_sz);
 653
 654		start_pad = start + cp_sz;
 655		for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
 656			err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf,
 657						   &fill, start_pad,
 658						   start_pad + sizeof(fill));
 659			start_pad += sizeof(fill);
 660		}
 661	}
 662
 663	return err;
 664}
 665
 666static int msa_get(struct task_struct *target,
 667		   const struct user_regset *regset,
 668		   unsigned int pos, unsigned int count,
 669		   void *kbuf, void __user *ubuf)
 670{
 671	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
 672	const struct msa_control_regs ctrl_regs = {
 673		.fir = boot_cpu_data.fpu_id,
 674		.fcsr = target->thread.fpu.fcr31,
 675		.msair = boot_cpu_data.msa_id,
 676		.msacsr = target->thread.fpu.msacsr,
 677	};
 678	int err;
 679
 680	if (!tsk_used_math(target)) {
 681		/* The task hasn't used FP or MSA, fill with 0xff */
 682		err = copy_pad_fprs(target, regset, &pos, &count,
 683				    &kbuf, &ubuf, 0);
 684	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
 685		/* Copy scalar FP context, fill the rest with 0xff */
 686		err = copy_pad_fprs(target, regset, &pos, &count,
 687				    &kbuf, &ubuf, 8);
 688	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
 689		/* Trivially copy the vector registers */
 690		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 691					  &target->thread.fpu.fpr,
 692					  0, wr_size);
 693	} else {
 694		/* Copy as much context as possible, fill the rest with 0xff */
 695		err = copy_pad_fprs(target, regset, &pos, &count,
 696				    &kbuf, &ubuf,
 697				    sizeof(target->thread.fpu.fpr[0]));
 698	}
 699
 700	err |= user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 701				   &ctrl_regs, wr_size,
 702				   wr_size + sizeof(ctrl_regs));
 703	return err;
 704}
 705
 706static int msa_set(struct task_struct *target,
 707		   const struct user_regset *regset,
 708		   unsigned int pos, unsigned int count,
 709		   const void *kbuf, const void __user *ubuf)
 710{
 711	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
 712	struct msa_control_regs ctrl_regs;
 713	unsigned int cp_sz;
 714	int i, err, start;
 715
 716	init_fp_ctx(target);
 717
 718	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
 719		/* Trivially copy the vector registers */
 720		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 721					 &target->thread.fpu.fpr,
 722					 0, wr_size);
 723	} else {
 724		/* Copy as much context as possible */
 725		cp_sz = min_t(unsigned int, regset->size,
 726			      sizeof(target->thread.fpu.fpr[0]));
 727
 728		i = start = err = 0;
 729		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
 730			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 731						  &target->thread.fpu.fpr[i],
 732						  start, start + cp_sz);
 733		}
 734	}
 735
 736	if (!err)
 737		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
 738					 wr_size, wr_size + sizeof(ctrl_regs));
 739	if (!err) {
 740		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
 741		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
 742	}
 743
 744	return err;
 745}
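/*
 * Illustrative note, not part of this file: an assumed user-side layout of
 * the NT_MIPS_MSA regset served by msa_get()/msa_set() above -- NUM_FPU_REGS
 * 16-byte vector slots followed by the control words.
 *
 *	#include <stdint.h>
 *
 *	struct mips_msa_regs {		// assumption, mirrors msa_get()
 *		uint8_t  wr[32][16];	// vector registers w0..w31
 *		uint32_t fir;
 *		uint32_t fcsr;
 *		uint32_t msair;
 *		uint32_t msacsr;
 *	};
 *
 * Slots the kernel cannot fill (e.g. the upper halves when only scalar FP
 * context exists) are padded with 0xff bytes by copy_pad_fprs().
 */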
 746
 747#endif /* CONFIG_CPU_HAS_MSA */
 748
 749#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 750
 751/*
 752 * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
 753 */
 754static int dsp32_get(struct task_struct *target,
 755		     const struct user_regset *regset,
 756		     unsigned int pos, unsigned int count,
 757		     void *kbuf, void __user *ubuf)
 758{
 759	unsigned int start, num_regs, i;
 760	u32 dspregs[NUM_DSP_REGS + 1];
 761
 762	BUG_ON(count % sizeof(u32));
 763
 764	if (!cpu_has_dsp)
 765		return -EIO;
 766
 767	start = pos / sizeof(u32);
 768	num_regs = count / sizeof(u32);
 769
 770	if (start + num_regs > NUM_DSP_REGS + 1)
 771		return -EIO;
 772
 773	for (i = start; i < num_regs; i++)
 774		switch (i) {
 775		case 0 ... NUM_DSP_REGS - 1:
 776			dspregs[i] = target->thread.dsp.dspr[i];
 777			break;
 778		case NUM_DSP_REGS:
 779			dspregs[i] = target->thread.dsp.dspcontrol;
 780			break;
 781		}
 782	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 783				   sizeof(dspregs));
 784}
 785
 786/*
 787 * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
 788 */
 789static int dsp32_set(struct task_struct *target,
 790		     const struct user_regset *regset,
 791		     unsigned int pos, unsigned int count,
 792		     const void *kbuf, const void __user *ubuf)
 793{
 794	unsigned int start, num_regs, i;
 795	u32 dspregs[NUM_DSP_REGS + 1];
 796	int err;
 797
 798	BUG_ON(count % sizeof(u32));
 799
 800	if (!cpu_has_dsp)
 801		return -EIO;
 802
 803	start = pos / sizeof(u32);
 804	num_regs = count / sizeof(u32);
 805
 806	if (start + num_regs > NUM_DSP_REGS + 1)
 807		return -EIO;
 808
 809	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 810				 sizeof(dspregs));
 811	if (err)
 812		return err;
 813
 814	for (i = start; i < num_regs; i++)
 815		switch (i) {
 816		case 0 ... NUM_DSP_REGS - 1:
 817			target->thread.dsp.dspr[i] = (s32)dspregs[i];
 818			break;
 819		case NUM_DSP_REGS:
 820			target->thread.dsp.dspcontrol = (s32)dspregs[i];
 821			break;
 822		}
 823
 824	return 0;
 825}
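/*
 * Illustrative sketch, not part of this file: NT_MIPS_DSP carries the six
 * DSP accumulator registers plus DSPControl, one regset->size word each
 * (u32 here, u64 in the 64-bit variant below).  Reading it:
 *
 *	#include <linux/elf.h>
 *	#include <stdint.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/uio.h>
 *
 *	static int read_dsp32(pid_t child, uint32_t dsp[7])
 *	{
 *		struct iovec iov = {
 *			.iov_base = dsp,
 *			.iov_len = 7 * sizeof(uint32_t),
 *		};
 *
 *		return ptrace(PTRACE_GETREGSET, child, NT_MIPS_DSP, &iov);
 *	}
 *
 * dsp32_get() returns -EIO when the CPU has no DSP ASE, so the call fails
 * on such systems.
 */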
 826
 827#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 828
 829#ifdef CONFIG_64BIT
 830
 831/*
 832 * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
 833 */
 834static int dsp64_get(struct task_struct *target,
 835		     const struct user_regset *regset,
 836		     unsigned int pos, unsigned int count,
 837		     void *kbuf, void __user *ubuf)
 838{
 839	unsigned int start, num_regs, i;
 840	u64 dspregs[NUM_DSP_REGS + 1];
 841
 842	BUG_ON(count % sizeof(u64));
 843
 844	if (!cpu_has_dsp)
 845		return -EIO;
 846
 847	start = pos / sizeof(u64);
 848	num_regs = count / sizeof(u64);
 849
 850	if (start + num_regs > NUM_DSP_REGS + 1)
 851		return -EIO;
 852
 853	for (i = start; i < num_regs; i++)
 854		switch (i) {
 855		case 0 ... NUM_DSP_REGS - 1:
 856			dspregs[i] = target->thread.dsp.dspr[i];
 857			break;
 858		case NUM_DSP_REGS:
 859			dspregs[i] = target->thread.dsp.dspcontrol;
 860			break;
 861		}
 862	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 863				   sizeof(dspregs));
 864}
 865
 866/*
 867 * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
 868 */
 869static int dsp64_set(struct task_struct *target,
 870		     const struct user_regset *regset,
 871		     unsigned int pos, unsigned int count,
 872		     const void *kbuf, const void __user *ubuf)
 873{
 874	unsigned int start, num_regs, i;
 875	u64 dspregs[NUM_DSP_REGS + 1];
 876	int err;
 877
 878	BUG_ON(count % sizeof(u64));
 879
 880	if (!cpu_has_dsp)
 881		return -EIO;
 882
 883	start = pos / sizeof(u64);
 884	num_regs = count / sizeof(u64);
 885
 886	if (start + num_regs > NUM_DSP_REGS + 1)
 887		return -EIO;
 888
 889	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
 890				 sizeof(dspregs));
 891	if (err)
 892		return err;
 893
 894	for (i = start; i < num_regs; i++)
 895		switch (i) {
 896		case 0 ... NUM_DSP_REGS - 1:
 897			target->thread.dsp.dspr[i] = dspregs[i];
 898			break;
 899		case NUM_DSP_REGS:
 900			target->thread.dsp.dspcontrol = dspregs[i];
 901			break;
 902		}
 903
 904	return 0;
 905}
 906
 907#endif /* CONFIG_64BIT */
 908
 909/*
 910 * Determine whether the DSP context is present.
 911 */
 912static int dsp_active(struct task_struct *target,
 913		      const struct user_regset *regset)
 914{
 915	return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
 916}
 917
 918enum mips_regset {
 919	REGSET_GPR,
 920	REGSET_DSP,
 921#ifdef CONFIG_MIPS_FP_SUPPORT
 922	REGSET_FPR,
 923	REGSET_FP_MODE,
 924#endif
 925#ifdef CONFIG_CPU_HAS_MSA
 926	REGSET_MSA,
 927#endif
 928};
 929
 930struct pt_regs_offset {
 931	const char *name;
 932	int offset;
 933};
 934
 935#define REG_OFFSET_NAME(reg, r) {					\
 936	.name = #reg,							\
 937	.offset = offsetof(struct pt_regs, r)				\
 938}
 939
 940#define REG_OFFSET_END {						\
 941	.name = NULL,							\
 942	.offset = 0							\
 943}
 944
 945static const struct pt_regs_offset regoffset_table[] = {
 946	REG_OFFSET_NAME(r0, regs[0]),
 947	REG_OFFSET_NAME(r1, regs[1]),
 948	REG_OFFSET_NAME(r2, regs[2]),
 949	REG_OFFSET_NAME(r3, regs[3]),
 950	REG_OFFSET_NAME(r4, regs[4]),
 951	REG_OFFSET_NAME(r5, regs[5]),
 952	REG_OFFSET_NAME(r6, regs[6]),
 953	REG_OFFSET_NAME(r7, regs[7]),
 954	REG_OFFSET_NAME(r8, regs[8]),
 955	REG_OFFSET_NAME(r9, regs[9]),
 956	REG_OFFSET_NAME(r10, regs[10]),
 957	REG_OFFSET_NAME(r11, regs[11]),
 958	REG_OFFSET_NAME(r12, regs[12]),
 959	REG_OFFSET_NAME(r13, regs[13]),
 960	REG_OFFSET_NAME(r14, regs[14]),
 961	REG_OFFSET_NAME(r15, regs[15]),
 962	REG_OFFSET_NAME(r16, regs[16]),
 963	REG_OFFSET_NAME(r17, regs[17]),
 964	REG_OFFSET_NAME(r18, regs[18]),
 965	REG_OFFSET_NAME(r19, regs[19]),
 966	REG_OFFSET_NAME(r20, regs[20]),
 967	REG_OFFSET_NAME(r21, regs[21]),
 968	REG_OFFSET_NAME(r22, regs[22]),
 969	REG_OFFSET_NAME(r23, regs[23]),
 970	REG_OFFSET_NAME(r24, regs[24]),
 971	REG_OFFSET_NAME(r25, regs[25]),
 972	REG_OFFSET_NAME(r26, regs[26]),
 973	REG_OFFSET_NAME(r27, regs[27]),
 974	REG_OFFSET_NAME(r28, regs[28]),
 975	REG_OFFSET_NAME(r29, regs[29]),
 976	REG_OFFSET_NAME(r30, regs[30]),
 977	REG_OFFSET_NAME(r31, regs[31]),
 978	REG_OFFSET_NAME(c0_status, cp0_status),
 979	REG_OFFSET_NAME(hi, hi),
 980	REG_OFFSET_NAME(lo, lo),
 981#ifdef CONFIG_CPU_HAS_SMARTMIPS
 982	REG_OFFSET_NAME(acx, acx),
 983#endif
 984	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
 985	REG_OFFSET_NAME(c0_cause, cp0_cause),
 986	REG_OFFSET_NAME(c0_epc, cp0_epc),
 987#ifdef CONFIG_CPU_CAVIUM_OCTEON
 988	REG_OFFSET_NAME(mpl0, mpl[0]),
 989	REG_OFFSET_NAME(mpl1, mpl[1]),
 990	REG_OFFSET_NAME(mpl2, mpl[2]),
 991	REG_OFFSET_NAME(mtp0, mtp[0]),
 992	REG_OFFSET_NAME(mtp1, mtp[1]),
 993	REG_OFFSET_NAME(mtp2, mtp[2]),
 994#endif
 995	REG_OFFSET_END,
 996};
 997
 998/**
 999 * regs_query_register_offset() - query register offset from its name
1000 * @name:       the name of a register
1001 *
1002 * regs_query_register_offset() returns the offset of a register in struct
 1003 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
1004 */
1005int regs_query_register_offset(const char *name)
1006{
1007        const struct pt_regs_offset *roff;
1008        for (roff = regoffset_table; roff->name != NULL; roff++)
1009                if (!strcmp(roff->name, name))
1010                        return roff->offset;
1011        return -EINVAL;
1012}
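/*
 * Illustrative usage, not part of this file: kprobes/uprobes argument
 * fetching resolves registers by name, e.g.
 *
 *	int off = regs_query_register_offset("r4");
 *	unsigned long val = (off >= 0) ? regs_get_register(regs, off) : 0;
 *
 * regs_get_register() is the usual companion helper from <asm/ptrace.h>;
 * treating it as available here is an assumption of this note.
 */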
1013
1014#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
1015
1016static const struct user_regset mips_regsets[] = {
1017	[REGSET_GPR] = {
1018		.core_note_type	= NT_PRSTATUS,
1019		.n		= ELF_NGREG,
1020		.size		= sizeof(unsigned int),
1021		.align		= sizeof(unsigned int),
1022		.get		= gpr32_get,
1023		.set		= gpr32_set,
1024	},
1025	[REGSET_DSP] = {
1026		.core_note_type	= NT_MIPS_DSP,
1027		.n		= NUM_DSP_REGS + 1,
1028		.size		= sizeof(u32),
1029		.align		= sizeof(u32),
1030		.get		= dsp32_get,
1031		.set		= dsp32_set,
1032		.active		= dsp_active,
1033	},
1034#ifdef CONFIG_MIPS_FP_SUPPORT
1035	[REGSET_FPR] = {
1036		.core_note_type	= NT_PRFPREG,
1037		.n		= ELF_NFPREG,
1038		.size		= sizeof(elf_fpreg_t),
1039		.align		= sizeof(elf_fpreg_t),
1040		.get		= fpr_get,
1041		.set		= fpr_set,
1042	},
1043	[REGSET_FP_MODE] = {
1044		.core_note_type	= NT_MIPS_FP_MODE,
1045		.n		= 1,
1046		.size		= sizeof(int),
1047		.align		= sizeof(int),
1048		.get		= fp_mode_get,
1049		.set		= fp_mode_set,
1050	},
1051#endif
1052#ifdef CONFIG_CPU_HAS_MSA
1053	[REGSET_MSA] = {
1054		.core_note_type	= NT_MIPS_MSA,
1055		.n		= NUM_FPU_REGS + 1,
1056		.size		= 16,
1057		.align		= 16,
1058		.get		= msa_get,
1059		.set		= msa_set,
1060	},
1061#endif
1062};
1063
1064static const struct user_regset_view user_mips_view = {
1065	.name		= "mips",
1066	.e_machine	= ELF_ARCH,
1067	.ei_osabi	= ELF_OSABI,
1068	.regsets	= mips_regsets,
1069	.n		= ARRAY_SIZE(mips_regsets),
1070};
1071
1072#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
1073
1074#ifdef CONFIG_64BIT
1075
1076static const struct user_regset mips64_regsets[] = {
1077	[REGSET_GPR] = {
1078		.core_note_type	= NT_PRSTATUS,
1079		.n		= ELF_NGREG,
1080		.size		= sizeof(unsigned long),
1081		.align		= sizeof(unsigned long),
1082		.get		= gpr64_get,
1083		.set		= gpr64_set,
1084	},
1085	[REGSET_DSP] = {
1086		.core_note_type	= NT_MIPS_DSP,
1087		.n		= NUM_DSP_REGS + 1,
1088		.size		= sizeof(u64),
1089		.align		= sizeof(u64),
1090		.get		= dsp64_get,
1091		.set		= dsp64_set,
1092		.active		= dsp_active,
1093	},
1094#ifdef CONFIG_MIPS_FP_SUPPORT
1095	[REGSET_FP_MODE] = {
1096		.core_note_type	= NT_MIPS_FP_MODE,
1097		.n		= 1,
1098		.size		= sizeof(int),
1099		.align		= sizeof(int),
1100		.get		= fp_mode_get,
1101		.set		= fp_mode_set,
1102	},
1103	[REGSET_FPR] = {
1104		.core_note_type	= NT_PRFPREG,
1105		.n		= ELF_NFPREG,
1106		.size		= sizeof(elf_fpreg_t),
1107		.align		= sizeof(elf_fpreg_t),
1108		.get		= fpr_get,
1109		.set		= fpr_set,
1110	},
1111#endif
1112#ifdef CONFIG_CPU_HAS_MSA
1113	[REGSET_MSA] = {
1114		.core_note_type	= NT_MIPS_MSA,
1115		.n		= NUM_FPU_REGS + 1,
1116		.size		= 16,
1117		.align		= 16,
1118		.get		= msa_get,
1119		.set		= msa_set,
1120	},
1121#endif
1122};
1123
1124static const struct user_regset_view user_mips64_view = {
1125	.name		= "mips64",
1126	.e_machine	= ELF_ARCH,
1127	.ei_osabi	= ELF_OSABI,
1128	.regsets	= mips64_regsets,
1129	.n		= ARRAY_SIZE(mips64_regsets),
1130};
1131
1132#ifdef CONFIG_MIPS32_N32
1133
1134static const struct user_regset_view user_mipsn32_view = {
1135	.name		= "mipsn32",
1136	.e_flags	= EF_MIPS_ABI2,
1137	.e_machine	= ELF_ARCH,
1138	.ei_osabi	= ELF_OSABI,
1139	.regsets	= mips64_regsets,
1140	.n		= ARRAY_SIZE(mips64_regsets),
1141};
1142
1143#endif /* CONFIG_MIPS32_N32 */
1144
1145#endif /* CONFIG_64BIT */
1146
1147const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1148{
1149#ifdef CONFIG_32BIT
1150	return &user_mips_view;
1151#else
1152#ifdef CONFIG_MIPS32_O32
1153	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
1154		return &user_mips_view;
1155#endif
1156#ifdef CONFIG_MIPS32_N32
1157	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
1158		return &user_mipsn32_view;
1159#endif
1160	return &user_mips64_view;
1161#endif
1162}
1163
1164long arch_ptrace(struct task_struct *child, long request,
1165		 unsigned long addr, unsigned long data)
1166{
1167	int ret;
1168	void __user *addrp = (void __user *) addr;
1169	void __user *datavp = (void __user *) data;
1170	unsigned long __user *datalp = (void __user *) data;
1171
1172	switch (request) {
1173	/* when I and D space are separate, these will need to be fixed. */
1174	case PTRACE_PEEKTEXT: /* read word at location addr. */
1175	case PTRACE_PEEKDATA:
1176		ret = generic_ptrace_peekdata(child, addr, data);
1177		break;
1178
1179	/* Read the word at location addr in the USER area. */
1180	case PTRACE_PEEKUSR: {
1181		struct pt_regs *regs;
1182		unsigned long tmp = 0;
1183
1184		regs = task_pt_regs(child);
1185		ret = 0;  /* Default return value. */
1186
1187		switch (addr) {
1188		case 0 ... 31:
1189			tmp = regs->regs[addr];
1190			break;
1191#ifdef CONFIG_MIPS_FP_SUPPORT
1192		case FPR_BASE ... FPR_BASE + 31: {
1193			union fpureg *fregs;
1194
1195			if (!tsk_used_math(child)) {
1196				/* FP not yet used */
1197				tmp = -1;
1198				break;
1199			}
1200			fregs = get_fpu_regs(child);
1201
1202#ifdef CONFIG_32BIT
1203			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1204				/*
1205				 * The odd registers are actually the high
1206				 * order bits of the values stored in the even
1207				 * registers.
1208				 */
1209				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1210						addr & 1);
1211				break;
1212			}
1213#endif
1214			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
1215			break;
1216		}
1217		case FPC_CSR:
1218			tmp = child->thread.fpu.fcr31;
1219			break;
1220		case FPC_EIR:
1221			/* implementation / version register */
1222			tmp = boot_cpu_data.fpu_id;
1223			break;
1224#endif
1225		case PC:
1226			tmp = regs->cp0_epc;
1227			break;
1228		case CAUSE:
1229			tmp = regs->cp0_cause;
1230			break;
1231		case BADVADDR:
1232			tmp = regs->cp0_badvaddr;
1233			break;
1234		case MMHI:
1235			tmp = regs->hi;
1236			break;
1237		case MMLO:
1238			tmp = regs->lo;
1239			break;
1240#ifdef CONFIG_CPU_HAS_SMARTMIPS
1241		case ACX:
1242			tmp = regs->acx;
1243			break;
1244#endif
1245		case DSP_BASE ... DSP_BASE + 5: {
1246			dspreg_t *dregs;
1247
1248			if (!cpu_has_dsp) {
1249				tmp = 0;
1250				ret = -EIO;
1251				goto out;
1252			}
1253			dregs = __get_dsp_regs(child);
1254			tmp = dregs[addr - DSP_BASE];
1255			break;
1256		}
1257		case DSP_CONTROL:
1258			if (!cpu_has_dsp) {
1259				tmp = 0;
1260				ret = -EIO;
1261				goto out;
1262			}
1263			tmp = child->thread.dsp.dspcontrol;
1264			break;
1265		default:
1266			tmp = 0;
1267			ret = -EIO;
1268			goto out;
1269		}
1270		ret = put_user(tmp, datalp);
1271		break;
1272	}
1273
1274	/* when I and D space are separate, this will have to be fixed. */
1275	case PTRACE_POKETEXT: /* write the word at location addr. */
1276	case PTRACE_POKEDATA:
1277		ret = generic_ptrace_pokedata(child, addr, data);
1278		break;
1279
1280	case PTRACE_POKEUSR: {
1281		struct pt_regs *regs;
1282		ret = 0;
1283		regs = task_pt_regs(child);
1284
1285		switch (addr) {
1286		case 0 ... 31:
1287			regs->regs[addr] = data;
1288			/* System call number may have been changed */
1289			if (addr == 2)
1290				mips_syscall_update_nr(child, regs);
1291			else if (addr == 4 &&
1292				 mips_syscall_is_indirect(child, regs))
1293				mips_syscall_update_nr(child, regs);
1294			break;
1295#ifdef CONFIG_MIPS_FP_SUPPORT
1296		case FPR_BASE ... FPR_BASE + 31: {
1297			union fpureg *fregs = get_fpu_regs(child);
1298
1299			init_fp_ctx(child);
1300#ifdef CONFIG_32BIT
1301			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
1302				/*
1303				 * The odd registers are actually the high
1304				 * order bits of the values stored in the even
1305				 * registers.
1306				 */
1307				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
1308					  addr & 1, data);
1309				break;
1310			}
1311#endif
1312			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
1313			break;
1314		}
1315		case FPC_CSR:
1316			init_fp_ctx(child);
1317			ptrace_setfcr31(child, data);
1318			break;
1319#endif
1320		case PC:
1321			regs->cp0_epc = data;
1322			break;
1323		case MMHI:
1324			regs->hi = data;
1325			break;
1326		case MMLO:
1327			regs->lo = data;
1328			break;
1329#ifdef CONFIG_CPU_HAS_SMARTMIPS
1330		case ACX:
1331			regs->acx = data;
1332			break;
1333#endif
1334		case DSP_BASE ... DSP_BASE + 5: {
1335			dspreg_t *dregs;
1336
1337			if (!cpu_has_dsp) {
1338				ret = -EIO;
1339				break;
1340			}
1341
1342			dregs = __get_dsp_regs(child);
1343			dregs[addr - DSP_BASE] = data;
1344			break;
1345		}
1346		case DSP_CONTROL:
1347			if (!cpu_has_dsp) {
1348				ret = -EIO;
1349				break;
1350			}
1351			child->thread.dsp.dspcontrol = data;
1352			break;
1353		default:
1354			/* The rest are not allowed. */
1355			ret = -EIO;
1356			break;
1357		}
1358		break;
1359		}
1360
1361	case PTRACE_GETREGS:
1362		ret = ptrace_getregs(child, datavp);
1363		break;
1364
1365	case PTRACE_SETREGS:
1366		ret = ptrace_setregs(child, datavp);
1367		break;
1368
1369#ifdef CONFIG_MIPS_FP_SUPPORT
1370	case PTRACE_GETFPREGS:
1371		ret = ptrace_getfpregs(child, datavp);
1372		break;
1373
1374	case PTRACE_SETFPREGS:
1375		ret = ptrace_setfpregs(child, datavp);
1376		break;
1377#endif
1378	case PTRACE_GET_THREAD_AREA:
1379		ret = put_user(task_thread_info(child)->tp_value, datalp);
1380		break;
1381
1382	case PTRACE_GET_WATCH_REGS:
1383		ret = ptrace_get_watch_regs(child, addrp);
1384		break;
1385
1386	case PTRACE_SET_WATCH_REGS:
1387		ret = ptrace_set_watch_regs(child, addrp);
1388		break;
1389
1390	default:
1391		ret = ptrace_request(child, request, addr, data);
1392		break;
1393	}
1394 out:
1395	return ret;
1396}
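/*
 * Illustrative sketch, not part of this file: the PTRACE_PEEKUSR/
 * PTRACE_POKEUSR "addresses" handled above are the symbolic offsets
 * (0-31 for the GPRs, then FPR_BASE, PC, CAUSE, BADVADDR, MMHI, MMLO,
 * FPC_CSR, DSP_BASE, ...) exported by the UAPI <asm/ptrace.h>.  Reading
 * the program counter of a stopped tracee:
 *
 *	#include <errno.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <asm/ptrace.h>
 *
 *	static long peek_pc(pid_t child)
 *	{
 *		errno = 0;
 *		return ptrace(PTRACE_PEEKUSER, child, PC, NULL);
 *	}
 *
 * A -1 return with errno still 0 is a legitimate register value, hence
 * the errno reset before the call.
 */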
1397
1398/*
1399 * Notification of system call entry/exit
1400 * - triggered by current->work.syscall_trace
1401 */
1402asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
1403{
1404	user_exit();
1405
1406	current_thread_info()->syscall = syscall;
1407
1408	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
1409		if (tracehook_report_syscall_entry(regs))
1410			return -1;
1411		syscall = current_thread_info()->syscall;
1412	}
1413
1414#ifdef CONFIG_SECCOMP
1415	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
1416		int ret, i;
1417		struct seccomp_data sd;
1418		unsigned long args[6];
1419
1420		sd.nr = syscall;
1421		sd.arch = syscall_get_arch(current);
1422		syscall_get_arguments(current, regs, args);
1423		for (i = 0; i < 6; i++)
1424			sd.args[i] = args[i];
1425		sd.instruction_pointer = KSTK_EIP(current);
1426
1427		ret = __secure_computing(&sd);
1428		if (ret == -1)
1429			return ret;
1430		syscall = current_thread_info()->syscall;
1431	}
1432#endif
1433
1434	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1435		trace_sys_enter(regs, regs->regs[2]);
1436
1437	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
1438			    regs->regs[6], regs->regs[7]);
1439
1440	/*
1441	 * Negative syscall numbers are mistaken for rejected syscalls, but
1442	 * won't have had the return value set appropriately, so we do so now.
1443	 */
1444	if (syscall < 0)
1445		syscall_set_return_value(current, regs, -ENOSYS, 0);
1446	return syscall;
1447}
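/*
 * Illustrative sketch, not part of this file: syscall_trace_enter() and
 * syscall_trace_leave() pair up with a tracer driving the child via
 * PTRACE_SYSCALL, which stops it once on entry to and once on exit from
 * every system call:
 *
 *	#include <sys/ptrace.h>
 *	#include <sys/types.h>
 *	#include <sys/wait.h>
 *
 *	static void trace_syscalls(pid_t child)
 *	{
 *		int status;
 *
 *		for (;;) {
 *			if (ptrace(PTRACE_SYSCALL, child, NULL, NULL) == -1)
 *				break;
 *			if (waitpid(child, &status, 0) == -1 || WIFEXITED(status))
 *				break;
 *			// entry and exit stops alternate; inspect registers
 *			// here with PTRACE_GETREGS or PTRACE_PEEKUSER
 *		}
 *	}
 */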
1448
1449/*
1450 * Notification of system call entry/exit
1451 * - triggered by current->work.syscall_trace
1452 */
1453asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1454{
1455        /*
1456	 * We may come here right after calling schedule_user()
1457	 * or do_notify_resume(), in which case we can be in RCU
1458	 * user mode.
1459	 */
1460	user_exit();
1461
1462	audit_syscall_exit(regs);
1463
1464	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1465		trace_sys_exit(regs, regs_return_value(regs));
1466
1467	if (test_thread_flag(TIF_SYSCALL_TRACE))
1468		tracehook_report_syscall_exit(regs, 0);
1469
1470	user_enter();
1471}