v4.17
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1992 Ross Biro
   7 * Copyright (C) Linus Torvalds
   8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
   9 * Copyright (C) 1996 David S. Miller
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 1999 MIPS Technologies, Inc.
  12 * Copyright (C) 2000 Ulf Carlsson
  13 *
  14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
  15 * binaries.
  16 */
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/elf.h>
  20#include <linux/kernel.h>
  21#include <linux/sched.h>
  22#include <linux/sched/task_stack.h>
  23#include <linux/mm.h>
  24#include <linux/errno.h>
  25#include <linux/ptrace.h>
  26#include <linux/regset.h>
  27#include <linux/smp.h>
  28#include <linux/security.h>
  29#include <linux/stddef.h>
  30#include <linux/tracehook.h>
  31#include <linux/audit.h>
  32#include <linux/seccomp.h>
  33#include <linux/ftrace.h>
  34
  35#include <asm/byteorder.h>
  36#include <asm/cpu.h>
  37#include <asm/cpu-info.h>
  38#include <asm/dsp.h>
  39#include <asm/fpu.h>
  40#include <asm/mipsregs.h>
  41#include <asm/mipsmtregs.h>
  42#include <asm/pgtable.h>
  43#include <asm/page.h>
  44#include <asm/syscall.h>
  45#include <linux/uaccess.h>
  46#include <asm/bootinfo.h>
  47#include <asm/reg.h>
  48
  49#define CREATE_TRACE_POINTS
  50#include <trace/events/syscalls.h>
  51
  52static void init_fp_ctx(struct task_struct *target)
  53{
  54	/* If FP has been used then the target already has context */
  55	if (tsk_used_math(target))
  56		return;
  57
  58	/* Begin with data registers set to all 1s... */
  59	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
  60
  61	/* FCSR has been preset by `mips_set_personality_nan'.  */
  62
  63	/*
  64	 * Record that the target has "used" math, such that the context
  65	 * just initialised, and any modifications made by the caller,
  66	 * aren't discarded.
  67	 */
  68	set_stopped_child_used_math(target);
  69}
  70
  71/*
  72 * Called by kernel/ptrace.c when detaching..
  73 *
  74 * Make sure single step bits etc are not set.
  75 */
  76void ptrace_disable(struct task_struct *child)
  77{
  78	/* Don't load the watchpoint registers for the ex-child. */
  79	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
  80}
  81
  82/*
  83 * Poke at FCSR according to its mask.  Set the Cause bits even
  84 * if a corresponding Enable bit is set.  This will be noticed at
  85 * the time the thread is switched to and SIGFPE thrown accordingly.
  86 */
  87static void ptrace_setfcr31(struct task_struct *child, u32 value)
  88{
  89	u32 fcr31;
  90	u32 mask;
  91
  92	fcr31 = child->thread.fpu.fcr31;
  93	mask = boot_cpu_data.fpu_msk31;
  94	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
  95}
  96
  97/*
  98 * Read a general register set.	 We always use the 64-bit format, even
  99 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 100 * Registers are sign extended to fill the available space.
 101 */
 102int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
 103{
 104	struct pt_regs *regs;
 105	int i;
 106
 107	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
 108		return -EIO;
 109
 110	regs = task_pt_regs(child);
 111
 112	for (i = 0; i < 32; i++)
 113		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
 114	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
 115	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
 116	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
 117	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
 118	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
 119	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
 120
 121	return 0;
 122}
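/*
 * Illustrative sketch, not part of the kernel source: a tracer passes a
 * 38 * 8 byte buffer through the ptrace data argument and reads the slots
 * in the order written above (32 GPRs, then lo, hi, cp0_epc, cp0_badvaddr,
 * cp0_status, cp0_cause), e.g.:
 *
 *	uint64_t gregs[38];
 *	if (ptrace(PTRACE_GETREGS, pid, NULL, gregs) == 0)
 *		printf("sp=%#llx epc=%#llx\n",
 *		       (unsigned long long)gregs[29],
 *		       (unsigned long long)gregs[34]);
 */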
 123
 124/*
 125 * Write a general register set.  As for PTRACE_GETREGS, we always use
 126 * the 64-bit format.  On a 32-bit kernel only the lower order half
 127 * (according to endianness) will be used.
 128 */
 129int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
 130{
 131	struct pt_regs *regs;
 132	int i;
 133
 134	if (!access_ok(VERIFY_READ, data, 38 * 8))
 135		return -EIO;
 136
 137	regs = task_pt_regs(child);
 138
 139	for (i = 0; i < 32; i++)
 140		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
 141	__get_user(regs->lo, (__s64 __user *)&data->lo);
 142	__get_user(regs->hi, (__s64 __user *)&data->hi);
 143	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
 144
 145	/* badvaddr, status, and cause may not be written.  */
 146
 147	/* System call number may have been changed */
 148	mips_syscall_update_nr(child, regs);
 149
 150	return 0;
 151}
 152
 153int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
 154{
 155	int i;
 156
 157	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
 158		return -EIO;
 159
 160	if (tsk_used_math(child)) {
 161		union fpureg *fregs = get_fpu_regs(child);
 162		for (i = 0; i < 32; i++)
 163			__put_user(get_fpr64(&fregs[i], 0),
 164				   i + (__u64 __user *)data);
 165	} else {
 166		for (i = 0; i < 32; i++)
 167			__put_user((__u64) -1, i + (__u64 __user *) data);
 168	}
 169
 170	__put_user(child->thread.fpu.fcr31, data + 64);
 171	__put_user(boot_cpu_data.fpu_id, data + 65);
 172
 173	return 0;
 174}
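/*
 * Illustrative sketch, not part of the kernel source: the user buffer is
 * 33 * 8 bytes -- 32 64-bit FP registers followed by the 32-bit FCSR and
 * FIR words at 32-bit indices 64 and 65, matching the layout above:
 *
 *	uint64_t fpregs[33];
 *	if (ptrace(PTRACE_GETFPREGS, pid, NULL, fpregs) == 0) {
 *		uint32_t fcsr = ((uint32_t *)fpregs)[64];
 *		uint32_t fir  = ((uint32_t *)fpregs)[65];
 *	}
 */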
 175
 176int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
 177{
 178	union fpureg *fregs;
 179	u64 fpr_val;
 180	u32 value;
 181	int i;
 182
 183	if (!access_ok(VERIFY_READ, data, 33 * 8))
 184		return -EIO;
 185
 186	init_fp_ctx(child);
 187	fregs = get_fpu_regs(child);
 188
 189	for (i = 0; i < 32; i++) {
 190		__get_user(fpr_val, i + (__u64 __user *)data);
 191		set_fpr64(&fregs[i], 0, fpr_val);
 192	}
 193
 194	__get_user(value, data + 64);
 195	ptrace_setfcr31(child, value);
 196
 197	/* FIR may not be written.  */
 198
 199	return 0;
 200}
 201
 202int ptrace_get_watch_regs(struct task_struct *child,
 203			  struct pt_watch_regs __user *addr)
 204{
 205	enum pt_watch_style style;
 206	int i;
 207
 208	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 209		return -EIO;
 210	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
 211		return -EIO;
 212
 213#ifdef CONFIG_32BIT
 214	style = pt_watch_style_mips32;
 215#define WATCH_STYLE mips32
 216#else
 217	style = pt_watch_style_mips64;
 218#define WATCH_STYLE mips64
 219#endif
 220
 221	__put_user(style, &addr->style);
 222	__put_user(boot_cpu_data.watch_reg_use_cnt,
 223		   &addr->WATCH_STYLE.num_valid);
 224	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 225		__put_user(child->thread.watch.mips3264.watchlo[i],
 226			   &addr->WATCH_STYLE.watchlo[i]);
 227		__put_user(child->thread.watch.mips3264.watchhi[i] &
 228				(MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
 229			   &addr->WATCH_STYLE.watchhi[i]);
 230		__put_user(boot_cpu_data.watch_reg_masks[i],
 231			   &addr->WATCH_STYLE.watch_masks[i]);
 232	}
 233	for (; i < 8; i++) {
 234		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
 235		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
 236		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
 237	}
 238
 239	return 0;
 240}
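/*
 * Illustrative sketch, not part of the kernel source: unlike most
 * requests, the watch-register calls take the user buffer through the
 * ptrace addr argument (see arch_ptrace() below):
 *
 *	struct pt_watch_regs wr;
 *	if (ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL) == 0 &&
 *	    wr.style == pt_watch_style_mips64)
 *		printf("%u watch register(s)\n", wr.mips64.num_valid);
 */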
 241
 242int ptrace_set_watch_regs(struct task_struct *child,
 243			  struct pt_watch_regs __user *addr)
 244{
 245	int i;
 246	int watch_active = 0;
 247	unsigned long lt[NUM_WATCH_REGS];
 248	u16 ht[NUM_WATCH_REGS];
 249
 250	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 251		return -EIO;
 252	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
 253		return -EIO;
 254	/* Check the values. */
 255	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 256		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
 257#ifdef CONFIG_32BIT
 258		if (lt[i] & __UA_LIMIT)
 259			return -EINVAL;
 260#else
 261		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
 262			if (lt[i] & 0xffffffff80000000UL)
 263				return -EINVAL;
 264		} else {
 265			if (lt[i] & __UA_LIMIT)
 266				return -EINVAL;
 267		}
 268#endif
 269		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
 270		if (ht[i] & ~MIPS_WATCHHI_MASK)
 271			return -EINVAL;
 272	}
 273	/* Install them. */
 274	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 275		if (lt[i] & MIPS_WATCHLO_IRW)
 276			watch_active = 1;
 277		child->thread.watch.mips3264.watchlo[i] = lt[i];
 278		/* Set the G bit. */
 279		child->thread.watch.mips3264.watchhi[i] = ht[i];
 280	}
 281
 282	if (watch_active)
 283		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
 284	else
 285		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
 286
 287	return 0;
 288}
 289
 290/* regset get/set implementations */
 291
 292#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 293
 294static int gpr32_get(struct task_struct *target,
 295		     const struct user_regset *regset,
 296		     unsigned int pos, unsigned int count,
 297		     void *kbuf, void __user *ubuf)
 298{
 299	struct pt_regs *regs = task_pt_regs(target);
 300	u32 uregs[ELF_NGREG] = {};
 301
 302	mips_dump_regs32(uregs, regs);
 303	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
 304				   sizeof(uregs));
 305}
 306
 307static int gpr32_set(struct task_struct *target,
 308		     const struct user_regset *regset,
 309		     unsigned int pos, unsigned int count,
 310		     const void *kbuf, const void __user *ubuf)
 311{
 312	struct pt_regs *regs = task_pt_regs(target);
 313	u32 uregs[ELF_NGREG];
 314	unsigned start, num_regs, i;
 315	int err;
 316
 317	start = pos / sizeof(u32);
 318	num_regs = count / sizeof(u32);
 319
 320	if (start + num_regs > ELF_NGREG)
 321		return -EIO;
 322
 323	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 324				 sizeof(uregs));
 325	if (err)
 326		return err;
 327
 328	for (i = start; i < num_regs; i++) {
 329		/*
 330		 * Cast all values to signed here so that if this is a 64-bit
 331		 * kernel, the supplied 32-bit values will be sign extended.
 332		 */
 333		switch (i) {
 334		case MIPS32_EF_R1 ... MIPS32_EF_R25:
 335			/* k0/k1 are ignored. */
 336		case MIPS32_EF_R28 ... MIPS32_EF_R31:
 337			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
 338			break;
 339		case MIPS32_EF_LO:
 340			regs->lo = (s32)uregs[i];
 341			break;
 342		case MIPS32_EF_HI:
 343			regs->hi = (s32)uregs[i];
 344			break;
 345		case MIPS32_EF_CP0_EPC:
 346			regs->cp0_epc = (s32)uregs[i];
 347			break;
 348		}
 349	}
 350
 351	/* System call number may have been changed */
 352	mips_syscall_update_nr(target, regs);
 353
 354	return 0;
 355}
 356
 357#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 358
 359#ifdef CONFIG_64BIT
 360
 361static int gpr64_get(struct task_struct *target,
 362		     const struct user_regset *regset,
 363		     unsigned int pos, unsigned int count,
 364		     void *kbuf, void __user *ubuf)
 365{
 366	struct pt_regs *regs = task_pt_regs(target);
 367	u64 uregs[ELF_NGREG] = {};
 368
 369	mips_dump_regs64(uregs, regs);
 370	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0,
 371				   sizeof(uregs));
 372}
 373
 374static int gpr64_set(struct task_struct *target,
 375		     const struct user_regset *regset,
 376		     unsigned int pos, unsigned int count,
 377		     const void *kbuf, const void __user *ubuf)
 378{
 379	struct pt_regs *regs = task_pt_regs(target);
 380	u64 uregs[ELF_NGREG];
 381	unsigned start, num_regs, i;
 382	int err;
 383
 384	start = pos / sizeof(u64);
 385	num_regs = count / sizeof(u64);
 386
 387	if (start + num_regs > ELF_NGREG)
 388		return -EIO;
 389
 390	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
 391				 sizeof(uregs));
 392	if (err)
 393		return err;
 394
 395	for (i = start; i < num_regs; i++) {
 396		switch (i) {
 397		case MIPS64_EF_R1 ... MIPS64_EF_R25:
 398			/* k0/k1 are ignored. */
 399		case MIPS64_EF_R28 ... MIPS64_EF_R31:
 400			regs->regs[i - MIPS64_EF_R0] = uregs[i];
 401			break;
 402		case MIPS64_EF_LO:
 403			regs->lo = uregs[i];
 404			break;
 405		case MIPS64_EF_HI:
 406			regs->hi = uregs[i];
 407			break;
 408		case MIPS64_EF_CP0_EPC:
 409			regs->cp0_epc = uregs[i];
 410			break;
 411		}
 412	}
 413
 414	/* System call number may have been changed */
 415	mips_syscall_update_nr(target, regs);
 416
 417	return 0;
 418}
 419
 420#endif /* CONFIG_64BIT */
 421
 422/*
 423 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 424 * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
 425 * correspond 1:1 to buffer slots.  Only general registers are copied.
 426 */
 427static int fpr_get_fpa(struct task_struct *target,
 428		       unsigned int *pos, unsigned int *count,
 429		       void **kbuf, void __user **ubuf)
 430{
 431	return user_regset_copyout(pos, count, kbuf, ubuf,
 432				   &target->thread.fpu,
 433				   0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
 434}
 435
 436/*
 437 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 438 * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
 439 * general register slots are copied to buffer slots.  Only general
 440 * registers are copied.
 441 */
 442static int fpr_get_msa(struct task_struct *target,
 443		       unsigned int *pos, unsigned int *count,
 444		       void **kbuf, void __user **ubuf)
 445{
 446	unsigned int i;
 447	u64 fpr_val;
 448	int err;
 449
 450	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 451	for (i = 0; i < NUM_FPU_REGS; i++) {
 452		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
 453		err = user_regset_copyout(pos, count, kbuf, ubuf,
 454					  &fpr_val, i * sizeof(elf_fpreg_t),
 455					  (i + 1) * sizeof(elf_fpreg_t));
 456		if (err)
 457			return err;
 458	}
 459
 460	return 0;
 461}
 462
 463/*
 464 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
 465 * Choose the appropriate helper for general registers, and then copy
 466 * the FCSR and FIR registers separately.
 467 */
 468static int fpr_get(struct task_struct *target,
 469		   const struct user_regset *regset,
 470		   unsigned int pos, unsigned int count,
 471		   void *kbuf, void __user *ubuf)
 472{
 473	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 474	const int fir_pos = fcr31_pos + sizeof(u32);
 475	int err;
 476
 477	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 478		err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
 479	else
 480		err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
 481	if (err)
 482		return err;
 483
 484	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 485				  &target->thread.fpu.fcr31,
 486				  fcr31_pos, fcr31_pos + sizeof(u32));
 487	if (err)
 488		return err;
 489
 490	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 491				  &boot_cpu_data.fpu_id,
 492				  fir_pos, fir_pos + sizeof(u32));
 493
 494	return err;
 495}
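/*
 * Illustrative sketch, not part of the kernel source: fpr_get()/fpr_set()
 * are reached from userspace through the generic regset interface, e.g.:
 *
 *	elf_fpregset_t fpregs;
 *	struct iovec iov = { .iov_base = &fpregs, .iov_len = sizeof(fpregs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov);
 */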
 496
 497/*
 498 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 499 * !CONFIG_CPU_HAS_MSA variant.   Buffer slots correspond 1:1 to FP
 500 * context's general register slots.  Only general registers are copied.
 501 */
 502static int fpr_set_fpa(struct task_struct *target,
 503		       unsigned int *pos, unsigned int *count,
 504		       const void **kbuf, const void __user **ubuf)
 505{
 506	return user_regset_copyin(pos, count, kbuf, ubuf,
 507				  &target->thread.fpu,
 508				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
 509}
 510
 511/*
 512 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 513 * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
 514 * bits only of FP context's general register slots.  Only general
 515 * registers are copied.
 516 */
 517static int fpr_set_msa(struct task_struct *target,
 518		       unsigned int *pos, unsigned int *count,
 519		       const void **kbuf, const void __user **ubuf)
 520{
 521	unsigned int i;
 522	u64 fpr_val;
 523	int err;
 524
 525	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 526	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
 527		err = user_regset_copyin(pos, count, kbuf, ubuf,
 528					 &fpr_val, i * sizeof(elf_fpreg_t),
 529					 (i + 1) * sizeof(elf_fpreg_t));
 530		if (err)
 531			return err;
 532		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
 533	}
 534
 535	return 0;
 536}
 537
 538/*
 539 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 540 * Choose the appropriate helper for general registers, and then copy
 541 * the FCSR register separately.  Ignore the incoming FIR register
 542 * contents though, as the register is read-only.
 543 *
 544 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 545 * which is supposed to have been guaranteed by the kernel before
 546 * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
 547 * so that we can safely avoid preinitializing temporaries for
 548 * partial register writes.
 549 */
 550static int fpr_set(struct task_struct *target,
 551		   const struct user_regset *regset,
 552		   unsigned int pos, unsigned int count,
 553		   const void *kbuf, const void __user *ubuf)
 554{
 555	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 556	const int fir_pos = fcr31_pos + sizeof(u32);
 557	u32 fcr31;
 558	int err;
 559
 560	BUG_ON(count % sizeof(elf_fpreg_t));
 561
 562	if (pos + count > sizeof(elf_fpregset_t))
 563		return -EIO;
 564
 565	init_fp_ctx(target);
 566
 567	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 568		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
 569	else
 570		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
 571	if (err)
 572		return err;
 573
 574	if (count > 0) {
 575		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 576					 &fcr31,
 577					 fcr31_pos, fcr31_pos + sizeof(u32));
 578		if (err)
 579			return err;
 580
 581		ptrace_setfcr31(target, fcr31);
 582	}
 583
 584	if (count > 0)
 585		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
 586						fir_pos,
 587						fir_pos + sizeof(u32));
 588
 589	return err;
 590}
 591
 592enum mips_regset {
 593	REGSET_GPR,
 594	REGSET_FPR,
 595};
 596
 597struct pt_regs_offset {
 598	const char *name;
 599	int offset;
 600};
 601
 602#define REG_OFFSET_NAME(reg, r) {					\
 603	.name = #reg,							\
 604	.offset = offsetof(struct pt_regs, r)				\
 605}
 606
 607#define REG_OFFSET_END {						\
 608	.name = NULL,							\
 609	.offset = 0							\
 610}
 611
 612static const struct pt_regs_offset regoffset_table[] = {
 613	REG_OFFSET_NAME(r0, regs[0]),
 614	REG_OFFSET_NAME(r1, regs[1]),
 615	REG_OFFSET_NAME(r2, regs[2]),
 616	REG_OFFSET_NAME(r3, regs[3]),
 617	REG_OFFSET_NAME(r4, regs[4]),
 618	REG_OFFSET_NAME(r5, regs[5]),
 619	REG_OFFSET_NAME(r6, regs[6]),
 620	REG_OFFSET_NAME(r7, regs[7]),
 621	REG_OFFSET_NAME(r8, regs[8]),
 622	REG_OFFSET_NAME(r9, regs[9]),
 623	REG_OFFSET_NAME(r10, regs[10]),
 624	REG_OFFSET_NAME(r11, regs[11]),
 625	REG_OFFSET_NAME(r12, regs[12]),
 626	REG_OFFSET_NAME(r13, regs[13]),
 627	REG_OFFSET_NAME(r14, regs[14]),
 628	REG_OFFSET_NAME(r15, regs[15]),
 629	REG_OFFSET_NAME(r16, regs[16]),
 630	REG_OFFSET_NAME(r17, regs[17]),
 631	REG_OFFSET_NAME(r18, regs[18]),
 632	REG_OFFSET_NAME(r19, regs[19]),
 633	REG_OFFSET_NAME(r20, regs[20]),
 634	REG_OFFSET_NAME(r21, regs[21]),
 635	REG_OFFSET_NAME(r22, regs[22]),
 636	REG_OFFSET_NAME(r23, regs[23]),
 637	REG_OFFSET_NAME(r24, regs[24]),
 638	REG_OFFSET_NAME(r25, regs[25]),
 639	REG_OFFSET_NAME(r26, regs[26]),
 640	REG_OFFSET_NAME(r27, regs[27]),
 641	REG_OFFSET_NAME(r28, regs[28]),
 642	REG_OFFSET_NAME(r29, regs[29]),
 643	REG_OFFSET_NAME(r30, regs[30]),
 644	REG_OFFSET_NAME(r31, regs[31]),
 645	REG_OFFSET_NAME(c0_status, cp0_status),
 646	REG_OFFSET_NAME(hi, hi),
 647	REG_OFFSET_NAME(lo, lo),
 648#ifdef CONFIG_CPU_HAS_SMARTMIPS
 649	REG_OFFSET_NAME(acx, acx),
 650#endif
 651	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
 652	REG_OFFSET_NAME(c0_cause, cp0_cause),
 653	REG_OFFSET_NAME(c0_epc, cp0_epc),
 654#ifdef CONFIG_CPU_CAVIUM_OCTEON
 655	REG_OFFSET_NAME(mpl0, mpl[0]),
 656	REG_OFFSET_NAME(mpl1, mpl[1]),
 657	REG_OFFSET_NAME(mpl2, mpl[2]),
 658	REG_OFFSET_NAME(mtp0, mtp[0]),
 659	REG_OFFSET_NAME(mtp1, mtp[1]),
 660	REG_OFFSET_NAME(mtp2, mtp[2]),
 661#endif
 662	REG_OFFSET_END,
 663};
 664
 665/**
 666 * regs_query_register_offset() - query register offset from its name
 667 * @name:       the name of a register
 668 *
 669 * regs_query_register_offset() returns the offset of a register in struct
 670 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 671 */
 672int regs_query_register_offset(const char *name)
 673{
 674        const struct pt_regs_offset *roff;
 675        for (roff = regoffset_table; roff->name != NULL; roff++)
 676                if (!strcmp(roff->name, name))
 677                        return roff->offset;
 678        return -EINVAL;
 679}
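/*
 * For example, regs_query_register_offset("c0_epc") returns
 * offsetof(struct pt_regs, cp0_epc), while an unknown name returns
 * -EINVAL.
 */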
 680
 681#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
 682
 683static const struct user_regset mips_regsets[] = {
 684	[REGSET_GPR] = {
 685		.core_note_type	= NT_PRSTATUS,
 686		.n		= ELF_NGREG,
 687		.size		= sizeof(unsigned int),
 688		.align		= sizeof(unsigned int),
 689		.get		= gpr32_get,
 690		.set		= gpr32_set,
 691	},
 692	[REGSET_FPR] = {
 693		.core_note_type	= NT_PRFPREG,
 694		.n		= ELF_NFPREG,
 695		.size		= sizeof(elf_fpreg_t),
 696		.align		= sizeof(elf_fpreg_t),
 697		.get		= fpr_get,
 698		.set		= fpr_set,
 699	},
 700};
 701
 702static const struct user_regset_view user_mips_view = {
 703	.name		= "mips",
 704	.e_machine	= ELF_ARCH,
 705	.ei_osabi	= ELF_OSABI,
 706	.regsets	= mips_regsets,
 707	.n		= ARRAY_SIZE(mips_regsets),
 708};
 709
 710#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
 711
 712#ifdef CONFIG_64BIT
 713
 714static const struct user_regset mips64_regsets[] = {
 715	[REGSET_GPR] = {
 716		.core_note_type	= NT_PRSTATUS,
 717		.n		= ELF_NGREG,
 718		.size		= sizeof(unsigned long),
 719		.align		= sizeof(unsigned long),
 720		.get		= gpr64_get,
 721		.set		= gpr64_set,
 722	},
 723	[REGSET_FPR] = {
 724		.core_note_type	= NT_PRFPREG,
 725		.n		= ELF_NFPREG,
 726		.size		= sizeof(elf_fpreg_t),
 727		.align		= sizeof(elf_fpreg_t),
 728		.get		= fpr_get,
 729		.set		= fpr_set,
 730	},
 731};
 732
 733static const struct user_regset_view user_mips64_view = {
 734	.name		= "mips64",
 735	.e_machine	= ELF_ARCH,
 736	.ei_osabi	= ELF_OSABI,
 737	.regsets	= mips64_regsets,
 738	.n		= ARRAY_SIZE(mips64_regsets),
 739};
 740
 741#ifdef CONFIG_MIPS32_N32
 742
 743static const struct user_regset_view user_mipsn32_view = {
 744	.name		= "mipsn32",
 745	.e_flags	= EF_MIPS_ABI2,
 746	.e_machine	= ELF_ARCH,
 747	.ei_osabi	= ELF_OSABI,
 748	.regsets	= mips64_regsets,
 749	.n		= ARRAY_SIZE(mips64_regsets),
 750};
 751
 752#endif /* CONFIG_MIPS32_N32 */
 753
 754#endif /* CONFIG_64BIT */
 755
 756const struct user_regset_view *task_user_regset_view(struct task_struct *task)
 757{
 758#ifdef CONFIG_32BIT
 759	return &user_mips_view;
 760#else
 761#ifdef CONFIG_MIPS32_O32
 762	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
 763		return &user_mips_view;
 764#endif
 765#ifdef CONFIG_MIPS32_N32
 766	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
 767		return &user_mipsn32_view;
 768#endif
 769	return &user_mips64_view;
 770#endif
 771}
 772
 773long arch_ptrace(struct task_struct *child, long request,
 774		 unsigned long addr, unsigned long data)
 775{
 776	int ret;
 777	void __user *addrp = (void __user *) addr;
 778	void __user *datavp = (void __user *) data;
 779	unsigned long __user *datalp = (void __user *) data;
 780
 781	switch (request) {
 782	/* when I and D space are separate, these will need to be fixed. */
 783	case PTRACE_PEEKTEXT: /* read word at location addr. */
 784	case PTRACE_PEEKDATA:
 785		ret = generic_ptrace_peekdata(child, addr, data);
 786		break;
 787
 788	/* Read the word at location addr in the USER area. */
 789	case PTRACE_PEEKUSR: {
 790		struct pt_regs *regs;
 791		union fpureg *fregs;
 792		unsigned long tmp = 0;
 793
 794		regs = task_pt_regs(child);
 795		ret = 0;  /* Default return value. */
 796
 797		switch (addr) {
 798		case 0 ... 31:
 799			tmp = regs->regs[addr];
 800			break;
 801		case FPR_BASE ... FPR_BASE + 31:
 802			if (!tsk_used_math(child)) {
 803				/* FP not yet used */
 804				tmp = -1;
 805				break;
 806			}
 807			fregs = get_fpu_regs(child);
 808
 809#ifdef CONFIG_32BIT
 810			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
 811				/*
 812				 * The odd registers are actually the high
 813				 * order bits of the values stored in the even
 814				 * registers - unless we're using r2k_switch.S.
 815				 */
 816				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
 817						addr & 1);
 818				break;
 819			}
 820#endif
 821			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
 822			break;
 823		case PC:
 824			tmp = regs->cp0_epc;
 825			break;
 826		case CAUSE:
 827			tmp = regs->cp0_cause;
 828			break;
 829		case BADVADDR:
 830			tmp = regs->cp0_badvaddr;
 831			break;
 832		case MMHI:
 833			tmp = regs->hi;
 834			break;
 835		case MMLO:
 836			tmp = regs->lo;
 837			break;
 838#ifdef CONFIG_CPU_HAS_SMARTMIPS
 839		case ACX:
 840			tmp = regs->acx;
 841			break;
 842#endif
 843		case FPC_CSR:
 844			tmp = child->thread.fpu.fcr31;
 845			break;
 846		case FPC_EIR:
 847			/* implementation / version register */
 848			tmp = boot_cpu_data.fpu_id;
 849			break;
 850		case DSP_BASE ... DSP_BASE + 5: {
 851			dspreg_t *dregs;
 852
 853			if (!cpu_has_dsp) {
 854				tmp = 0;
 855				ret = -EIO;
 856				goto out;
 857			}
 858			dregs = __get_dsp_regs(child);
 859			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
 860			break;
 861		}
 862		case DSP_CONTROL:
 863			if (!cpu_has_dsp) {
 864				tmp = 0;
 865				ret = -EIO;
 866				goto out;
 867			}
 868			tmp = child->thread.dsp.dspcontrol;
 869			break;
 870		default:
 871			tmp = 0;
 872			ret = -EIO;
 873			goto out;
 874		}
 875		ret = put_user(tmp, datalp);
 876		break;
 877	}
 878
 879	/* when I and D space are separate, this will have to be fixed. */
 880	case PTRACE_POKETEXT: /* write the word at location addr. */
 881	case PTRACE_POKEDATA:
 882		ret = generic_ptrace_pokedata(child, addr, data);
 883		break;
 884
 885	case PTRACE_POKEUSR: {
 886		struct pt_regs *regs;
 887		ret = 0;
 888		regs = task_pt_regs(child);
 889
 890		switch (addr) {
 891		case 0 ... 31:
 892			regs->regs[addr] = data;
 893			/* System call number may have been changed */
 894			if (addr == 2)
 895				mips_syscall_update_nr(child, regs);
 896			else if (addr == 4 &&
 897				 mips_syscall_is_indirect(child, regs))
 898				mips_syscall_update_nr(child, regs);
 899			break;
 900		case FPR_BASE ... FPR_BASE + 31: {
 901			union fpureg *fregs = get_fpu_regs(child);
 902
 903			init_fp_ctx(child);
 904#ifdef CONFIG_32BIT
 905			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
 906				/*
 907				 * The odd registers are actually the high
 908				 * order bits of the values stored in the even
 909				 * registers - unless we're using r2k_switch.S.
 910				 */
 911				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
 912					  addr & 1, data);
 913				break;
 914			}
 915#endif
 916			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
 917			break;
 918		}
 919		case PC:
 920			regs->cp0_epc = data;
 921			break;
 922		case MMHI:
 923			regs->hi = data;
 924			break;
 925		case MMLO:
 926			regs->lo = data;
 927			break;
 928#ifdef CONFIG_CPU_HAS_SMARTMIPS
 929		case ACX:
 930			regs->acx = data;
 931			break;
 932#endif
 933		case FPC_CSR:
 934			init_fp_ctx(child);
 935			ptrace_setfcr31(child, data);
 936			break;
 937		case DSP_BASE ... DSP_BASE + 5: {
 938			dspreg_t *dregs;
 939
 940			if (!cpu_has_dsp) {
 941				ret = -EIO;
 942				break;
 943			}
 944
 945			dregs = __get_dsp_regs(child);
 946			dregs[addr - DSP_BASE] = data;
 947			break;
 948		}
 949		case DSP_CONTROL:
 950			if (!cpu_has_dsp) {
 951				ret = -EIO;
 952				break;
 953			}
 954			child->thread.dsp.dspcontrol = data;
 955			break;
 956		default:
 957			/* The rest are not allowed. */
 958			ret = -EIO;
 959			break;
 960		}
 961		break;
 962		}
 963
 964	case PTRACE_GETREGS:
 965		ret = ptrace_getregs(child, datavp);
 966		break;
 967
 968	case PTRACE_SETREGS:
 969		ret = ptrace_setregs(child, datavp);
 970		break;
 971
 972	case PTRACE_GETFPREGS:
 973		ret = ptrace_getfpregs(child, datavp);
 974		break;
 975
 976	case PTRACE_SETFPREGS:
 977		ret = ptrace_setfpregs(child, datavp);
 978		break;
 979
 980	case PTRACE_GET_THREAD_AREA:
 981		ret = put_user(task_thread_info(child)->tp_value, datalp);
 982		break;
 983
 984	case PTRACE_GET_WATCH_REGS:
 985		ret = ptrace_get_watch_regs(child, addrp);
 986		break;
 987
 988	case PTRACE_SET_WATCH_REGS:
 989		ret = ptrace_set_watch_regs(child, addrp);
 990		break;
 991
 992	default:
 993		ret = ptrace_request(child, request, addr, data);
 994		break;
 995	}
 996 out:
 997	return ret;
 998}
 999
1000/*
1001 * Notification of system call entry/exit
1002 * - triggered by current->work.syscall_trace
1003 */
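/*
 * Returns the (possibly updated) syscall number to execute, or -1 to tell
 * the assembly caller to skip the system call when the tracer or seccomp
 * rejects it.
 */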
1004asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
1005{
1006	user_exit();
1007
1008	current_thread_info()->syscall = syscall;
1009
1010	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
1011		if (tracehook_report_syscall_entry(regs))
1012			return -1;
1013		syscall = current_thread_info()->syscall;
1014	}
1015
1016#ifdef CONFIG_SECCOMP
1017	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
1018		int ret, i;
1019		struct seccomp_data sd;
1020		unsigned long args[6];
1021
1022		sd.nr = syscall;
1023		sd.arch = syscall_get_arch();
1024		syscall_get_arguments(current, regs, 0, 6, args);
1025		for (i = 0; i < 6; i++)
1026			sd.args[i] = args[i];
1027		sd.instruction_pointer = KSTK_EIP(current);
1028
1029		ret = __secure_computing(&sd);
1030		if (ret == -1)
1031			return ret;
1032		syscall = current_thread_info()->syscall;
1033	}
1034#endif
1035
1036	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1037		trace_sys_enter(regs, regs->regs[2]);
1038
1039	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
1040			    regs->regs[6], regs->regs[7]);
1041
1042	/*
1043	 * Negative syscall numbers are mistaken for rejected syscalls, but
1044	 * won't have had the return value set appropriately, so we do so now.
1045	 */
1046	if (syscall < 0)
1047		syscall_set_return_value(current, regs, -ENOSYS, 0);
1048	return syscall;
1049}
1050
1051/*
1052 * Notification of system call entry/exit
1053 * - triggered by current->work.syscall_trace
1054 */
1055asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1056{
1057        /*
1058	 * We may come here right after calling schedule_user()
1059	 * or do_notify_resume(), in which case we can be in RCU
1060	 * user mode.
1061	 */
1062	user_exit();
1063
1064	audit_syscall_exit(regs);
1065
1066	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1067		trace_sys_exit(regs, regs_return_value(regs));
1068
1069	if (test_thread_flag(TIF_SYSCALL_TRACE))
1070		tracehook_report_syscall_exit(regs, 0);
1071
1072	user_enter();
1073}
v3.1
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1992 Ross Biro
  7 * Copyright (C) Linus Torvalds
  8 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
  9 * Copyright (C) 1996 David S. Miller
 10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 11 * Copyright (C) 1999 MIPS Technologies, Inc.
 12 * Copyright (C) 2000 Ulf Carlsson
 13 *
 14 * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
 15 * binaries.
 16 */
 17#include <linux/compiler.h>
 18#include <linux/kernel.h>
 19#include <linux/sched.h>
 20#include <linux/mm.h>
 21#include <linux/errno.h>
 22#include <linux/ptrace.h>
 23#include <linux/smp.h>
 24#include <linux/user.h>
 25#include <linux/security.h>
 26#include <linux/audit.h>
 27#include <linux/seccomp.h>
 28
 29#include <asm/byteorder.h>
 30#include <asm/cpu.h>
 31#include <asm/dsp.h>
 32#include <asm/fpu.h>
 33#include <asm/mipsregs.h>
 34#include <asm/mipsmtregs.h>
 35#include <asm/pgtable.h>
 36#include <asm/page.h>
 37#include <asm/system.h>
 38#include <asm/uaccess.h>
 39#include <asm/bootinfo.h>
 40#include <asm/reg.h>
 41
 42/*
 43 * Called by kernel/ptrace.c when detaching..
 44 *
 45 * Make sure single step bits etc are not set.
 46 */
 47void ptrace_disable(struct task_struct *child)
 48{
 49	/* Don't load the watchpoint registers for the ex-child. */
 50	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
 51}
 52
 53/*
 54 * Read a general register set.  We always use the 64-bit format, even
 55 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 56 * Registers are sign extended to fill the available space.
 57 */
 58int ptrace_getregs(struct task_struct *child, __s64 __user *data)
 59{
 60	struct pt_regs *regs;
 61	int i;
 62
 63	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
 64		return -EIO;
 65
 66	regs = task_pt_regs(child);
 67
 68	for (i = 0; i < 32; i++)
 69		__put_user((long)regs->regs[i], data + i);
 70	__put_user((long)regs->lo, data + EF_LO - EF_R0);
 71	__put_user((long)regs->hi, data + EF_HI - EF_R0);
 72	__put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
 73	__put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
 74	__put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
 75	__put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);
 76
 77	return 0;
 78}
 79
 80/*
 81 * Write a general register set.  As for PTRACE_GETREGS, we always use
 82 * the 64-bit format.  On a 32-bit kernel only the lower order half
 83 * (according to endianness) will be used.
 84 */
 85int ptrace_setregs(struct task_struct *child, __s64 __user *data)
 86{
 87	struct pt_regs *regs;
 88	int i;
 89
 90	if (!access_ok(VERIFY_READ, data, 38 * 8))
 91		return -EIO;
 92
 93	regs = task_pt_regs(child);
 94
 95	for (i = 0; i < 32; i++)
 96		__get_user(regs->regs[i], data + i);
 97	__get_user(regs->lo, data + EF_LO - EF_R0);
 98	__get_user(regs->hi, data + EF_HI - EF_R0);
 99	__get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
100
101	/* badvaddr, status, and cause may not be written.  */
102
103	return 0;
104}
105
106int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
107{
108	int i;
109	unsigned int tmp;
110
111	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
112		return -EIO;
113
114	if (tsk_used_math(child)) {
115		fpureg_t *fregs = get_fpu_regs(child);
116		for (i = 0; i < 32; i++)
117			__put_user(fregs[i], i + (__u64 __user *) data);
118	} else {
119		for (i = 0; i < 32; i++)
120			__put_user((__u64) -1, i + (__u64 __user *) data);
121	}
122
123	__put_user(child->thread.fpu.fcr31, data + 64);
124
125	preempt_disable();
126	if (cpu_has_fpu) {
127		unsigned int flags;
128
129		if (cpu_has_mipsmt) {
130			unsigned int vpflags = dvpe();
131			flags = read_c0_status();
132			__enable_fpu();
133			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
134			write_c0_status(flags);
135			evpe(vpflags);
136		} else {
137			flags = read_c0_status();
138			__enable_fpu();
139			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
140			write_c0_status(flags);
141		}
142	} else {
143		tmp = 0;
144	}
145	preempt_enable();
146	__put_user(tmp, data + 65);
147
148	return 0;
149}
150
151int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
152{
153	fpureg_t *fregs;
154	int i;
155
156	if (!access_ok(VERIFY_READ, data, 33 * 8))
157		return -EIO;
158
159	fregs = get_fpu_regs(child);
160
161	for (i = 0; i < 32; i++)
162		__get_user(fregs[i], i + (__u64 __user *) data);
163
164	__get_user(child->thread.fpu.fcr31, data + 64);
165
166	/* FIR may not be written.  */
167
168	return 0;
169}
170
171int ptrace_get_watch_regs(struct task_struct *child,
172			  struct pt_watch_regs __user *addr)
173{
174	enum pt_watch_style style;
175	int i;
176
177	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
178		return -EIO;
179	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
180		return -EIO;
181
182#ifdef CONFIG_32BIT
183	style = pt_watch_style_mips32;
184#define WATCH_STYLE mips32
185#else
186	style = pt_watch_style_mips64;
187#define WATCH_STYLE mips64
188#endif
189
190	__put_user(style, &addr->style);
191	__put_user(current_cpu_data.watch_reg_use_cnt,
192		   &addr->WATCH_STYLE.num_valid);
193	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
194		__put_user(child->thread.watch.mips3264.watchlo[i],
195			   &addr->WATCH_STYLE.watchlo[i]);
196		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
197			   &addr->WATCH_STYLE.watchhi[i]);
198		__put_user(current_cpu_data.watch_reg_masks[i],
199			   &addr->WATCH_STYLE.watch_masks[i]);
200	}
201	for (; i < 8; i++) {
202		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
203		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
204		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
205	}
206
207	return 0;
208}
209
210int ptrace_set_watch_regs(struct task_struct *child,
211			  struct pt_watch_regs __user *addr)
212{
213	int i;
214	int watch_active = 0;
215	unsigned long lt[NUM_WATCH_REGS];
216	u16 ht[NUM_WATCH_REGS];
217
218	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
219		return -EIO;
220	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
221		return -EIO;
222	/* Check the values. */
223	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
224		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
225#ifdef CONFIG_32BIT
226		if (lt[i] & __UA_LIMIT)
227			return -EINVAL;
228#else
229		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
230			if (lt[i] & 0xffffffff80000000UL)
231				return -EINVAL;
232		} else {
233			if (lt[i] & __UA_LIMIT)
234				return -EINVAL;
235		}
236#endif
237		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
238		if (ht[i] & ~0xff8)
239			return -EINVAL;
240	}
241	/* Install them. */
242	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
243		if (lt[i] & 7)
244			watch_active = 1;
245		child->thread.watch.mips3264.watchlo[i] = lt[i];
246		/* Set the G bit. */
247		child->thread.watch.mips3264.watchhi[i] = ht[i];
248	}
249
250	if (watch_active)
251		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
252	else
253		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
254
255	return 0;
256}
257
258long arch_ptrace(struct task_struct *child, long request,
259		 unsigned long addr, unsigned long data)
260{
261	int ret;
262	void __user *addrp = (void __user *) addr;
263	void __user *datavp = (void __user *) data;
264	unsigned long __user *datalp = (void __user *) data;
265
266	switch (request) {
267	/* when I and D space are separate, these will need to be fixed. */
268	case PTRACE_PEEKTEXT: /* read word at location addr. */
269	case PTRACE_PEEKDATA:
270		ret = generic_ptrace_peekdata(child, addr, data);
271		break;
272
273	/* Read the word at location addr in the USER area. */
274	case PTRACE_PEEKUSR: {
275		struct pt_regs *regs;
276		unsigned long tmp = 0;
277
278		regs = task_pt_regs(child);
279		ret = 0;  /* Default return value. */
280
281		switch (addr) {
282		case 0 ... 31:
283			tmp = regs->regs[addr];
284			break;
285		case FPR_BASE ... FPR_BASE + 31:
286			if (tsk_used_math(child)) {
287				fpureg_t *fregs = get_fpu_regs(child);
288
289#ifdef CONFIG_32BIT
290				/*
291				 * The odd registers are actually the high
292				 * order bits of the values stored in the even
293				 * registers - unless we're using r2k_switch.S.
294				 */
295				if (addr & 1)
296					tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
297				else
298					tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
299#endif
300#ifdef CONFIG_64BIT
301				tmp = fregs[addr - FPR_BASE];
302#endif
303			} else {
304				tmp = -1;	/* FP not yet used  */
305			}
306			break;
307		case PC:
308			tmp = regs->cp0_epc;
309			break;
310		case CAUSE:
311			tmp = regs->cp0_cause;
312			break;
313		case BADVADDR:
314			tmp = regs->cp0_badvaddr;
315			break;
316		case MMHI:
317			tmp = regs->hi;
318			break;
319		case MMLO:
320			tmp = regs->lo;
321			break;
322#ifdef CONFIG_CPU_HAS_SMARTMIPS
323		case ACX:
324			tmp = regs->acx;
325			break;
326#endif
327		case FPC_CSR:
328			tmp = child->thread.fpu.fcr31;
329			break;
330		case FPC_EIR: {	/* implementation / version register */
331			unsigned int flags;
332#ifdef CONFIG_MIPS_MT_SMTC
333			unsigned long irqflags;
334			unsigned int mtflags;
335#endif /* CONFIG_MIPS_MT_SMTC */
336
337			preempt_disable();
338			if (!cpu_has_fpu) {
339				preempt_enable();
340				break;
341			}
342
343#ifdef CONFIG_MIPS_MT_SMTC
344			/* Read-modify-write of Status must be atomic */
345			local_irq_save(irqflags);
346			mtflags = dmt();
347#endif /* CONFIG_MIPS_MT_SMTC */
348			if (cpu_has_mipsmt) {
349				unsigned int vpflags = dvpe();
350				flags = read_c0_status();
351				__enable_fpu();
352				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
353				write_c0_status(flags);
354				evpe(vpflags);
355			} else {
356				flags = read_c0_status();
357				__enable_fpu();
358				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
359				write_c0_status(flags);
360			}
361#ifdef CONFIG_MIPS_MT_SMTC
362			emt(mtflags);
363			local_irq_restore(irqflags);
364#endif /* CONFIG_MIPS_MT_SMTC */
365			preempt_enable();
366			break;
367		}
368		case DSP_BASE ... DSP_BASE + 5: {
369			dspreg_t *dregs;
370
371			if (!cpu_has_dsp) {
372				tmp = 0;
373				ret = -EIO;
374				goto out;
375			}
376			dregs = __get_dsp_regs(child);
377			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
378			break;
379		}
380		case DSP_CONTROL:
381			if (!cpu_has_dsp) {
382				tmp = 0;
383				ret = -EIO;
384				goto out;
385			}
386			tmp = child->thread.dsp.dspcontrol;
387			break;
388		default:
389			tmp = 0;
390			ret = -EIO;
391			goto out;
392		}
393		ret = put_user(tmp, datalp);
394		break;
395	}
396
397	/* when I and D space are separate, this will have to be fixed. */
398	case PTRACE_POKETEXT: /* write the word at location addr. */
399	case PTRACE_POKEDATA:
400		ret = generic_ptrace_pokedata(child, addr, data);
401		break;
402
403	case PTRACE_POKEUSR: {
404		struct pt_regs *regs;
405		ret = 0;
406		regs = task_pt_regs(child);
407
408		switch (addr) {
409		case 0 ... 31:
410			regs->regs[addr] = data;
411			break;
412		case FPR_BASE ... FPR_BASE + 31: {
413			fpureg_t *fregs = get_fpu_regs(child);
414
415			if (!tsk_used_math(child)) {
416				/* FP not yet used  */
417				memset(&child->thread.fpu, ~0,
418				       sizeof(child->thread.fpu));
419				child->thread.fpu.fcr31 = 0;
420			}
421#ifdef CONFIG_32BIT
422			/*
423			 * The odd registers are actually the high order bits
424			 * of the values stored in the even registers - unless
425			 * we're using r2k_switch.S.
426			 */
427			if (addr & 1) {
428				fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
429				fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
430			} else {
431				fregs[addr - FPR_BASE] &= ~0xffffffffLL;
432				fregs[addr - FPR_BASE] |= data;
433			}
434#endif
435#ifdef CONFIG_64BIT
436			fregs[addr - FPR_BASE] = data;
437#endif
438			break;
439		}
440		case PC:
441			regs->cp0_epc = data;
442			break;
443		case MMHI:
444			regs->hi = data;
445			break;
446		case MMLO:
447			regs->lo = data;
448			break;
449#ifdef CONFIG_CPU_HAS_SMARTMIPS
450		case ACX:
451			regs->acx = data;
452			break;
453#endif
454		case FPC_CSR:
455			child->thread.fpu.fcr31 = data;
456			break;
457		case DSP_BASE ... DSP_BASE + 5: {
458			dspreg_t *dregs;
459
460			if (!cpu_has_dsp) {
461				ret = -EIO;
462				break;
463			}
464
465			dregs = __get_dsp_regs(child);
466			dregs[addr - DSP_BASE] = data;
467			break;
468		}
469		case DSP_CONTROL:
470			if (!cpu_has_dsp) {
471				ret = -EIO;
472				break;
473			}
474			child->thread.dsp.dspcontrol = data;
475			break;
476		default:
477			/* The rest are not allowed. */
478			ret = -EIO;
479			break;
480		}
481		break;
482		}
483
484	case PTRACE_GETREGS:
485		ret = ptrace_getregs(child, datavp);
486		break;
487
488	case PTRACE_SETREGS:
489		ret = ptrace_setregs(child, datavp);
490		break;
491
492	case PTRACE_GETFPREGS:
493		ret = ptrace_getfpregs(child, datavp);
494		break;
495
496	case PTRACE_SETFPREGS:
497		ret = ptrace_setfpregs(child, datavp);
498		break;
499
500	case PTRACE_GET_THREAD_AREA:
501		ret = put_user(task_thread_info(child)->tp_value, datalp);
502		break;
503
504	case PTRACE_GET_WATCH_REGS:
505		ret = ptrace_get_watch_regs(child, addrp);
506		break;
507
508	case PTRACE_SET_WATCH_REGS:
509		ret = ptrace_set_watch_regs(child, addrp);
510		break;
511
512	default:
513		ret = ptrace_request(child, request, addr, data);
514		break;
515	}
516 out:
517	return ret;
518}
519
520static inline int audit_arch(void)
521{
522	int arch = EM_MIPS;
523#ifdef CONFIG_64BIT
524	arch |=  __AUDIT_ARCH_64BIT;
525#endif
526#if defined(__LITTLE_ENDIAN)
527	arch |=  __AUDIT_ARCH_LE;
528#endif
529	return arch;
530}
531
532/*
533 * Notification of system call entry/exit
534 * - triggered by current->work.syscall_trace
535 */
536asmlinkage void syscall_trace_enter(struct pt_regs *regs)
537{
538	/* do the secure computing check first */
539	secure_computing(regs->regs[2]);
540
541	if (!(current->ptrace & PT_PTRACED))
542		goto out;
543
544	if (!test_thread_flag(TIF_SYSCALL_TRACE))
545		goto out;
546
547	/* The 0x80 provides a way for the tracing parent to distinguish
548	   between a syscall stop and SIGTRAP delivery */
549	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
550	                         0x80 : 0));
551
552	/*
553	 * this isn't the same as continuing with a signal, but it will do
554	 * for normal use.  strace only continues with a signal if the
555	 * stopping signal is not SIGTRAP.  -brl
556	 */
557	if (current->exit_code) {
558		send_sig(current->exit_code, current, 1);
559		current->exit_code = 0;
560	}
561
562out:
563	if (unlikely(current->audit_context))
564		audit_syscall_entry(audit_arch(), regs->regs[2],
565				    regs->regs[4], regs->regs[5],
566				    regs->regs[6], regs->regs[7]);
567}
568
569/*
570 * Notification of system call entry/exit
571 * - triggered by current->work.syscall_trace
572 */
573asmlinkage void syscall_trace_leave(struct pt_regs *regs)
574{
575	if (unlikely(current->audit_context))
576		audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]),
577		                   -regs->regs[2]);
578
579	if (!(current->ptrace & PT_PTRACED))
580		return;
581
582	if (!test_thread_flag(TIF_SYSCALL_TRACE))
583		return;
584
585	/* The 0x80 provides a way for the tracing parent to distinguish
586	   between a syscall stop and SIGTRAP delivery */
587	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
588	                         0x80 : 0));
589
590	/*
591	 * this isn't the same as continuing with a signal, but it will do
592	 * for normal use.  strace only continues with a signal if the
593	 * stopping signal is not SIGTRAP.  -brl
594	 */
595	if (current->exit_code) {
596		send_sig(current->exit_code, current, 1);
597		current->exit_code = 0;
598	}
599}