Linux Audio

Check our new training course

Loading...
v4.17
   1/*
   2 *  PowerPC version
   3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   4 *
   5 *  Derived from "arch/m68k/kernel/ptrace.c"
   6 *  Copyright (C) 1994 by Hamish Macdonald
   7 *  Taken from linux/kernel/ptrace.c and modified for M680x0.
   8 *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
   9 *
  10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
  11 * and Paul Mackerras (paulus@samba.org).
  12 *
  13 * This file is subject to the terms and conditions of the GNU General
  14 * Public License.  See the file README.legal in the main directory of
  15 * this archive for more details.
  16 */
  17
  18#include <linux/kernel.h>
  19#include <linux/sched.h>
  20#include <linux/mm.h>
  21#include <linux/smp.h>
  22#include <linux/errno.h>
  23#include <linux/ptrace.h>
  24#include <linux/regset.h>
  25#include <linux/tracehook.h>
  26#include <linux/elf.h>
  27#include <linux/user.h>
  28#include <linux/security.h>
  29#include <linux/signal.h>
  30#include <linux/seccomp.h>
  31#include <linux/audit.h>
  32#include <trace/syscall.h>
  33#include <linux/hw_breakpoint.h>
  34#include <linux/perf_event.h>
  35#include <linux/context_tracking.h>
  36
  37#include <linux/uaccess.h>
  38#include <linux/pkeys.h>
  39#include <asm/page.h>
  40#include <asm/pgtable.h>
  41#include <asm/switch_to.h>
  42#include <asm/tm.h>
  43#include <asm/asm-prototypes.h>
  44#include <asm/debug.h>
  45
  46#define CREATE_TRACE_POINTS
  47#include <trace/events/syscalls.h>
  48
/*
 * The parameter save area on the stack is used to store arguments being passed
 * to callee function and is located at fixed offset from stack pointer.
 * NOTE(review): the 24/48 byte offsets presumably correspond to the 32-bit
 * and 64-bit ELF ABI stack-frame headers — confirm against the ABI supplement.
 */
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET	24  /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET	48  /* bytes */
#endif
  58
/*
 * Maps a register's user-visible name to its byte offset within
 * struct pt_regs; used by regs_query_register_offset()/..._name().
 */
struct pt_regs_offset {
	const char *name;	/* user-visible register name, NULL terminates the table */
	int offset;		/* byte offset into struct pt_regs */
};

#define STR(s)	#s			/* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
/* Each GPR is reachable under two names: "rN" and "gprN". */
#define GPR_OFFSET_NAME(num)	\
	{.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

/* Shorthands for offsets into the vector/FP/thread state structures. */
#define TVSO(f)	(offsetof(struct thread_vr_state, f))
#define TFSO(f)	(offsetof(struct thread_fp_state, f))
#define TSO(f)	(offsetof(struct thread_struct, f))
  74
/*
 * Name -> pt_regs offset lookup table.  Terminated by REG_OFFSET_END
 * (a NULL name), which the query helpers use as the loop sentinel.
 */
static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	GPR_OFFSET_NAME(31),
	REG_OFFSET_NAME(nip),
	REG_OFFSET_NAME(msr),
	REG_OFFSET_NAME(ctr),
	REG_OFFSET_NAME(link),
	REG_OFFSET_NAME(xer),
	REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
	REG_OFFSET_NAME(softe),
#else
	REG_OFFSET_NAME(mq),
#endif
	REG_OFFSET_NAME(trap),
	REG_OFFSET_NAME(dar),
	REG_OFFSET_NAME(dsisr),
	REG_OFFSET_END,
};
 124
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Make sure @tsk's transactional-memory state is visible in its
 * thread_struct so ptrace can read/modify it there.
 */
static void flush_tmregs_to_thread(struct task_struct *tsk)
{
	/*
	 * If task is not current, it will have been flushed already to
	 * its thread_struct during __switch_to().
	 *
	 * A reclaim flushes ALL the state or if not in TM save TM SPRs
	 * in the appropriate thread structures from live.
	 */

	if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
		return;

	if (MSR_TM_SUSPENDED(mfmsr())) {
		/* In a suspended transaction: reclaim saves everything. */
		tm_reclaim_current(TM_CAUSE_SIGNAL);
	} else {
		/* Not transactional: just snapshot the TM SPRs. */
		tm_enable();
		tm_save_sprs(&(tsk->thread));
	}
}
#else
/* No TM support configured: nothing to flush. */
static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
#endif
 149
 150/**
 151 * regs_query_register_offset() - query register offset from its name
 152 * @name:	the name of a register
 153 *
 154 * regs_query_register_offset() returns the offset of a register in struct
 155 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 156 */
 157int regs_query_register_offset(const char *name)
 158{
 159	const struct pt_regs_offset *roff;
 160	for (roff = regoffset_table; roff->name != NULL; roff++)
 161		if (!strcmp(roff->name, name))
 162			return roff->offset;
 163	return -EINVAL;
 164}
 165
 166/**
 167 * regs_query_register_name() - query register name from its offset
 168 * @offset:	the offset of a register in struct pt_regs.
 169 *
 170 * regs_query_register_name() returns the name of a register from its
 171 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
 172 */
 173const char *regs_query_register_name(unsigned int offset)
 174{
 175	const struct pt_regs_offset *roff;
 176	for (roff = regoffset_table; roff->name != NULL; roff++)
 177		if (roff->offset == offset)
 178			return roff->name;
 179	return NULL;
 180}
 181
/*
 * TODO: we do not yet catch signals sent when the child dies;
 * that handling would belong in exit.c or in signal.c.
 */

/*
 * Set of msr bits that gdb can change on behalf of a process.
 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* Dedicated debug registers are used instead; no MSR bits are writable. */
#define MSR_DEBUGCHANGE	0
#else
/* Single-step (SE) and branch-trace (BE) enable bits. */
#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
#endif

/*
 * Max register writeable via put_reg
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG	PT_MQ
#else
#define PT_MAX_PUT_REG	PT_CCR
#endif
 204
 205static unsigned long get_user_msr(struct task_struct *task)
 206{
 207	return task->thread.regs->msr | task->thread.fpexc_mode;
 208}
 209
/*
 * Update the task's MSR from a ptrace write.  Only the debug-related
 * bits (MSR_DEBUGCHANGE) may actually change; everything else in the
 * MSR is owned by the kernel and is preserved.
 */
static int set_user_msr(struct task_struct *task, unsigned long msr)
{
	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
	return 0;
}
 216
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Checkpointed-register variants of the MSR/trap accessors above,
 * operating on thread.ckpt_regs (the register state a transaction
 * would roll back to) instead of the live pt_regs.
 */
static unsigned long get_user_ckpt_msr(struct task_struct *task)
{
	return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
}

static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
{
	/* As with set_user_msr(): only MSR_DEBUGCHANGE bits may change. */
	task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
	task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
	return 0;
}

static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
{
	/* Low 4 bits of trap are reserved for kernel-internal use. */
	task->thread.ckpt_regs.trap = trap & 0xfff0;
	return 0;
}
#endif
 236
#ifdef CONFIG_PPC64
/* Read the task's Data Stream Control Register into *@data. */
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	*data = task->thread.dscr;
	return 0;
}

/*
 * Write the task's DSCR.  dscr_inherit marks that the value was set
 * explicitly, so it is carried across to children.
 */
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	task->thread.dscr = dscr;
	task->thread.dscr_inherit = 1;
	return 0;
}
#else
/* DSCR only exists on 64-bit; report -EIO on 32-bit. */
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	return -EIO;
}

static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	return -EIO;
}
#endif
 261
/*
 * We prevent mucking around with the reserved area of trap
 * which are used internally by the kernel.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
	/* Mask off the low 4 (kernel-internal) bits of the trap value. */
	task->thread.regs->trap = trap & 0xfff0;
	return 0;
}
 271
/*
 * Get contents of register REGNO in task TASK.
 *
 * @regno is an index in unsigned-long-sized words into struct pt_regs;
 * MSR, DSCR and (on 64-bit) SOFTE are special-cased.  Returns 0 on
 * success with the value in *@data, -EIO otherwise.
 */
int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
{
	if ((task->thread.regs == NULL) || !data)
		return -EIO;

	if (regno == PT_MSR) {
		/* MSR is synthesized, not read raw (see get_user_msr()). */
		*data = get_user_msr(task);
		return 0;
	}

	if (regno == PT_DSCR)
		return get_user_dscr(task, data);

#ifdef CONFIG_PPC64
	/*
	 * softe copies paca->irq_soft_mask variable state. Since irq_soft_mask is
	 * no more used as a flag, lets force usr to always see the softe value as 1
	 * which means interrupts are not soft disabled.
	 */
	if (regno == PT_SOFTE) {
		*data = 1;
		return  0;
	}
#endif

	/*
	 * The signed/unsigned comparison promotes regno to an unsigned
	 * type, so a negative regno fails this bound and returns -EIO.
	 */
	if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
		*data = ((unsigned long *)task->thread.regs)[regno];
		return 0;
	}

	return -EIO;
}
 307
 308/*
 309 * Write contents of register REGNO in task TASK.
 310 */
 311int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
 312{
 313	if (task->thread.regs == NULL)
 314		return -EIO;
 315
 316	if (regno == PT_MSR)
 317		return set_user_msr(task, data);
 318	if (regno == PT_TRAP)
 319		return set_user_trap(task, data);
 320	if (regno == PT_DSCR)
 321		return set_user_dscr(task, data);
 322
 323	if (regno <= PT_MAX_PUT_REG) {
 324		((unsigned long *)task->thread.regs)[regno] = data;
 325		return 0;
 326	}
 327	return -EIO;
 328}
 329
/*
 * Regset callback: copy the GPR block (struct pt_regs layout) out to
 * the tracer.  The MSR word is synthesized via get_user_msr(), so the
 * copyout happens in three stages: everything before MSR, the munged
 * MSR itself, then everything from orig_gpr3 onward; the remainder of
 * the regset (past pt_regs) is zero-filled.
 */
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int i, ret;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/* We have a partial register set.  Fill 14-31 with bogus values */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	/* The staged copy assumes orig_gpr3 directly follows msr. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}
 370
/*
 * Regset callback: copy a GPR block (struct pt_regs layout) in from
 * the tracer.  Writes are staged so that MSR and TRAP go through their
 * filtered setters, only registers up to PT_MAX_PUT_REG are written
 * directly, and everything else in the user's buffer is ignored.
 */
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* Raw copy of r0..r31 etc. up to (but not including) MSR. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		/* MSR must go through set_user_msr() to filter bits. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	/* The staged copy assumes orig_gpr3 directly follows msr. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	/* Skip the read-only window between PT_MAX_PUT_REG and PT_TRAP. */
	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		/* TRAP goes through set_user_trap() to mask reserved bits. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	if (!ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
 425
/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
 * value of all FPR registers for the current transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	fpr[32];
 *	u64	fpscr;
 * };
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
	/*
	 * With VSX, FPRs live in the top half of the VSX registers, so
	 * they must be gathered via TS_FPR() rather than copied raw.
	 */
	u64 buf[33];
	int i;

	flush_fp_to_thread(target);

	/* copy to local buffer then write that out */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	buf[32] = target->thread.fp_state.fpscr;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#else
	/* The raw copy below relies on fpscr directly following fpr[31]. */
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32]));

	flush_fp_to_thread(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_state, 0, -1);
#endif
}
 463
/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
 * value of all FPR registers for the current transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	fpr[32];
 *	u64	fpscr;
 * };
 *
 */
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
	u64 buf[33];
	int i;

	flush_fp_to_thread(target);

	/*
	 * Pre-fill the buffer with current values so that a partial
	 * copyin leaves untouched registers unchanged.
	 */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	buf[32] = target->thread.fp_state.fpscr;

	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;

	/* Scatter back into the VSX-interleaved FPR storage. */
	for (i = 0; i < 32 ; i++)
		target->thread.TS_FPR(i) = buf[i];
	target->thread.fp_state.fpscr = buf[32];
	return 0;
#else
	/* The raw copy below relies on fpscr directly following fpr[31]. */
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32]));

	flush_fp_to_thread(target);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fp_state, 0, -1);
#endif
}
 510
 511#ifdef CONFIG_ALTIVEC
 512/*
 513 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 514 * The transfer totals 34 quadword.  Quadwords 0-31 contain the
 515 * corresponding vector registers.  Quadword 32 contains the vscr as the
 516 * last word (offset 12) within that quadword.  Quadword 33 contains the
 517 * vrsave as the first word (offset 0) within the quadword.
 518 *
 519 * This definition of the VMX state is compatible with the current PPC32
 520 * ptrace interface.  This allows signal handling and ptrace to use the
 521 * same structures.  This also simplifies the implementation of a bi-arch
 522 * (combined (32- and 64-bit) gdb.
 523 */
 524
 525static int vr_active(struct task_struct *target,
 526		     const struct user_regset *regset)
 527{
 528	flush_altivec_to_thread(target);
 529	return target->thread.used_vr ? regset->n : 0;
 530}
 531
/*
 * Regardless of transactions, 'vr_state' holds the current running
 * value of all the VMX registers and 'ckvr_state' holds the last
 * checkpointed value of all the VMX registers for the current
 * transaction to fall back on in case it aborts.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	vector128	vr[32];
 *	vector128	vscr;
 *	vector128	vrsave;
 * };
 */
static int vr_get(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  void *kbuf, void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	/* The single copyout below relies on vscr directly following vr[31]. */
	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

	/* vr0-31 plus vscr as one contiguous span. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.vr_state, 0,
				  33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));

		vrsave.word = target->thread.vrsave;

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
					  33 * sizeof(vector128), -1);
	}

	return ret;
}
 578
/*
 * Regardless of transactions, 'vr_state' holds the current running
 * value of all the VMX registers and 'ckvr_state' holds the last
 * checkpointed value of all the VMX registers for the current
 * transaction to fall back on in case it aborts.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	vector128	vr[32];
 *	vector128	vscr;
 *	vector128	vrsave;
 * };
 */
static int vr_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	/* The single copyin below relies on vscr directly following vr[31]. */
	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

	/* vr0-31 plus vscr as one contiguous span. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.vr_state, 0,
				 33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the first word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));

		vrsave.word = target->thread.vrsave;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
					 33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.vrsave = vrsave.word;
	}

	return ret;
}
 627#endif /* CONFIG_ALTIVEC */
 628
 629#ifdef CONFIG_VSX
 630/*
 631 * Currently to set and and get all the vsx state, you need to call
 632 * the fp and VMX calls as well.  This only get/sets the lower 32
 633 * 128bit VSX registers.
 634 */
 635
 636static int vsr_active(struct task_struct *target,
 637		      const struct user_regset *regset)
 638{
 639	flush_vsx_to_thread(target);
 640	return target->thread.used_vsr ? regset->n : 0;
 641}
 642
 643/*
 644 * Regardless of transactions, 'fp_state' holds the current running
 645 * value of all FPR registers and 'ckfp_state' holds the last
 646 * checkpointed value of all FPR registers for the current
 647 * transaction.
 648 *
 649 * Userspace interface buffer layout:
 650 *
 651 * struct data {
 652 *	u64	vsx[32];
 653 * };
 654 */
 655static int vsr_get(struct task_struct *target, const struct user_regset *regset,
 656		   unsigned int pos, unsigned int count,
 657		   void *kbuf, void __user *ubuf)
 658{
 659	u64 buf[32];
 660	int ret, i;
 661
 662	flush_tmregs_to_thread(target);
 663	flush_fp_to_thread(target);
 664	flush_altivec_to_thread(target);
 665	flush_vsx_to_thread(target);
 666
 667	for (i = 0; i < 32 ; i++)
 668		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
 669
 670	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 671				  buf, 0, 32 * sizeof(double));
 672
 673	return ret;
 674}
 675
/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last
 * checkpointed value of all FPR registers for the current
 * transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	vsx[32];
 * };
 */
static int vsr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	u64 buf[32];
	int ret,i;

	/* Make sure all live register state is in the thread_struct. */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	/*
	 * Pre-fill with current values so a partial copyin leaves
	 * untouched registers unchanged.
	 */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 buf, 0, 32 * sizeof(double));
	if (!ret)
		/* Scatter back into the low doubleword of each VSR. */
		for (i = 0; i < 32 ; i++)
			target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];

	return ret;
}
 711#endif /* CONFIG_VSX */
 712
 713#ifdef CONFIG_SPE
 714
 715/*
 716 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 717 *
 718 * struct {
 719 *   u32 evr[32];
 720 *   u64 acc;
 721 *   u32 spefscr;
 722 * }
 723 */
 724
 725static int evr_active(struct task_struct *target,
 726		      const struct user_regset *regset)
 727{
 728	flush_spe_to_thread(target);
 729	return target->thread.used_spe ? regset->n : 0;
 730}
 731
/*
 * Copy the SPE state (evr[32], then acc and spefscr) out to the tracer.
 */
static int evr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));

	/* The second copyout relies on spefscr directly following acc. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.acc,
					  sizeof(target->thread.evr), -1);

	return ret;
}
 754
/*
 * Copy the SPE state (evr[32], then acc and spefscr) in from the tracer.
 */
static int evr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));

	/* The second copyin relies on spefscr directly following acc. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.acc,
					 sizeof(target->thread.evr), -1);

	return ret;
}
 777#endif /* CONFIG_SPE */
 778
 779#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 780/**
 781 * tm_cgpr_active - get active number of registers in CGPR
 782 * @target:	The target task.
 783 * @regset:	The user regset structure.
 784 *
 785 * This function checks for the active number of available
 786 * regisers in transaction checkpointed GPR category.
 787 */
 788static int tm_cgpr_active(struct task_struct *target,
 789			  const struct user_regset *regset)
 790{
 791	if (!cpu_has_feature(CPU_FTR_TM))
 792		return -ENODEV;
 793
 794	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
 795		return 0;
 796
 797	return regset->n;
 798}
 799
/**
 * tm_cgpr_get - get CGPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy from.
 * @ubuf:	User buffer to copy into.
 *
 * This function gets transaction checkpointed GPR registers.
 *
 * When the transaction is active, 'ckpt_regs' holds all the checkpointed
 * GPR register values for the current transaction to fall back on if it
 * aborts in between. This function gets those checkpointed GPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	struct pt_regs ckpt_regs;
 * };
 */
static int tm_cgpr_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Make sure the checkpointed state is in the thread_struct. */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* Staged copyout, mirroring gpr_get(): pre-MSR, munged MSR, rest. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.ckpt_regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		unsigned long msr = get_user_ckpt_msr(target);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	/* The staged copy assumes orig_gpr3 directly follows msr. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.ckpt_regs.orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}
 863
/*
 * tm_cgpr_set - set the CGPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets in transaction checkpointed GPR registers.
 *
 * When the transaction is active, 'ckpt_regs' holds the checkpointed
 * GPR register values for the current transaction to fall back on if it
 * aborts in between. This function sets those checkpointed GPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	struct pt_regs ckpt_regs;
 * };
 */
static int tm_cgpr_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Make sure the checkpointed state is in the thread_struct. */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* Staged copyin, mirroring gpr_set(): raw GPRs up to MSR. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.ckpt_regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		/* MSR is filtered through set_user_ckpt_msr(). */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_msr(target, reg);
	}

	/* The staged copy assumes orig_gpr3 directly follows msr. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.ckpt_regs.orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	/* Skip the read-only window between PT_MAX_PUT_REG and PT_TRAP. */
	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		/* TRAP is filtered through set_user_ckpt_trap(). */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_trap(target, reg);
	}

	if (!ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
 944
 945/**
 946 * tm_cfpr_active - get active number of registers in CFPR
 947 * @target:	The target task.
 948 * @regset:	The user regset structure.
 949 *
 950 * This function checks for the active number of available
 951 * regisers in transaction checkpointed FPR category.
 952 */
 953static int tm_cfpr_active(struct task_struct *target,
 954				const struct user_regset *regset)
 955{
 956	if (!cpu_has_feature(CPU_FTR_TM))
 957		return -ENODEV;
 958
 959	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
 960		return 0;
 961
 962	return regset->n;
 963}
 964
/**
 * tm_cfpr_get - get CFPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy from.
 * @ubuf:	User buffer to copy into.
 *
 * This function gets in transaction checkpointed FPR registers.
 *
 * When the transaction is active 'ckfp_state' holds the checkpointed
 * values for the current transaction to fall back on if it aborts
 * in between. This function gets those checkpointed FPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	u64	fpr[32];
 *	u64	fpscr;
 *};
 */
static int tm_cfpr_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	u64 buf[33];
	int i;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Make sure the checkpointed state is in the thread_struct. */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* copy to local buffer then write that out */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_CKFPR(i);
	buf[32] = target->thread.ckfp_state.fpscr;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
}
1010
/**
 * tm_cfpr_set - set CFPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets in transaction checkpointed FPR registers.
 *
 * When the transaction is active 'ckfp_state' holds the checkpointed
 * FPR register values for the current transaction to fall back on
 * if it aborts in between. This function sets these checkpointed
 * FPR registers. The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	u64	fpr[32];
 *	u64	fpscr;
 *};
 */
static int tm_cfpr_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u64 buf[33];
	int i;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Make sure the checkpointed state is in the thread_struct. */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/*
	 * Pre-fill with current values so a partial copyin leaves
	 * untouched registers unchanged.
	 */
	for (i = 0; i < 32; i++)
		buf[i] = target->thread.TS_CKFPR(i);
	buf[32] = target->thread.ckfp_state.fpscr;

	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;
	for (i = 0; i < 32 ; i++)
		target->thread.TS_CKFPR(i) = buf[i];
	target->thread.ckfp_state.fpscr = buf[32];
	return 0;
}
1063
1064/**
1065 * tm_cvmx_active - get active number of registers in CVMX
1066 * @target:	The target task.
1067 * @regset:	The user regset structure.
1068 *
1069 * This function checks for the active number of available
1070 * regisers in checkpointed VMX category.
1071 */
1072static int tm_cvmx_active(struct task_struct *target,
1073				const struct user_regset *regset)
1074{
1075	if (!cpu_has_feature(CPU_FTR_TM))
1076		return -ENODEV;
1077
1078	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1079		return 0;
1080
1081	return regset->n;
1082}
1083
/**
 * tm_cvmx_get - get CVMX registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy from.
 * @ubuf:	User buffer to copy into.
 *
 * This function gets in transaction checkpointed VMX registers.
 *
 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
 * the checkpointed values for the current transaction to fall
 * back on if it aborts in between. The userspace interface buffer
 * layout is as follows.
 *
 * struct data {
 *	vector128	vr[32];
 *	vector128	vscr;
 *	vector128	vrsave;
 *};
 */
static int tm_cvmx_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	int ret;

	/* vscr must sit right after vr[31] for the 33-vector bulk copy below */
	BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Flush the state */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* vr[0..31] plus vscr in a single copy */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					&target->thread.ckvr_state, 0,
					33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.ckvrsave;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
						33 * sizeof(vector128), -1);
	}

	return ret;
}
1145
/**
 * tm_cvmx_set - set CVMX registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets in transaction checkpointed VMX registers.
 *
 * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
 * the checkpointed values for the current transaction to fall
 * back on if it aborts in between. The userspace interface buffer
 * layout is as follows.
 *
 * struct data {
 *	vector128	vr[32];
 *	vector128	vscr;
 *	vector128	vrsave;
 *};
 */
static int tm_cvmx_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	int ret;

	/* vscr must sit right after vr[31] for the 33-vector bulk copy below */
	BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* vr[0..31] plus vscr in a single copy */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					&target->thread.ckvr_state, 0,
					33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		/* Seed with the current value so a short write keeps it */
		vrsave.word = target->thread.ckvrsave;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
						33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.ckvrsave = vrsave.word;
	}

	return ret;
}
1208
1209/**
1210 * tm_cvsx_active - get active number of registers in CVSX
1211 * @target:	The target task.
1212 * @regset:	The user regset structure.
1213 *
1214 * This function checks for the active number of available
1215 * regisers in transaction checkpointed VSX category.
1216 */
1217static int tm_cvsx_active(struct task_struct *target,
1218				const struct user_regset *regset)
1219{
1220	if (!cpu_has_feature(CPU_FTR_TM))
1221		return -ENODEV;
1222
1223	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1224		return 0;
1225
1226	flush_vsx_to_thread(target);
1227	return target->thread.used_vsr ? regset->n : 0;
1228}
1229
1230/**
1231 * tm_cvsx_get - get CVSX registers
1232 * @target:	The target task.
1233 * @regset:	The user regset structure.
1234 * @pos:	The buffer position.
1235 * @count:	Number of bytes to copy.
1236 * @kbuf:	Kernel buffer to copy from.
1237 * @ubuf:	User buffer to copy into.
1238 *
1239 * This function gets in transaction checkpointed VSX registers.
1240 *
1241 * When the transaction is active 'ckfp_state' holds the checkpointed
1242 * values for the current transaction to fall back on if it aborts
1243 * in between. This function gets those checkpointed VSX registers.
1244 * The userspace interface buffer layout is as follows.
1245 *
1246 * struct data {
1247 *	u64	vsx[32];
1248 *};
1249 */
1250static int tm_cvsx_get(struct task_struct *target,
1251			const struct user_regset *regset,
1252			unsigned int pos, unsigned int count,
1253			void *kbuf, void __user *ubuf)
1254{
1255	u64 buf[32];
1256	int ret, i;
1257
1258	if (!cpu_has_feature(CPU_FTR_TM))
1259		return -ENODEV;
1260
1261	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1262		return -ENODATA;
1263
1264	/* Flush the state */
1265	flush_tmregs_to_thread(target);
1266	flush_fp_to_thread(target);
1267	flush_altivec_to_thread(target);
1268	flush_vsx_to_thread(target);
1269
1270	for (i = 0; i < 32 ; i++)
1271		buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
1272	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1273				  buf, 0, 32 * sizeof(double));
1274
1275	return ret;
1276}
1277
/**
 * tm_cvsx_set - set CVSX registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets in transaction checkpointed VSX registers.
 *
 * When the transaction is active 'ckfp_state' holds the checkpointed
 * VSX register values for the current transaction to fall back on
 * if it aborts in between. This function sets these checkpointed
 * VSX registers. The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	u64	vsx[32];
 *};
 */
static int tm_cvsx_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u64 buf[32];
	int ret, i;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	/* Flush the state */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	/*
	 * Seed the local buffer with the current values so a partial
	 * write leaves the untouched registers unchanged.
	 */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 buf, 0, 32 * sizeof(double));
	if (!ret)
		for (i = 0; i < 32 ; i++)
			target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];

	return ret;
}
1329
1330/**
1331 * tm_spr_active - get active number of registers in TM SPR
1332 * @target:	The target task.
1333 * @regset:	The user regset structure.
1334 *
1335 * This function checks the active number of available
1336 * regisers in the transactional memory SPR category.
1337 */
1338static int tm_spr_active(struct task_struct *target,
1339			 const struct user_regset *regset)
1340{
1341	if (!cpu_has_feature(CPU_FTR_TM))
1342		return -ENODEV;
1343
1344	return regset->n;
1345}
1346
/**
 * tm_spr_get - get the TM related SPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy from.
 * @ubuf:	User buffer to copy into.
 *
 * This function gets transactional memory related SPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct {
 *	u64		tm_tfhar;
 *	u64		tm_texasr;
 *	u64		tm_tfiar;
 * };
 *
 * Note: there is no MSR_TM_ACTIVE() check here — these SPRs are
 * exported whenever the CPU supports TM.
 */
static int tm_spr_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	int ret;

	/* Build tests: the three SPRs must be adjacent in thread_struct */
	BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
	BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
	BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Flush the states */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* TFHAR register */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_tfhar, 0, sizeof(u64));

	/* TEXASR register */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_texasr, sizeof(u64),
				2 * sizeof(u64));

	/* TFIAR register */
	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_tfiar,
				2 * sizeof(u64), 3 * sizeof(u64));
	return ret;
}
1402
/**
 * tm_spr_set - set the TM related SPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets transactional memory related SPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct {
 *	u64		tm_tfhar;
 *	u64		tm_texasr;
 *	u64		tm_tfiar;
 * };
 */
static int tm_spr_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret;

	/* Build tests: the three SPRs must be adjacent in thread_struct */
	BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
	BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
	BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	/* Flush the states */
	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	/* TFHAR register */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_tfhar, 0, sizeof(u64));

	/* TEXASR register */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_texasr, sizeof(u64),
				2 * sizeof(u64));

	/* TFIAR register */
	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				&target->thread.tm_tfiar,
				 2 * sizeof(u64), 3 * sizeof(u64));
	return ret;
}
1458
1459static int tm_tar_active(struct task_struct *target,
1460			 const struct user_regset *regset)
1461{
1462	if (!cpu_has_feature(CPU_FTR_TM))
1463		return -ENODEV;
1464
1465	if (MSR_TM_ACTIVE(target->thread.regs->msr))
1466		return regset->n;
1467
1468	return 0;
1469}
1470
1471static int tm_tar_get(struct task_struct *target,
1472		      const struct user_regset *regset,
1473		      unsigned int pos, unsigned int count,
1474		      void *kbuf, void __user *ubuf)
1475{
1476	int ret;
1477
1478	if (!cpu_has_feature(CPU_FTR_TM))
1479		return -ENODEV;
1480
1481	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1482		return -ENODATA;
1483
1484	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1485				&target->thread.tm_tar, 0, sizeof(u64));
1486	return ret;
1487}
1488
1489static int tm_tar_set(struct task_struct *target,
1490		      const struct user_regset *regset,
1491		      unsigned int pos, unsigned int count,
1492		      const void *kbuf, const void __user *ubuf)
1493{
1494	int ret;
1495
1496	if (!cpu_has_feature(CPU_FTR_TM))
1497		return -ENODEV;
1498
1499	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1500		return -ENODATA;
1501
1502	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1503				&target->thread.tm_tar, 0, sizeof(u64));
1504	return ret;
1505}
1506
1507static int tm_ppr_active(struct task_struct *target,
1508			 const struct user_regset *regset)
1509{
1510	if (!cpu_has_feature(CPU_FTR_TM))
1511		return -ENODEV;
1512
1513	if (MSR_TM_ACTIVE(target->thread.regs->msr))
1514		return regset->n;
1515
1516	return 0;
1517}
1518
1519
1520static int tm_ppr_get(struct task_struct *target,
1521		      const struct user_regset *regset,
1522		      unsigned int pos, unsigned int count,
1523		      void *kbuf, void __user *ubuf)
1524{
1525	int ret;
1526
1527	if (!cpu_has_feature(CPU_FTR_TM))
1528		return -ENODEV;
1529
1530	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1531		return -ENODATA;
1532
1533	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1534				&target->thread.tm_ppr, 0, sizeof(u64));
1535	return ret;
1536}
1537
1538static int tm_ppr_set(struct task_struct *target,
1539		      const struct user_regset *regset,
1540		      unsigned int pos, unsigned int count,
1541		      const void *kbuf, const void __user *ubuf)
1542{
1543	int ret;
1544
1545	if (!cpu_has_feature(CPU_FTR_TM))
1546		return -ENODEV;
1547
1548	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1549		return -ENODATA;
1550
1551	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1552				&target->thread.tm_ppr, 0, sizeof(u64));
1553	return ret;
1554}
1555
1556static int tm_dscr_active(struct task_struct *target,
1557			 const struct user_regset *regset)
1558{
1559	if (!cpu_has_feature(CPU_FTR_TM))
1560		return -ENODEV;
1561
1562	if (MSR_TM_ACTIVE(target->thread.regs->msr))
1563		return regset->n;
1564
1565	return 0;
1566}
1567
1568static int tm_dscr_get(struct task_struct *target,
1569		      const struct user_regset *regset,
1570		      unsigned int pos, unsigned int count,
1571		      void *kbuf, void __user *ubuf)
1572{
1573	int ret;
1574
1575	if (!cpu_has_feature(CPU_FTR_TM))
1576		return -ENODEV;
1577
1578	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1579		return -ENODATA;
1580
1581	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1582				&target->thread.tm_dscr, 0, sizeof(u64));
1583	return ret;
1584}
1585
1586static int tm_dscr_set(struct task_struct *target,
1587		      const struct user_regset *regset,
1588		      unsigned int pos, unsigned int count,
1589		      const void *kbuf, const void __user *ubuf)
1590{
1591	int ret;
1592
1593	if (!cpu_has_feature(CPU_FTR_TM))
1594		return -ENODEV;
1595
1596	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
1597		return -ENODATA;
1598
1599	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1600				&target->thread.tm_dscr, 0, sizeof(u64));
1601	return ret;
1602}
1603#endif	/* CONFIG_PPC_TRANSACTIONAL_MEM */
1604
1605#ifdef CONFIG_PPC64
1606static int ppr_get(struct task_struct *target,
1607		      const struct user_regset *regset,
1608		      unsigned int pos, unsigned int count,
1609		      void *kbuf, void __user *ubuf)
1610{
1611	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1612				   &target->thread.ppr, 0, sizeof(u64));
1613}
1614
1615static int ppr_set(struct task_struct *target,
1616		      const struct user_regset *regset,
1617		      unsigned int pos, unsigned int count,
1618		      const void *kbuf, const void __user *ubuf)
1619{
1620	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1621				  &target->thread.ppr, 0, sizeof(u64));
1622}
1623
1624static int dscr_get(struct task_struct *target,
1625		      const struct user_regset *regset,
1626		      unsigned int pos, unsigned int count,
1627		      void *kbuf, void __user *ubuf)
1628{
1629	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1630				   &target->thread.dscr, 0, sizeof(u64));
1631}
1632static int dscr_set(struct task_struct *target,
1633		      const struct user_regset *regset,
1634		      unsigned int pos, unsigned int count,
1635		      const void *kbuf, const void __user *ubuf)
1636{
1637	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1638				  &target->thread.dscr, 0, sizeof(u64));
1639}
1640#endif
1641#ifdef CONFIG_PPC_BOOK3S_64
1642static int tar_get(struct task_struct *target,
1643		      const struct user_regset *regset,
1644		      unsigned int pos, unsigned int count,
1645		      void *kbuf, void __user *ubuf)
1646{
1647	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1648				   &target->thread.tar, 0, sizeof(u64));
1649}
1650static int tar_set(struct task_struct *target,
1651		      const struct user_regset *regset,
1652		      unsigned int pos, unsigned int count,
1653		      const void *kbuf, const void __user *ubuf)
1654{
1655	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1656				  &target->thread.tar, 0, sizeof(u64));
1657}
1658
1659static int ebb_active(struct task_struct *target,
1660			 const struct user_regset *regset)
1661{
1662	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1663		return -ENODEV;
1664
1665	if (target->thread.used_ebb)
1666		return regset->n;
1667
1668	return 0;
1669}
1670
/* Copy out EBBRR, EBBHR and BESCR as one contiguous block. */
static int ebb_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	/* Build tests: the three registers must be adjacent in thread_struct */
	BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
	BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	if (!target->thread.used_ebb)
		return -ENODATA;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
			&target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
}
1689
/* Copy in EBBRR, EBBHR and BESCR, one register at a time. */
static int ebb_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	/* Build tests: the three registers must be adjacent in thread_struct */
	BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
	BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	/*
	 * NOTE(review): this check has the opposite polarity of ebb_get()
	 * and ebb_active(), which bail when used_ebb is *not* set.  As
	 * written, writes only succeed while the target has never used
	 * EBB -- confirm this is intentional before changing it.
	 */
	if (target->thread.used_ebb)
		return -ENODATA;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.ebbrr, 0, sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.ebbhr, sizeof(unsigned long),
			2 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.bescr,
			2 * sizeof(unsigned long), 3 * sizeof(unsigned long));

	return ret;
}
1722static int pmu_active(struct task_struct *target,
1723			 const struct user_regset *regset)
1724{
1725	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
1726		return -ENODEV;
1727
1728	return regset->n;
1729}
1730
/* Copy out SIAR, SDAR, SIER, MMCR2 and MMCR0 as one contiguous block. */
static int pmu_get(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
{
	/* Build tests: the five registers must be adjacent in thread_struct */
	BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
	BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
	BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
	BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
			&target->thread.siar, 0,
			5 * sizeof(unsigned long));
}
1749
/* Copy in SIAR, SDAR, SIER, MMCR2 and MMCR0, one register at a time. */
static int pmu_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	int ret = 0;

	/* Build tests: the five registers must be adjacent in thread_struct */
	BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
	BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
	BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
	BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return -ENODEV;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.siar, 0,
			sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.sdar, sizeof(unsigned long),
			2 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.sier, 2 * sizeof(unsigned long),
			3 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.mmcr2, 3 * sizeof(unsigned long),
			4 * sizeof(unsigned long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
			&target->thread.mmcr0, 4 * sizeof(unsigned long),
			5 * sizeof(unsigned long));
	return ret;
}
1791#endif
1792
1793#ifdef CONFIG_PPC_MEM_KEYS
1794static int pkey_active(struct task_struct *target,
1795		       const struct user_regset *regset)
1796{
1797	if (!arch_pkeys_enabled())
1798		return -ENODEV;
1799
1800	return regset->n;
1801}
1802
/* Copy out AMR, IAMR and UAMOR as one contiguous block. */
static int pkey_get(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    void *kbuf, void __user *ubuf)
{
	/* Build tests: the three registers must be adjacent in thread_struct */
	BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
	BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));

	if (!arch_pkeys_enabled())
		return -ENODEV;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.amr, 0,
				   ELF_NPKEY * sizeof(unsigned long));
}
1818
/*
 * Set the AMR from userspace.  IAMR and UAMOR are read-only through
 * this interface; only the AMR bits permitted by UAMOR may change.
 */
static int pkey_set(struct task_struct *target,
		      const struct user_regset *regset,
		      unsigned int pos, unsigned int count,
		      const void *kbuf, const void __user *ubuf)
{
	u64 new_amr;
	int ret;

	if (!arch_pkeys_enabled())
		return -ENODEV;

	/* Only the AMR can be set from userspace */
	if (pos != 0 || count != sizeof(new_amr))
		return -EINVAL;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_amr, 0, sizeof(new_amr));
	if (ret)
		return ret;

	/* UAMOR determines which bits of the AMR can be set from userspace. */
	target->thread.amr = (new_amr & target->thread.uamor) |
		(target->thread.amr & ~target->thread.uamor);

	return 0;
}
1845#endif /* CONFIG_PPC_MEM_KEYS */
1846
/*
 * These are our native regset flavors.  The values index both
 * native_regsets[] and (where applicable) compat_regsets[].
 */
enum powerpc_regset {
	REGSET_GPR,		/* General purpose registers */
	REGSET_FPR,		/* Floating point registers */
#ifdef CONFIG_ALTIVEC
	REGSET_VMX,		/* AltiVec/VMX registers */
#endif
#ifdef CONFIG_VSX
	REGSET_VSX,		/* VSX registers */
#endif
#ifdef CONFIG_SPE
	REGSET_SPE,		/* SPE registers */
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	REGSET_TM_CGPR,		/* TM checkpointed GPR registers */
	REGSET_TM_CFPR,		/* TM checkpointed FPR registers */
	REGSET_TM_CVMX,		/* TM checkpointed VMX registers */
	REGSET_TM_CVSX,		/* TM checkpointed VSX registers */
	REGSET_TM_SPR,		/* TM specific SPR registers */
	REGSET_TM_CTAR,		/* TM checkpointed TAR register */
	REGSET_TM_CPPR,		/* TM checkpointed PPR register */
	REGSET_TM_CDSCR,	/* TM checkpointed DSCR register */
#endif
#ifdef CONFIG_PPC64
	REGSET_PPR,		/* PPR register */
	REGSET_DSCR,		/* DSCR register */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	REGSET_TAR,		/* TAR register */
	REGSET_EBB,		/* EBB registers */
	REGSET_PMR,		/* Performance Monitor Registers */
#endif
#ifdef CONFIG_PPC_MEM_KEYS
	REGSET_PKEY,		/* AMR register */
#endif
};
1885
/*
 * Native regset table, indexed by enum powerpc_regset.  Each entry maps
 * an ELF core note type to its get/set (and optional active) handlers.
 */
static const struct user_regset native_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.get = gpr_get, .set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_VSX
	[REGSET_VSX] = {
		.core_note_type = NT_PPC_VSX, .n = 32,
		.size = sizeof(double), .align = sizeof(double),
		.active = vsr_active, .get = vsr_get, .set = vsr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	[REGSET_TM_CGPR] = {
		.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
	},
	[REGSET_TM_CFPR] = {
		.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
	},
	[REGSET_TM_CVMX] = {
		.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
	},
	[REGSET_TM_CVSX] = {
		.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
	},
	[REGSET_TM_SPR] = {
		.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
	},
	[REGSET_TM_CTAR] = {
		.core_note_type = NT_PPC_TM_CTAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
	},
	[REGSET_TM_CPPR] = {
		.core_note_type = NT_PPC_TM_CPPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
	},
	[REGSET_TM_CDSCR] = {
		.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
	},
#endif
#ifdef CONFIG_PPC64
	[REGSET_PPR] = {
		.core_note_type = NT_PPC_PPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = ppr_get, .set = ppr_set
	},
	[REGSET_DSCR] = {
		.core_note_type = NT_PPC_DSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = dscr_get, .set = dscr_set
	},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	[REGSET_TAR] = {
		.core_note_type = NT_PPC_TAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = tar_get, .set = tar_set
	},
	[REGSET_EBB] = {
		.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = ebb_active, .get = ebb_get, .set = ebb_set
	},
	[REGSET_PMR] = {
		.core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = pmu_active, .get = pmu_get, .set = pmu_set
	},
#endif
#ifdef CONFIG_PPC_MEM_KEYS
	[REGSET_PKEY] = {
		.core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = pkey_active, .get = pkey_get, .set = pkey_set
	},
#endif
};
1997
/* The regset view exported for native (same-width) tracees. */
static const struct user_regset_view user_ppc_native_view = {
	.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
2002
2003#ifdef CONFIG_PPC64
2004#include <linux/compat.h>
2005
/*
 * Copy out a 32-bit (compat) view of @regs.  Each 64-bit register is
 * truncated to compat_ulong_t; the MSR slot is filled from
 * get_user_msr() rather than the saved frame; anything past
 * PT_REGS_COUNT is zero-filled by user_regset_copyout_zero().
 */
static int gpr32_get_common(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
			    void *kbuf, void __user *ubuf,
			    unsigned long *regs)
{
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	/* Switch pos/count from byte units to register units. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	/* Registers before the MSR slot. */
	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_MSR; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	/* MSR is synthesized via get_user_msr(), not read from @regs. */
	if (count > 0 && pos == PT_MSR) {
		reg = get_user_msr(target);
		if (kbuf)
			*k++ = reg;
		else if (__put_user(reg, u++))
			return -EFAULT;
		++pos;
		--count;
	}

	/* Remaining registers up to PT_REGS_COUNT. */
	if (kbuf)
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	/* Back to byte units for the generic zero-fill of the tail. */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					PT_REGS_COUNT * sizeof(reg), -1);
}
2052
/*
 * Set registers from a 32-bit (compat) image.  MSR and the trap word
 * are routed through set_user_msr()/set_user_trap(); slots between
 * PT_MAX_PUT_REG and PT_TRAP, and everything after PT_TRAP, are read
 * from the buffer but discarded.
 */
static int gpr32_set_common(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf,
		     unsigned long *regs)
{
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	/* Switch pos/count from byte units to register units. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	/* Registers before the MSR slot. */
	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			regs[pos++] = *k++;
	else
		for (; count > 0 && pos < PT_MSR; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}

	/* MSR is filtered through set_user_msr(). */
	if (count > 0 && pos == PT_MSR) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_msr(target, reg);
		++pos;
		--count;
	}

	/*
	 * Writable slots end at PT_MAX_PUT_REG; slots up to PT_TRAP are
	 * consumed from the buffer but not stored.
	 */
	if (kbuf) {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
			regs[pos++] = *k++;
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			++k;
	} else {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			if (__get_user(reg, u++))
				return -EFAULT;
	}

	/* The trap word is filtered through set_user_trap(). */
	if (count > 0 && pos == PT_TRAP) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_trap(target, reg);
		++pos;
		--count;
	}

	/* Back to byte units; ignore the remainder of the buffer. */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 (PT_TRAP + 1) * sizeof(reg), -1);
}
2120
2121#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* 32-bit (compat) view of the TM checkpointed GPRs (ckpt_regs). */
static int tm_cgpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
			&target->thread.ckpt_regs.gpr[0]);
}
2130
2131static int tm_cgpr32_set(struct task_struct *target,
2132		     const struct user_regset *regset,
2133		     unsigned int pos, unsigned int count,
2134		     const void *kbuf, const void __user *ubuf)
2135{
2136	return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2137			&target->thread.ckpt_regs.gpr[0]);
2138}
2139#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2140
2141static int gpr32_get(struct task_struct *target,
2142		     const struct user_regset *regset,
2143		     unsigned int pos, unsigned int count,
2144		     void *kbuf, void __user *ubuf)
2145{
2146	int i;
2147
2148	if (target->thread.regs == NULL)
2149		return -EIO;
2150
2151	if (!FULL_REGS(target->thread.regs)) {
2152		/*
2153		 * We have a partial register set.
2154		 * Fill 14-31 with bogus values.
2155		 */
2156		for (i = 14; i < 32; i++)
2157			target->thread.regs->gpr[i] = NV_REG_POISON;
2158	}
2159	return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
2160			&target->thread.regs->gpr[0]);
2161}
2162
2163static int gpr32_set(struct task_struct *target,
2164		     const struct user_regset *regset,
2165		     unsigned int pos, unsigned int count,
2166		     const void *kbuf, const void __user *ubuf)
2167{
2168	if (target->thread.regs == NULL)
2169		return -EIO;
2170
2171	CHECK_FULL_REGS(target->thread.regs);
2172	return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
2173			&target->thread.regs->gpr[0]);
2174}
2175
/*
 * These are the regset flavors matching the CONFIG_PPC32 native set.
 * They are exposed to 32-bit tracers of tasks running on a 64-bit
 * kernel; GPRs go through the gpr32_* compat wrappers above, while the
 * remaining regsets share the native handlers (their layout is the
 * same in both ABIs).
 */
static const struct user_regset compat_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Checkpointed (pre-transaction) state for TM-capable CPUs. */
	[REGSET_TM_CGPR] = {
		.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.active = tm_cgpr_active,
		.get = tm_cgpr32_get, .set = tm_cgpr32_set
	},
	[REGSET_TM_CFPR] = {
		.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
	},
	[REGSET_TM_CVMX] = {
		.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
	},
	[REGSET_TM_CVSX] = {
		.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
		.size = sizeof(double), .align = sizeof(double),
		.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
	},
	[REGSET_TM_SPR] = {
		.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
	},
	[REGSET_TM_CTAR] = {
		.core_note_type = NT_PPC_TM_CTAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
	},
	[REGSET_TM_CPPR] = {
		.core_note_type = NT_PPC_TM_CPPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
	},
	[REGSET_TM_CDSCR] = {
		.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
	},
#endif
#ifdef CONFIG_PPC64
	[REGSET_PPR] = {
		.core_note_type = NT_PPC_PPR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = ppr_get, .set = ppr_set
	},
	[REGSET_DSCR] = {
		.core_note_type = NT_PPC_DSCR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = dscr_get, .set = dscr_set
	},
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	[REGSET_TAR] = {
		.core_note_type = NT_PPC_TAR, .n = 1,
		.size = sizeof(u64), .align = sizeof(u64),
		.get = tar_get, .set = tar_set
	},
	[REGSET_EBB] = {
		.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
		.size = sizeof(u64), .align = sizeof(u64),
		.active = ebb_active, .get = ebb_get, .set = ebb_set
	},
#endif
};
2272
/* Regset view handed to 32-bit (compat) tracers: ppc32 ABI, EM_PPC. */
static const struct user_regset_view user_ppc_compat_view = {
	.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
2277#endif	/* CONFIG_PPC64 */
2278
2279const struct user_regset_view *task_user_regset_view(struct task_struct *task)
2280{
2281#ifdef CONFIG_PPC64
2282	if (test_tsk_thread_flag(task, TIF_32BIT))
2283		return &user_ppc_compat_view;
2284#endif
2285	return &user_ppc_native_view;
2286}
2287
2288
/*
 * Arrange for @task to trap after each instruction it executes
 * (ptrace single-step support).
 */
void user_enable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/* BookE: instruction-complete event, not branch-taken. */
		task->thread.debug.dbcr0 &= ~DBCR0_BT;
		task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
		regs->msr |= MSR_DE;
#else
		/* Classic: MSR single-step, make sure branch-trace is off. */
		regs->msr &= ~MSR_BE;
		regs->msr |= MSR_SE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
2305
2306void user_enable_block_step(struct task_struct *task)
2307{
2308	struct pt_regs *regs = task->thread.regs;
2309
2310	if (regs != NULL) {
2311#ifdef CONFIG_PPC_ADV_DEBUG_REGS
2312		task->thread.debug.dbcr0 &= ~DBCR0_IC;
2313		task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
2314		regs->msr |= MSR_DE;
2315#else
2316		regs->msr &= ~MSR_SE;
2317		regs->msr |= MSR_BE;
2318#endif
2319	}
2320	set_tsk_thread_flag(task, TIF_SINGLESTEP);
2321}
2322
/*
 * Undo user_enable_single_step()/user_enable_block_step() for @task.
 */
void user_disable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/*
		 * The logic to disable single stepping should be as
		 * simple as turning off the Instruction Complete flag.
		 * And, after doing so, if all debug flags are off, turn
		 * off DBCR0(IDM) and MSR(DE) .... Torez
		 */
		task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
		/*
		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
		 */
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			/*
			 * All debug events were off.....
			 */
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
			regs->msr &= ~MSR_DE;
		}
#else
		/* Classic: clear both single-step and branch-trace. */
		regs->msr &= ~(MSR_SE | MSR_BE);
#endif
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
2353
2354#ifdef CONFIG_HAVE_HW_BREAKPOINT
2355void ptrace_triggered(struct perf_event *bp,
2356		      struct perf_sample_data *data, struct pt_regs *regs)
2357{
2358	struct perf_event_attr attr;
2359
2360	/*
2361	 * Disable the breakpoint request here since ptrace has defined a
2362	 * one-shot behaviour for breakpoint exceptions in PPC64.
2363	 * The SIGTRAP signal is generated automatically for us in do_dabr().
2364	 * We don't have to do anything about that here
2365	 */
2366	attr = bp->attr;
2367	attr.disabled = true;
2368	modify_user_hw_breakpoint(bp, &attr);
2369}
2370#endif /* CONFIG_HAVE_HW_BREAKPOINT */
2371
/*
 * PTRACE_SET_DEBUGREG: program the single supported data breakpoint —
 * DABR-style on classic/server parts, DAC1 on BookE parts.
 *
 * @addr: debug register index; only 0 is supported.
 * @data: breakpoint address with mode flags in the low bits;
 *        data == 0 clears the breakpoint.
 *
 * Returns 0 on success or a negative errno.
 */
static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
			       unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret;
	struct thread_struct *thread = &(task->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	bool set_bp = true;
	struct arch_hw_breakpoint hw_brk;
#endif

	/* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
	 *  For embedded processors we support one DAC and no IAC's at the
	 *  moment.
	 */
	if (addr > 0)
		return -EINVAL;

	/* The bottom 3 bits in dabr are flags */
	if ((data & ~0x7UL) >= TASK_SIZE)
		return -EIO;

#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
	 *  It was assumed, on previous implementations, that 3 bits were
	 *  passed together with the data address, fitting the design of the
	 *  DABR register, as follows:
	 *
	 *  bit 0: Read flag
	 *  bit 1: Write flag
	 *  bit 2: Breakpoint translation
	 *
	 *  Thus, we use them here as so.
	 */

	/* Ensure breakpoint translation bit is set */
	if (data && !(data & HW_BRK_TYPE_TRANSLATE))
		return -EIO;
	hw_brk.address = data & (~HW_BRK_TYPE_DABR);
	hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
	hw_brk.len = 8;
	/* A zero address or absent R/W bits means "clear the breakpoint". */
	set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if (!set_bp) {
		/* Clearing: tear down any existing perf-backed breakpoint. */
		if (bp) {
			unregister_hw_breakpoint(bp);
			thread->ptrace_bps[0] = NULL;
		}
		return 0;
	}
	if (bp) {
		/* Re-use the existing event: move it and re-enable it. */
		attr = bp->attr;
		attr.bp_addr = hw_brk.address;
		arch_bp_generic_fields(hw_brk.type, &attr.bp_type);

		/* Enable breakpoint */
		attr.disabled = false;

		ret =  modify_user_hw_breakpoint(bp, &attr);
		if (ret) {
			return ret;
		}
		thread->ptrace_bps[0] = bp;
		thread->hw_brk = hw_brk;
		return 0;
	}

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = hw_brk.address;
	arch_bp_generic_fields(hw_brk.type,
			       &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, task);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

#else /* !CONFIG_HAVE_HW_BREAKPOINT */
	if (set_bp && (!ppc_breakpoint_available()))
		return -ENODEV;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	task->thread.hw_brk = hw_brk;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
	/* As described above, it was assumed 3 bits were passed with the data
	 *  address, but we will assume only the mode bits will be passed
	 *  as to not cause alignment restrictions for DAC-based processors.
	 */

	/* DAC's hold the whole address without any mode flags */
	task->thread.debug.dac1 = data & ~0x3UL;

	if (task->thread.debug.dac1 == 0) {
		/* Clearing: also drop IDM/MSR_DE if no other event is live. */
		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			task->thread.regs->msr &= ~MSR_DE;
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}
		return 0;
	}

	/* Read or Write bits must be set */

	if (!(data & 0x3UL))
		return -EINVAL;

	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
	   register */
	task->thread.debug.dbcr0 |= DBCR0_IDM;

	/* Check for write and read flags and set DBCR0
	   accordingly */
	dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
	if (data & 0x1UL)
		dbcr_dac(task) |= DBCR_DAC1R;
	if (data & 0x2UL)
		dbcr_dac(task) |= DBCR_DAC1W;
	task->thread.regs->msr |= MSR_DE;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
	return 0;
}
2500
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}
2511
2512#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * PPC_PTRACE_SETHWDEBUG helper: allocate IAC (Instruction Address
 * Compare) slot(s) for an execute breakpoint on a BookE-style part.
 *
 * Range breakpoints consume an adjacent IAC pair (1/2, or 3/4 when the
 * hardware has more than two IACs).  Exact breakpoints use a single
 * slot, preferring an allocation that keeps a pair free for a later
 * range request.
 *
 * Returns the 1-based slot number handed to the tracer, or a negative
 * errno (-EIO for a bad address, -ENOSPC when no slot is free).
 */
static long set_instruction_bp(struct task_struct *child,
			      struct ppc_hw_breakpoint *bp_info)
{
	int slot;
	int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
	int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
	int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
	int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);

	/* A pair range mode implies both slots of the pair are busy. */
	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
		slot2_in_use = 1;
	if (dbcr_iac_range(child) & DBCR_IAC34MODE)
		slot4_in_use = 1;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {

		/* Make sure range is valid. */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;

		/* We need a pair of IAC regsisters */
		if ((!slot1_in_use) && (!slot2_in_use)) {
			slot = 1;
			child->thread.debug.iac1 = bp_info->addr;
			child->thread.debug.iac2 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC1;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC12X;
			else
				dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if ((!slot3_in_use) && (!slot4_in_use)) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.iac4 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC34X;
			else
				dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
		} else
			return -ENOSPC;
	} else {
		/* We only need one.  If possible leave a pair free in
		 * case a range is needed later
		 */
		if (!slot1_in_use) {
			/*
			 * Don't use iac1 if iac1-iac2 are free and either
			 * iac3 or iac4 (but not both) are free
			 */
			if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
				slot = 1;
				child->thread.debug.iac1 = bp_info->addr;
				child->thread.debug.dbcr0 |= DBCR0_IAC1;
				goto out;
			}
		}
		if (!slot2_in_use) {
			slot = 2;
			child->thread.debug.iac2 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if (!slot3_in_use) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
		} else if (!slot4_in_use) {
			slot = 4;
			child->thread.debug.iac4 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC4;
#endif
		} else
			return -ENOSPC;
	}
out:
	/* Enable internal debug mode and the debug exception. */
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot;
}
2600
/*
 * Free the IAC slot @slot previously handed out by set_instruction_bp().
 * Deleting slot 1 (or 3) also tears down the pair range it anchors;
 * slot 2 (or 4) cannot be deleted on its own while part of a range.
 *
 * Returns 0, -ENOENT if the slot is not in use, or -EINVAL for an
 * invalid slot / a slot locked into a range.
 */
static int del_instruction_bp(struct task_struct *child, int slot)
{
	switch (slot) {
	case 1:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
			/* address range - clear slots 1 & 2 */
			child->thread.debug.iac2 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
		}
		child->thread.debug.iac1 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		break;
	case 2:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
			/* used in a range */
			return -EINVAL;
		child->thread.debug.iac2 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 3:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
			/* address range - clear slots 3 & 4 */
			child->thread.debug.iac4 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
		}
		child->thread.debug.iac3 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		break;
	case 4:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
			/* Used in a range */
			return -EINVAL;
		child->thread.debug.iac4 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		break;
#endif
	default:
		return -EINVAL;
	}
	return 0;
}
2655
/*
 * Allocate a DAC (Data Address Compare) watchpoint slot, optionally
 * paired with a DVC (Data Value Compare) condition on hardware that
 * has DVC registers.
 *
 * Returns the slot number + 4 — data breakpoints are numbered after
 * the four instruction slots for the tracer's PPC_PTRACE_DELHWDEBUG
 * handle — or a negative errno (-EINVAL for a byte-enable without a
 * condition mode, -EIO for a bad address, -ENOSPC when both DACs are
 * busy).
 */
static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
	int byte_enable =
		(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
		& 0xf;
	int condition_mode =
		bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
	int slot;

	if (byte_enable && (condition_mode == 0))
		return -EINVAL;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
		slot = 1;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC1R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC1W;
		child->thread.debug.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc1 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC1BE_SHIFT) |
				 (condition_mode << DBCR2_DVC1M_SHIFT));
		}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
		/* Both dac1 and dac2 are part of a range */
		return -ENOSPC;
#endif
	} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
		slot = 2;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC2R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC2W;
		child->thread.debug.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc2 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC2BE_SHIFT) |
				 (condition_mode << DBCR2_DVC2M_SHIFT));
		}
#endif
	} else
		return -ENOSPC;
	/* Enable internal debug mode and the debug exception. */
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot + 4;
}
2715
/*
 * Free the DAC watchpoint slot @slot (1 or 2, i.e. the tracer handle
 * minus 4) allocated by set_dac().  Deleting slot 1 also dissolves a
 * DAC1/DAC2 range; slot 2 cannot be deleted while part of a range.
 * Any associated DVC state is cleared as well.
 *
 * Returns 0, -ENOENT if the slot is not in use, or -EINVAL.
 */
static int del_dac(struct task_struct *child, int slot)
{
	if (slot == 1) {
		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
			return -ENOENT;

		child->thread.debug.dac1 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
			/* Range: DAC2 goes down together with DAC1. */
			child->thread.debug.dac2 = 0;
			child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
		}
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc1 = 0;
#endif
	} else if (slot == 2) {
		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
			return -ENOENT;

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
			/* Part of a range */
			return -EINVAL;
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc2 = 0;
#endif
		child->thread.debug.dac2 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
	} else
		return -EINVAL;

	return 0;
}
2754#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
2755
2756#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
/*
 * Program DAC1/DAC2 as an inclusive, exclusive, or masked range
 * watchpoint (hardware with CONFIG_PPC_ADV_DEBUG_DAC_RANGE).  Both
 * DACs are consumed by a range, so this fails with -ENOSPC if either
 * is already in use.
 *
 * Returns 5 — the fixed pseudo-slot number a range occupies in the
 * tracer's numbering — or a negative errno.
 */
static int set_dac_range(struct task_struct *child,
			 struct ppc_hw_breakpoint *bp_info)
{
	int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;

	/* We don't allow range watchpoints to be used with DVC */
	if (bp_info->condition_mode)
		return -EINVAL;

	/*
	 * Best effort to verify the address range.  The user/supervisor bits
	 * prevent trapping in kernel space, but let's fail on an obvious bad
	 * range.  The simple test on the mask is not fool-proof, and any
	 * exclusive range will spill over into kernel space.
	 */
	if (bp_info->addr >= TASK_SIZE)
		return -EIO;
	if (mode == PPC_BREAKPOINT_MODE_MASK) {
		/*
		 * dac2 is a bitmask.  Don't allow a mask that makes a
		 * kernel space address from a valid dac1 value
		 */
		if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
			return -EIO;
	} else {
		/*
		 * For range breakpoints, addr2 must also be a valid address
		 */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;
	}

	if (child->thread.debug.dbcr0 &
	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
		return -ENOSPC;

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
	child->thread.debug.dac1 = bp_info->addr;
	child->thread.debug.dac2 = bp_info->addr2;
	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		child->thread.debug.dbcr2  |= DBCR2_DAC12M;
	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
		child->thread.debug.dbcr2  |= DBCR2_DAC12MX;
	else	/* PPC_BREAKPOINT_MODE_MASK */
		child->thread.debug.dbcr2  |= DBCR2_DAC12MM;
	child->thread.regs->msr |= MSR_DE;

	return 5;
}
2809#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
2810
/*
 * PPC_PTRACE_SETHWDEBUG: install a hardware breakpoint/watchpoint
 * described by @bp_info.  On BookE (CONFIG_PPC_ADV_DEBUG_REGS) this
 * dispatches to the IAC/DAC slot allocators above; on server parts it
 * programs the single DABR/DAWR, via perf when available.
 *
 * Returns a positive slot handle for PPC_PTRACE_DELHWDEBUG, or a
 * negative errno.
 */
static long ppc_set_hwdebug(struct task_struct *child,
		     struct ppc_hw_breakpoint *bp_info)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int len = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	struct arch_hw_breakpoint brk;
#endif

	if (bp_info->version != 1)
		return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * Check for invalid flags and combinations
	 */
	if ((bp_info->trigger_type == 0) ||
	    (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
				       PPC_BREAKPOINT_TRIGGER_RW)) ||
	    (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
	    (bp_info->condition_mode &
	     ~(PPC_BREAKPOINT_CONDITION_MODE |
	       PPC_BREAKPOINT_CONDITION_BE_ALL)))
		return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
	if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;
#endif

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
		/* Execute triggers must be pure and unconditional. */
		if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
		    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
			return -EINVAL;
		return set_instruction_bp(child, bp_info);
	}
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		return set_dac(child, bp_info);

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	return set_dac_range(child, bp_info);
#else
	return -EINVAL;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
	/*
	 * We only support one data breakpoint
	 */
	if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
	    (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
	    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;

	if ((unsigned long)bp_info->addr >= TASK_SIZE)
		return -EIO;

	brk.address = bp_info->addr & ~7UL;
	brk.type = HW_BRK_TYPE_TRANSLATE;
	brk.len = 8;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		brk.type |= HW_BRK_TYPE_READ;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		brk.type |= HW_BRK_TYPE_WRITE;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/*
	 * Check if the request is for 'range' breakpoints. We can
	 * support it if range < 8 bytes.
	 */
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		len = bp_info->addr2 - bp_info->addr;
	else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		len = 1;
	else
		return -EINVAL;
	bp = thread->ptrace_bps[0];
	if (bp)
		return -ENOSPC;

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
	attr.bp_len = len;
	arch_bp_generic_fields(brk.type, &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, child);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

	return 1;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	/*
	 * Fallback path: only compiled in (and reachable) when
	 * CONFIG_HAVE_HW_BREAKPOINT is not set — program hw_brk directly.
	 */
	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
		return -EINVAL;

	if (child->thread.hw_brk.address)
		return -ENOSPC;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	child->thread.hw_brk = brk;

	return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_REGS */
}
2921
/*
 * PPC_PTRACE_DELHWDEBUG: remove the breakpoint identified by the slot
 * handle @data previously returned by ppc_set_hwdebug().  On BookE,
 * handles 1-4 are IAC slots and 5+ are DAC slots; on server parts
 * only handle 1 exists.  Returns 0 or a negative errno.
 */
static long ppc_del_hwdebug(struct task_struct *child, long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	int rc;

	if (data <= 4)
		rc = del_instruction_bp(child, (int)data);
	else
		rc = del_dac(child, (int)data - 4);

	if (!rc) {
		/* Last event gone: drop internal debug mode and MSR_DE. */
		if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
					child->thread.debug.dbcr1)) {
			child->thread.debug.dbcr0 &= ~DBCR0_IDM;
			child->thread.regs->msr &= ~MSR_DE;
		}
	}
	return rc;
#else
	if (data != 1)
		return -EINVAL;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if (bp) {
		unregister_hw_breakpoint(bp);
		thread->ptrace_bps[0] = NULL;
	} else
		ret = -ENOENT;
	return ret;
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	if (child->thread.hw_brk.address == 0)
		return -ENOENT;

	child->thread.hw_brk.address = 0;
	child->thread.hw_brk.type = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	return 0;
#endif
}
2968
2969long arch_ptrace(struct task_struct *child, long request,
2970		 unsigned long addr, unsigned long data)
2971{
2972	int ret = -EPERM;
2973	void __user *datavp = (void __user *) data;
2974	unsigned long __user *datalp = datavp;
2975
2976	switch (request) {
2977	/* read the word at location addr in the USER area. */
2978	case PTRACE_PEEKUSR: {
2979		unsigned long index, tmp;
2980
2981		ret = -EIO;
2982		/* convert to index and check */
2983#ifdef CONFIG_PPC32
2984		index = addr >> 2;
2985		if ((addr & 3) || (index > PT_FPSCR)
2986		    || (child->thread.regs == NULL))
2987#else
2988		index = addr >> 3;
2989		if ((addr & 7) || (index > PT_FPSCR))
2990#endif
2991			break;
2992
2993		CHECK_FULL_REGS(child->thread.regs);
2994		if (index < PT_FPR0) {
2995			ret = ptrace_get_reg(child, (int) index, &tmp);
2996			if (ret)
2997				break;
2998		} else {
2999			unsigned int fpidx = index - PT_FPR0;
3000
3001			flush_fp_to_thread(child);
3002			if (fpidx < (PT_FPSCR - PT_FPR0))
3003				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
3004				       sizeof(long));
3005			else
3006				tmp = child->thread.fp_state.fpscr;
3007		}
3008		ret = put_user(tmp, datalp);
3009		break;
3010	}
3011
3012	/* write the word at location addr in the USER area */
3013	case PTRACE_POKEUSR: {
3014		unsigned long index;
3015
3016		ret = -EIO;
3017		/* convert to index and check */
3018#ifdef CONFIG_PPC32
3019		index = addr >> 2;
3020		if ((addr & 3) || (index > PT_FPSCR)
3021		    || (child->thread.regs == NULL))
3022#else
3023		index = addr >> 3;
3024		if ((addr & 7) || (index > PT_FPSCR))
3025#endif
3026			break;
3027
3028		CHECK_FULL_REGS(child->thread.regs);
3029		if (index < PT_FPR0) {
3030			ret = ptrace_put_reg(child, index, data);
3031		} else {
3032			unsigned int fpidx = index - PT_FPR0;
3033
3034			flush_fp_to_thread(child);
3035			if (fpidx < (PT_FPSCR - PT_FPR0))
3036				memcpy(&child->thread.TS_FPR(fpidx), &data,
3037				       sizeof(long));
3038			else
3039				child->thread.fp_state.fpscr = data;
3040			ret = 0;
3041		}
3042		break;
3043	}
3044
3045	case PPC_PTRACE_GETHWDBGINFO: {
3046		struct ppc_debug_info dbginfo;
3047
3048		dbginfo.version = 1;
3049#ifdef CONFIG_PPC_ADV_DEBUG_REGS
3050		dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
3051		dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
3052		dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
3053		dbginfo.data_bp_alignment = 4;
3054		dbginfo.sizeof_condition = 4;
3055		dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
3056				   PPC_DEBUG_FEATURE_INSN_BP_MASK;
3057#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
3058		dbginfo.features |=
3059				   PPC_DEBUG_FEATURE_DATA_BP_RANGE |
3060				   PPC_DEBUG_FEATURE_DATA_BP_MASK;
3061#endif
3062#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
3063		dbginfo.num_instruction_bps = 0;
3064		if (ppc_breakpoint_available())
3065			dbginfo.num_data_bps = 1;
3066		else
3067			dbginfo.num_data_bps = 0;
3068		dbginfo.num_condition_regs = 0;
3069#ifdef CONFIG_PPC64
3070		dbginfo.data_bp_alignment = 8;
3071#else
3072		dbginfo.data_bp_alignment = 4;
3073#endif
3074		dbginfo.sizeof_condition = 0;
3075#ifdef CONFIG_HAVE_HW_BREAKPOINT
3076		dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
3077		if (cpu_has_feature(CPU_FTR_DAWR))
3078			dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
3079#else
3080		dbginfo.features = 0;
3081#endif /* CONFIG_HAVE_HW_BREAKPOINT */
3082#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
3083
3084		if (!access_ok(VERIFY_WRITE, datavp,
3085			       sizeof(struct ppc_debug_info)))
3086			return -EFAULT;
3087		ret = __copy_to_user(datavp, &dbginfo,
3088				     sizeof(struct ppc_debug_info)) ?
3089		      -EFAULT : 0;
3090		break;
3091	}
3092
3093	case PPC_PTRACE_SETHWDEBUG: {
3094		struct ppc_hw_breakpoint bp_info;
3095
3096		if (!access_ok(VERIFY_READ, datavp,
3097			       sizeof(struct ppc_hw_breakpoint)))
3098			return -EFAULT;
3099		ret = __copy_from_user(&bp_info, datavp,
3100				       sizeof(struct ppc_hw_breakpoint)) ?
3101		      -EFAULT : 0;
3102		if (!ret)
3103			ret = ppc_set_hwdebug(child, &bp_info);
3104		break;
3105	}
3106
3107	case PPC_PTRACE_DELHWDEBUG: {
3108		ret = ppc_del_hwdebug(child, data);
3109		break;
3110	}
3111
3112	case PTRACE_GET_DEBUGREG: {
3113#ifndef CONFIG_PPC_ADV_DEBUG_REGS
3114		unsigned long dabr_fake;
3115#endif
3116		ret = -EINVAL;
3117		/* We only support one DABR and no IABRS at the moment */
3118		if (addr > 0)
3119			break;
3120#ifdef CONFIG_PPC_ADV_DEBUG_REGS
3121		ret = put_user(child->thread.debug.dac1, datalp);
3122#else
3123		dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
3124			     (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
3125		ret = put_user(dabr_fake, datalp);
3126#endif
3127		break;
3128	}
3129
3130	case PTRACE_SET_DEBUGREG:
3131		ret = ptrace_set_debugreg(child, addr, data);
3132		break;
3133
3134#ifdef CONFIG_PPC64
3135	case PTRACE_GETREGS64:
3136#endif
3137	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
3138		return copy_regset_to_user(child, &user_ppc_native_view,
3139					   REGSET_GPR,
3140					   0, sizeof(struct pt_regs),
3141					   datavp);
3142
3143#ifdef CONFIG_PPC64
3144	case PTRACE_SETREGS64:
3145#endif
3146	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
3147		return copy_regset_from_user(child, &user_ppc_native_view,
3148					     REGSET_GPR,
3149					     0, sizeof(struct pt_regs),
3150					     datavp);
3151
3152	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
3153		return copy_regset_to_user(child, &user_ppc_native_view,
3154					   REGSET_FPR,
3155					   0, sizeof(elf_fpregset_t),
3156					   datavp);
3157
3158	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
3159		return copy_regset_from_user(child, &user_ppc_native_view,
3160					     REGSET_FPR,
3161					     0, sizeof(elf_fpregset_t),
3162					     datavp);
3163
3164#ifdef CONFIG_ALTIVEC
3165	case PTRACE_GETVRREGS:
3166		return copy_regset_to_user(child, &user_ppc_native_view,
3167					   REGSET_VMX,
3168					   0, (33 * sizeof(vector128) +
3169					       sizeof(u32)),
3170					   datavp);
3171
3172	case PTRACE_SETVRREGS:
3173		return copy_regset_from_user(child, &user_ppc_native_view,
3174					     REGSET_VMX,
3175					     0, (33 * sizeof(vector128) +
3176						 sizeof(u32)),
3177					     datavp);
3178#endif
3179#ifdef CONFIG_VSX
3180	case PTRACE_GETVSRREGS:
3181		return copy_regset_to_user(child, &user_ppc_native_view,
3182					   REGSET_VSX,
3183					   0, 32 * sizeof(double),
3184					   datavp);
3185
3186	case PTRACE_SETVSRREGS:
3187		return copy_regset_from_user(child, &user_ppc_native_view,
3188					     REGSET_VSX,
3189					     0, 32 * sizeof(double),
3190					     datavp);
3191#endif
3192#ifdef CONFIG_SPE
3193	case PTRACE_GETEVRREGS:
3194		/* Get the child spe register state. */
3195		return copy_regset_to_user(child, &user_ppc_native_view,
3196					   REGSET_SPE, 0, 35 * sizeof(u32),
3197					   datavp);
3198
3199	case PTRACE_SETEVRREGS:
3200		/* Set the child spe register state. */
3201		return copy_regset_from_user(child, &user_ppc_native_view,
3202					     REGSET_SPE, 0, 35 * sizeof(u32),
3203					     datavp);
3204#endif
3205
3206	default:
3207		ret = ptrace_request(child, request, addr, data);
3208		break;
3209	}
3210	return ret;
3211}
3212
#ifdef CONFIG_SECCOMP
/*
 * do_seccomp() - run seccomp filtering for the current syscall.
 * @regs: register state at syscall entry (current task)
 *
 * Returns 0 if the syscall may proceed, -1 if seccomp rejected it.  On
 * rejection the value userspace should see has already been placed in r3.
 */
static int do_seccomp(struct pt_regs *regs)
{
	/* Fast path: task has no seccomp filter installed. */
	if (!test_thread_flag(TIF_SECCOMP))
		return 0;

	/*
	 * The ABI we present to seccomp tracers is that r3 contains
	 * the syscall return value and orig_gpr3 contains the first
	 * syscall parameter. This is different to the ptrace ABI where
	 * both r3 and orig_gpr3 contain the first syscall parameter.
	 */
	regs->gpr[3] = -ENOSYS;

	/*
	 * We use the __ version here because we have already checked
	 * TIF_SECCOMP. If this fails, there is nothing left to do, we
	 * have already loaded -ENOSYS into r3, or seccomp has put
	 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
	 */
	if (__secure_computing(NULL))
		return -1;

	/*
	 * The syscall was allowed by seccomp, restore the register
	 * state to what audit expects.
	 * Note that we use orig_gpr3, which means a seccomp tracer can
	 * modify the first syscall parameter (in orig_gpr3) and also
	 * allow the syscall to proceed.
	 */
	regs->gpr[3] = regs->orig_gpr3;

	return 0;
}
#else
/* Seccomp compiled out: never reject a syscall. */
static inline int do_seccomp(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SECCOMP */
3250
/**
 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
 * @regs: the pt_regs of the task to trace (current)
 *
 * Performs various types of tracing on syscall entry. This includes seccomp,
 * ptrace, syscall tracepoints and audit.
 *
 * The pt_regs are potentially visible to userspace via ptrace, so their
 * contents is ABI.
 *
 * One or more of the tracers may modify the contents of pt_regs, in particular
 * to modify arguments or even the syscall number itself.
 *
 * It's also possible that a tracer can choose to reject the system call. In
 * that case this function will return an illegal syscall number, and will put
 * an appropriate return value in regs->r3.
 *
 * Return: the (possibly changed) syscall number.
 */
long do_syscall_trace_enter(struct pt_regs *regs)
{
	/* Leave "user" context-tracking state on kernel entry. */
	user_exit();

	/*
	 * The tracer may decide to abort the syscall, if so tracehook
	 * will return !0. Note that the tracer may also just change
	 * regs->gpr[0] to an invalid syscall number, that is handled
	 * below on the exit path.
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		goto skip;

	/* Run seccomp after ptrace; allow it to set gpr[3]. */
	if (do_seccomp(regs))
		return -1;

	/* Avoid trace and audit when syscall is invalid. */
	if (regs->gpr[0] >= NR_syscalls)
		goto skip;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gpr[0]);

#ifdef CONFIG_PPC64
	if (!is_32bit_task())
		audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
				    regs->gpr[5], regs->gpr[6]);
	else
#endif
		/* 32-bit task: audit only the low 32 bits of each argument. */
		audit_syscall_entry(regs->gpr[0],
				    regs->gpr[3] & 0xffffffff,
				    regs->gpr[4] & 0xffffffff,
				    regs->gpr[5] & 0xffffffff,
				    regs->gpr[6] & 0xffffffff);

	/* Return the possibly modified but valid syscall number */
	return regs->gpr[0];

skip:
	/*
	 * If we are aborting explicitly, or if the syscall number is
	 * now invalid, set the return value to -ENOSYS.
	 */
	regs->gpr[3] = -ENOSYS;
	return -1;
}
3318
3319void do_syscall_trace_leave(struct pt_regs *regs)
3320{
3321	int step;
3322
3323	audit_syscall_exit(regs);
3324
3325	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
3326		trace_sys_exit(regs, regs->result);
3327
3328	step = test_thread_flag(TIF_SINGLESTEP);
3329	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
3330		tracehook_report_syscall_exit(regs, step);
3331
3332	user_enter();
3333}
v4.6
   1/*
   2 *  PowerPC version
   3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   4 *
   5 *  Derived from "arch/m68k/kernel/ptrace.c"
   6 *  Copyright (C) 1994 by Hamish Macdonald
   7 *  Taken from linux/kernel/ptrace.c and modified for M680x0.
   8 *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
   9 *
  10 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
  11 * and Paul Mackerras (paulus@samba.org).
  12 *
  13 * This file is subject to the terms and conditions of the GNU General
  14 * Public License.  See the file README.legal in the main directory of
  15 * this archive for more details.
  16 */
  17
  18#include <linux/kernel.h>
  19#include <linux/sched.h>
  20#include <linux/mm.h>
  21#include <linux/smp.h>
  22#include <linux/errno.h>
  23#include <linux/ptrace.h>
  24#include <linux/regset.h>
  25#include <linux/tracehook.h>
  26#include <linux/elf.h>
  27#include <linux/user.h>
  28#include <linux/security.h>
  29#include <linux/signal.h>
  30#include <linux/seccomp.h>
  31#include <linux/audit.h>
  32#include <trace/syscall.h>
  33#include <linux/hw_breakpoint.h>
  34#include <linux/perf_event.h>
  35#include <linux/context_tracking.h>
  36
  37#include <asm/uaccess.h>
 
  38#include <asm/page.h>
  39#include <asm/pgtable.h>
  40#include <asm/switch_to.h>
 
 
 
  41
  42#define CREATE_TRACE_POINTS
  43#include <trace/events/syscalls.h>
  44
  45/*
  46 * The parameter save area on the stack is used to store arguments being passed
  47 * to callee function and is located at fixed offset from stack pointer.
  48 */
  49#ifdef CONFIG_PPC32
  50#define PARAMETER_SAVE_AREA_OFFSET	24  /* bytes */
  51#else /* CONFIG_PPC32 */
  52#define PARAMETER_SAVE_AREA_OFFSET	48  /* bytes */
  53#endif
  54
/* Maps a symbolic register name to its byte offset within struct pt_regs. */
struct pt_regs_offset {
	const char *name;	/* register name, e.g. "r3" or "nip" */
	int offset;		/* offsetof(struct pt_regs, <field>) */
};

#define STR(s)	#s			/* convert to string */
/* Table entry for a named pt_regs field. */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
/* Each GPR gets two lookup aliases: "rN" and "gprN". */
#define GPR_OFFSET_NAME(num)	\
	{.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
/* Sentinel terminating regoffset_table (name == NULL). */
#define REG_OFFSET_END {.name = NULL, .offset = 0}
  66
 
 
 
 
/*
 * Name -> pt_regs offset lookup table used by regs_query_register_offset()
 * and regs_query_register_name().  Terminated by REG_OFFSET_END.
 */
static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	GPR_OFFSET_NAME(31),
	REG_OFFSET_NAME(nip),
	REG_OFFSET_NAME(msr),
	REG_OFFSET_NAME(ctr),
	REG_OFFSET_NAME(link),
	REG_OFFSET_NAME(xer),
	REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
	REG_OFFSET_NAME(softe),
#else
	REG_OFFSET_NAME(mq),
#endif
	REG_OFFSET_NAME(trap),
	REG_OFFSET_NAME(dar),
	REG_OFFSET_NAME(dsisr),
	REG_OFFSET_END,
};
 116
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 117/**
 118 * regs_query_register_offset() - query register offset from its name
 119 * @name:	the name of a register
 120 *
 121 * regs_query_register_offset() returns the offset of a register in struct
 122 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 123 */
 124int regs_query_register_offset(const char *name)
 125{
 126	const struct pt_regs_offset *roff;
 127	for (roff = regoffset_table; roff->name != NULL; roff++)
 128		if (!strcmp(roff->name, name))
 129			return roff->offset;
 130	return -EINVAL;
 131}
 132
 133/**
 134 * regs_query_register_name() - query register name from its offset
 135 * @offset:	the offset of a register in struct pt_regs.
 136 *
 137 * regs_query_register_name() returns the name of a register from its
 138 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
 139 */
 140const char *regs_query_register_name(unsigned int offset)
 141{
 142	const struct pt_regs_offset *roff;
 143	for (roff = regoffset_table; roff->name != NULL; roff++)
 144		if (roff->offset == offset)
 145			return roff->name;
 146	return NULL;
 147}
 148
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Set of msr bits that gdb can change on behalf of a process.
 * With CONFIG_PPC_ADV_DEBUG_REGS no MSR bits are user-changeable
 * (debug state lives in dedicated registers instead).
 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE	0
#else
#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
#endif

/*
 * Max register writeable via put_reg
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG	PT_MQ
#else
#define PT_MAX_PUT_REG	PT_CCR
#endif
 171
 172static unsigned long get_user_msr(struct task_struct *task)
 173{
 174	return task->thread.regs->msr | task->thread.fpexc_mode;
 175}
 176
 177static int set_user_msr(struct task_struct *task, unsigned long msr)
 178{
 179	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
 180	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
 181	return 0;
 182}
 183
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#ifdef CONFIG_PPC64
/* Read the task's saved DSCR value. */
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	*data = task->thread.dscr;
	return 0;
}

/* Set the task's DSCR and record that it was explicitly set (dscr_inherit). */
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	task->thread.dscr = dscr;
	task->thread.dscr_inherit = 1;
	return 0;
}
#else
/* No DSCR on 32-bit: reject access. */
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	return -EIO;
}

static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	return -EIO;
}
#endif
 208
/*
 * We prevent mucking around with the reserved area of trap
 * which are used internally by the kernel.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
	/* Low 4 bits are kernel-internal and always forced to zero. */
	task->thread.regs->trap = trap & 0xfff0;
	return 0;
}
 218
/*
 * Get contents of register REGNO in task TASK.
 * Returns 0 on success, -EIO if the task has no register state, @data is
 * NULL, or @regno is out of range.
 */
int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
{
	if ((task->thread.regs == NULL) || !data)
		return -EIO;

	/* MSR is synthesized: real MSR merged with the fp exception mode. */
	if (regno == PT_MSR) {
		*data = get_user_msr(task);
		return 0;
	}

	if (regno == PT_DSCR)
		return get_user_dscr(task, data);

	/*
	 * Plain word-indexed access into pt_regs.  The bound is a size_t,
	 * so a negative regno converts to a huge value and is rejected.
	 */
	if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
		*data = ((unsigned long *)task->thread.regs)[regno];
		return 0;
	}

	return -EIO;
}
 242
/*
 * Write contents of register REGNO in task TASK.
 * MSR, trap and DSCR go through filtered setters; other registers are
 * written directly, but only up to PT_MAX_PUT_REG.
 */
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
	if (task->thread.regs == NULL)
		return -EIO;

	if (regno == PT_MSR)
		return set_user_msr(task, data);
	if (regno == PT_TRAP)
		return set_user_trap(task, data);
	if (regno == PT_DSCR)
		return set_user_dscr(task, data);

	if (regno <= PT_MAX_PUT_REG) {
		((unsigned long *)task->thread.regs)[regno] = data;
		return 0;
	}
	return -EIO;
}
 264
/*
 * Regset callback: copy the GPR regset out of @target.  Copies pt_regs up
 * to msr, substitutes the synthesized user-visible MSR, then copies the
 * remainder of pt_regs and zero-fills any tail the caller asked for.
 */
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int i, ret;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/* We have a partial register set.  Fill 14-31 with bogus values */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}

	/* Everything before msr can be copied straight from pt_regs. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		/* msr slot gets the synthesized value, not the raw one. */
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	/* The copy below relies on orig_gpr3 immediately following msr. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct pt_regs));
	if (!ret)
		/* Anything requested beyond pt_regs is zero-filled. */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}
 305
/*
 * Regset callback: write the GPR regset into @target.  MSR and trap go
 * through their filtered setters; registers above PT_MAX_PUT_REG (except
 * trap) and everything past trap are silently ignored.
 */
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* GPRs and special registers up to (not including) msr. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		/* msr is filtered through set_user_msr(). */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	/* The copy below relies on orig_gpr3 immediately following msr. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	/* Skip (discard) any registers between PT_MAX_PUT_REG and trap. */
	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		/* trap is filtered through set_user_trap(). */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	if (!ret)
		/* Discard everything past trap. */
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
 360
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Regset callback: copy out FPR0..31 + FPSCR.  With VSX the FP values
 * are interleaved with VSX state, so they are gathered into a flat
 * local buffer first; without VSX, thread_fp_state already has the
 * regset layout and can be copied out directly.
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
	u64 buf[33];	/* fpr[0..31] + fpscr */
	int i;
#endif
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* copy to local buffer then write that out */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	buf[32] = target->thread.fp_state.fpscr;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);

#else
	/* Direct copy relies on fpscr immediately following fpr[31]. */
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32][0]));

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_state, 0, -1);
#endif
}
 386
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Regset callback: write FPR0..31 + FPSCR into @target.  Mirror image of
 * fpr_get(): with VSX the data is staged through a local buffer and only
 * scattered into the thread state after a successful copy-in.
 */
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
	u64 buf[33];	/* fpr[0..31] + fpscr */
	int i;
#endif
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;
	for (i = 0; i < 32 ; i++)
		target->thread.TS_FPR(i) = buf[i];
	target->thread.fp_state.fpscr = buf[32];
	return 0;
#else
	/* Direct copy relies on fpscr immediately following fpr[31]. */
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32][0]));

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fp_state, 0, -1);
#endif
}
 414
 415#ifdef CONFIG_ALTIVEC
 416/*
 417 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 418 * The transfer totals 34 quadword.  Quadwords 0-31 contain the
 419 * corresponding vector registers.  Quadword 32 contains the vscr as the
 420 * last word (offset 12) within that quadword.  Quadword 33 contains the
 421 * vrsave as the first word (offset 0) within the quadword.
 422 *
 423 * This definition of the VMX state is compatible with the current PPC32
 424 * ptrace interface.  This allows signal handling and ptrace to use the
 425 * same structures.  This also simplifies the implementation of a bi-arch
 426 * (combined (32- and 64-bit) gdb.
 427 */
 428
 429static int vr_active(struct task_struct *target,
 430		     const struct user_regset *regset)
 431{
 432	flush_altivec_to_thread(target);
 433	return target->thread.used_vr ? regset->n : 0;
 434}
 435
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Regset callback: copy out vr0..31 + vscr (quadwords 0-32), then vrsave
 * as the first word of quadword 33 (layout described above).
 */
static int vr_get(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  void *kbuf, void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	/* Direct copy relies on vscr immediately following vr[31]. */
	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.vr_state, 0,
				  33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
					  33 * sizeof(vector128), -1);
	}

	return ret;
}
 466
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Regset callback: write vr0..31 + vscr (quadwords 0-32) into @target,
 * then pick up vrsave from the first word of quadword 33 if supplied.
 */
static int vr_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	/* Direct copy relies on vscr immediately following vr[31]. */
	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.vr_state, 0,
				 33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the first word of vrsave.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
					 33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.vrsave = vrsave.word;
	}

	return ret;
}
 499#endif /* CONFIG_ALTIVEC */
 500
 501#ifdef CONFIG_VSX
 502/*
 503 * Currently to set and and get all the vsx state, you need to call
 504 * the fp and VMX calls as well.  This only get/sets the lower 32
 505 * 128bit VSX registers.
 506 */
 507
 508static int vsr_active(struct task_struct *target,
 509		      const struct user_regset *regset)
 510{
 511	flush_vsx_to_thread(target);
 512	return target->thread.used_vsr ? regset->n : 0;
 513}
 514
 
 
 
 
 
 
 
 
 
 
 
 
 515static int vsr_get(struct task_struct *target, const struct user_regset *regset,
 516		   unsigned int pos, unsigned int count,
 517		   void *kbuf, void __user *ubuf)
 518{
 519	u64 buf[32];
 520	int ret, i;
 521
 
 
 
 522	flush_vsx_to_thread(target);
 523
 524	for (i = 0; i < 32 ; i++)
 525		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
 
 526	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
 527				  buf, 0, 32 * sizeof(double));
 528
 529	return ret;
 530}
 531
 
 
 
 
 
 
 
 
 
 
 
 
 532static int vsr_set(struct task_struct *target, const struct user_regset *regset,
 533		   unsigned int pos, unsigned int count,
 534		   const void *kbuf, const void __user *ubuf)
 535{
 536	u64 buf[32];
 537	int ret,i;
 538
 
 
 
 539	flush_vsx_to_thread(target);
 540
 
 
 
 541	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 542				 buf, 0, 32 * sizeof(double));
 543	for (i = 0; i < 32 ; i++)
 544		target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 545
 546
 547	return ret;
 548}
 549#endif /* CONFIG_VSX */
 550
 551#ifdef CONFIG_SPE
 552
 553/*
 554 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 555 *
 556 * struct {
 557 *   u32 evr[32];
 558 *   u64 acc;
 559 *   u32 spefscr;
 560 * }
 561 */
 562
 563static int evr_active(struct task_struct *target,
 564		      const struct user_regset *regset)
 565{
 566	flush_spe_to_thread(target);
 567	return target->thread.used_spe ? regset->n : 0;
 568}
 569
/*
 * Regset callback: copy out evr[0..31], then acc and spefscr (layout
 * described above).
 */
static int evr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));

	/* The single copy below relies on spefscr immediately following acc. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.acc,
					  sizeof(target->thread.evr), -1);

	return ret;
}
 592
/*
 * Regset callback: write evr[0..31], then acc and spefscr, into @target.
 */
static int evr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));

	/* The single copy below relies on spefscr immediately following acc. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.acc,
					 sizeof(target->thread.evr), -1);

	return ret;
}
 615#endif /* CONFIG_SPE */
 616
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 617
/*
 * These are our native regset flavors.
 * Indices into native_regsets[] (and compat_regsets[] where applicable).
 */
enum powerpc_regset {
	REGSET_GPR,		/* general purpose registers */
	REGSET_FPR,		/* floating point registers */
#ifdef CONFIG_ALTIVEC
	REGSET_VMX,		/* AltiVec vector registers */
#endif
#ifdef CONFIG_VSX
	REGSET_VSX,		/* VSX register low halves */
#endif
#ifdef CONFIG_SPE
	REGSET_SPE,		/* SPE registers */
#endif
};
 634
/*
 * Native regset descriptions, indexed by enum powerpc_regset.  Each entry
 * binds an ELF core-note type to the get/set (and optional active)
 * callbacks defined above.
 */
static const struct user_regset native_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.get = gpr_get, .set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		/* 34 quadwords: vr0-31, vscr, vrsave (see vr_get). */
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_VSX
	[REGSET_VSX] = {
		.core_note_type = NT_PPC_VSX, .n = 32,
		.size = sizeof(double), .align = sizeof(double),
		.active = vsr_active, .get = vsr_get, .set = vsr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		/* 35 words: evr[32], acc (2 words), spefscr (see evr_get). */
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
};
 668
/* Regset view for native (same-width) tasks. */
static const struct user_regset_view user_ppc_native_view = {
	.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
 673
 674#ifdef CONFIG_PPC64
 675#include <linux/compat.h>
 676
 677static int gpr32_get(struct task_struct *target,
 678		     const struct user_regset *regset,
 679		     unsigned int pos, unsigned int count,
 680		     void *kbuf, void __user *ubuf)
 
 681{
 682	const unsigned long *regs = &target->thread.regs->gpr[0];
 683	compat_ulong_t *k = kbuf;
 684	compat_ulong_t __user *u = ubuf;
 685	compat_ulong_t reg;
 686	int i;
 687
 688	if (target->thread.regs == NULL)
 689		return -EIO;
 690
 691	if (!FULL_REGS(target->thread.regs)) {
 692		/* We have a partial register set.  Fill 14-31 with bogus values */
 693		for (i = 14; i < 32; i++)
 694			target->thread.regs->gpr[i] = NV_REG_POISON; 
 695	}
 696
 697	pos /= sizeof(reg);
 698	count /= sizeof(reg);
 699
 700	if (kbuf)
 701		for (; count > 0 && pos < PT_MSR; --count)
 702			*k++ = regs[pos++];
 703	else
 704		for (; count > 0 && pos < PT_MSR; --count)
 705			if (__put_user((compat_ulong_t) regs[pos++], u++))
 706				return -EFAULT;
 707
 708	if (count > 0 && pos == PT_MSR) {
 709		reg = get_user_msr(target);
 710		if (kbuf)
 711			*k++ = reg;
 712		else if (__put_user(reg, u++))
 713			return -EFAULT;
 714		++pos;
 715		--count;
 716	}
 717
 718	if (kbuf)
 719		for (; count > 0 && pos < PT_REGS_COUNT; --count)
 720			*k++ = regs[pos++];
 721	else
 722		for (; count > 0 && pos < PT_REGS_COUNT; --count)
 723			if (__put_user((compat_ulong_t) regs[pos++], u++))
 724				return -EFAULT;
 725
 726	kbuf = k;
 727	ubuf = u;
 728	pos *= sizeof(reg);
 729	count *= sizeof(reg);
 730	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
 731					PT_REGS_COUNT * sizeof(reg), -1);
 732}
 733
/*
 * Regset "set" handler for the 32-bit (compat) GPR view: stream
 * compat_ulong_t values from @kbuf (kernel) or @ubuf (user) into the
 * target's 64-bit pt_regs, zero-extending each 32-bit value.
 *
 * Special slots are filtered rather than stored directly:
 *   - PT_MSR goes through set_user_msr() so only user-modifiable MSR
 *     bits can change,
 *   - slots after PT_MAX_PUT_REG up to PT_TRAP are consumed and
 *     discarded,
 *   - PT_TRAP goes through set_user_trap(),
 *   - everything past PT_TRAP is skipped via user_regset_copyin_ignore().
 *
 * Returns 0 on success, -EIO if the target has no register state, or
 * -EFAULT on a failed user-space access.
 */
static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	/*
	 * NOTE(review): gpr[0]'s address is computed before the NULL check
	 * below.  It is only an address computation (no load), but hoisting
	 * it after the check would be tidier.
	 */
	unsigned long *regs = &target->thread.regs->gpr[0];
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* Convert byte offsets into 32-bit register indices. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	/* gpr0 .. gpr31: plain stores, zero-extended to 64 bits. */
	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			regs[pos++] = *k++;
	else
		for (; count > 0 && pos < PT_MSR; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}

	/* MSR: filter through set_user_msr(). */
	if (count > 0 && pos == PT_MSR) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_msr(target, reg);
		++pos;
		--count;
	}

	if (kbuf) {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
			regs[pos++] = *k++;
		/* Slots we refuse to write: consume and discard. */
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			++k;
	} else {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}
		/* Discarded slots are still fault-checked. */
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			if (__get_user(reg, u++))
				return -EFAULT;
	}

	/* TRAP: filter through set_user_trap(). */
	if (count > 0 && pos == PT_TRAP) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_trap(target, reg);
		++pos;
		--count;
	}

	/* Back to byte units; silently ignore anything past PT_TRAP. */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 (PT_TRAP + 1) * sizeof(reg), -1);
}
 806
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 807/*
 808 * These are the regset flavors matching the CONFIG_PPC32 native set.
 809 */
static const struct user_regset compat_regsets[] = {
	[REGSET_GPR] = {
		/* GPRs exposed as 32-bit (compat) words via gpr32_get/set. */
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FPR] = {
		/* FP state is 64-bit on both ABIs; reuse the native handlers. */
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		/* 34 entries: presumably 32 VRs + VSCR + VRSAVE — confirm
		 * against vr_get(). */
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		/* 35 u32 slots: presumably 32 evr + 2-word acc + spefscr —
		 * confirm against evr_get(). */
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
};
 836
/* Regset view advertised to tracers for 32-bit tasks on a 64-bit kernel. */
static const struct user_regset_view user_ppc_compat_view = {
	.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
 841#endif	/* CONFIG_PPC64 */
 842
/*
 * Pick the regset view matching the tracee's ABI: the compat (ppc32)
 * view for a 32-bit task on a 64-bit kernel, the native view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_PPC64
	if (test_tsk_thread_flag(task, TIF_32BIT))
		return &user_ppc_compat_view;
#endif
	return &user_ppc_native_view;
}
 851
 852
/*
 * Arrange for @task to trap after each instruction (PTRACE_SINGLESTEP).
 * On CONFIG_PPC_ADV_DEBUG_REGS hardware this enables the
 * instruction-complete debug event (DBCR0[IDM|IC]) and MSR[DE]; otherwise
 * it sets MSR[SE], clearing branch-trace MSR[BE] since the two trace
 * modes are swapped as a pair here and in user_enable_block_step().
 */
void user_enable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/* Instruction-complete event, not branch-taken. */
		task->thread.debug.dbcr0 &= ~DBCR0_BT;
		task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_BE;
		regs->msr |= MSR_SE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
 869
 870void user_enable_block_step(struct task_struct *task)
 871{
 872	struct pt_regs *regs = task->thread.regs;
 873
 874	if (regs != NULL) {
 875#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 876		task->thread.debug.dbcr0 &= ~DBCR0_IC;
 877		task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
 878		regs->msr |= MSR_DE;
 879#else
 880		regs->msr &= ~MSR_SE;
 881		regs->msr |= MSR_BE;
 882#endif
 883	}
 884	set_tsk_thread_flag(task, TIF_SINGLESTEP);
 885}
 886
/*
 * Undo user_enable_single_step()/user_enable_block_step(): clear the
 * single-step and branch-trace controls for @task and drop
 * TIF_SINGLESTEP.
 */
void user_disable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/*
		 * The logic to disable single stepping should be as
		 * simple as turning off the Instruction Complete flag.
		 * And, after doing so, if all debug flags are off, turn
		 * off DBCR0(IDM) and MSR(DE) .... Torez
		 */
		task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
		/*
		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
		 */
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			/*
			 * All debug events were off.....
			 */
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
			regs->msr &= ~MSR_DE;
		}
#else
		regs->msr &= ~(MSR_SE | MSR_BE);
#endif
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
 917
 918#ifdef CONFIG_HAVE_HW_BREAKPOINT
 919void ptrace_triggered(struct perf_event *bp,
 920		      struct perf_sample_data *data, struct pt_regs *regs)
 921{
 922	struct perf_event_attr attr;
 923
 924	/*
 925	 * Disable the breakpoint request here since ptrace has defined a
 926	 * one-shot behaviour for breakpoint exceptions in PPC64.
 927	 * The SIGTRAP signal is generated automatically for us in do_dabr().
 928	 * We don't have to do anything about that here
 929	 */
 930	attr = bp->attr;
 931	attr.disabled = true;
 932	modify_user_hw_breakpoint(bp, &attr);
 933}
 934#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 935
/*
 * PTRACE_SET_DEBUGREG backend: program the single supported data
 * breakpoint from a DABR-style @data value (@addr must be 0 as only
 * one register is exposed).
 *
 * On non-ADV_DEBUG hardware the request is converted to an
 * arch_hw_breakpoint and, when CONFIG_HAVE_HW_BREAKPOINT, routed
 * through the perf hw-breakpoint layer; on ADV_DEBUG (BookE) hardware
 * it is programmed into DAC1/DBCR directly.
 *
 * Returns 0 on success, -EINVAL/-EIO on bad input, or an error from the
 * perf breakpoint layer.
 */
static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
			       unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret;
	struct thread_struct *thread = &(task->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	struct arch_hw_breakpoint hw_brk;
#endif

	/* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
	 *  For embedded processors we support one DAC and no IAC's at the
	 *  moment.
	 */
	if (addr > 0)
		return -EINVAL;

	/* The bottom 3 bits in dabr are flags */
	if ((data & ~0x7UL) >= TASK_SIZE)
		return -EIO;

#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
	 *  It was assumed, on previous implementations, that 3 bits were
	 *  passed together with the data address, fitting the design of the
	 *  DABR register, as follows:
	 *
	 *  bit 0: Read flag
	 *  bit 1: Write flag
	 *  bit 2: Breakpoint translation
	 *
	 *  Thus, we use them here as so.
	 */

	/* Ensure breakpoint translation bit is set */
	if (data && !(data & HW_BRK_TYPE_TRANSLATE))
		return -EIO;
	hw_brk.address = data & (~HW_BRK_TYPE_DABR);
	hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
	hw_brk.len = 8;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	/* data == 0 or no R/W bits: the caller is clearing the breakpoint. */
	if ((!data) || !(hw_brk.type & HW_BRK_TYPE_RDWR)) {
		if (bp) {
			unregister_hw_breakpoint(bp);
			thread->ptrace_bps[0] = NULL;
		}
		return 0;
	}
	/* A breakpoint already exists: just retarget and re-enable it. */
	if (bp) {
		attr = bp->attr;
		attr.bp_addr = hw_brk.address;
		arch_bp_generic_fields(hw_brk.type, &attr.bp_type);

		/* Enable breakpoint */
		attr.disabled = false;

		ret =  modify_user_hw_breakpoint(bp, &attr);
		if (ret) {
			return ret;
		}
		thread->ptrace_bps[0] = bp;
		thread->hw_brk = hw_brk;
		return 0;
	}

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = hw_brk.address;
	arch_bp_generic_fields(hw_brk.type,
			       &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, task);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	task->thread.hw_brk = hw_brk;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
	/* As described above, it was assumed 3 bits were passed with the data
	 *  address, but we will assume only the mode bits will be passed
	 *  as to not cause alignment restrictions for DAC-based processors.
	 */

	/* DAC's hold the whole address without any mode flags */
	task->thread.debug.dac1 = data & ~0x3UL;

	if (task->thread.debug.dac1 == 0) {
		/* Clearing the breakpoint; drop IDM/MSR_DE if nothing else
		 * is using the debug unit. */
		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			task->thread.regs->msr &= ~MSR_DE;
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}
		return 0;
	}

	/* Read or Write bits must be set */

	if (!(data & 0x3UL))
		return -EINVAL;

	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
	   register */
	task->thread.debug.dbcr0 |= DBCR0_IDM;

	/* Check for write and read flags and set DBCR0
	   accordingly */
	dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
	if (data & 0x1UL)
		dbcr_dac(task) |= DBCR_DAC1R;
	if (data & 0x2UL)
		dbcr_dac(task) |= DBCR_DAC1W;
	task->thread.regs->msr |= MSR_DE;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
	return 0;
}
1059
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}
1070
1071#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Allocate IAC (instruction address compare) slot(s) for an instruction
 * breakpoint.  Ranges consume a hardware pair (IAC1/2 or IAC3/4); exact
 * breakpoints take a single slot, preferring not to break up a free
 * pair.  Returns the (1-based) slot number on success, -EIO for a bad
 * address, or -ENOSPC when no suitable slot is free.
 */
static long set_instruction_bp(struct task_struct *child,
			      struct ppc_hw_breakpoint *bp_info)
{
	int slot;
	int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
	int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
	int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
	int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);

	/* A pair programmed in range mode occupies both of its slots. */
	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
		slot2_in_use = 1;
	if (dbcr_iac_range(child) & DBCR_IAC34MODE)
		slot4_in_use = 1;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {

		/* Make sure range is valid. */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;

		/* We need a pair of IAC regsisters */
		if ((!slot1_in_use) && (!slot2_in_use)) {
			slot = 1;
			child->thread.debug.iac1 = bp_info->addr;
			child->thread.debug.iac2 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC1;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC12X;
			else
				dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if ((!slot3_in_use) && (!slot4_in_use)) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.iac4 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC34X;
			else
				dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
		} else
			return -ENOSPC;
	} else {
		/* We only need one.  If possible leave a pair free in
		 * case a range is needed later
		 */
		if (!slot1_in_use) {
			/*
			 * Don't use iac1 if iac1-iac2 are free and either
			 * iac3 or iac4 (but not both) are free
			 */
			if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
				slot = 1;
				child->thread.debug.iac1 = bp_info->addr;
				child->thread.debug.dbcr0 |= DBCR0_IAC1;
				goto out;
			}
		}
		if (!slot2_in_use) {
			slot = 2;
			child->thread.debug.iac2 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if (!slot3_in_use) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
		} else if (!slot4_in_use) {
			slot = 4;
			child->thread.debug.iac4 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC4;
#endif
		} else
			return -ENOSPC;
	}
out:
	/* Arm the debug unit for the child. */
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot;
}
1159
/*
 * Free the IAC slot @slot previously handed out by set_instruction_bp().
 * Deleting the first slot of a range pair tears down the whole range;
 * deleting the second slot of an active range is rejected with -EINVAL.
 * Returns 0, -ENOENT if the slot is not in use, or -EINVAL.
 */
static int del_instruction_bp(struct task_struct *child, int slot)
{
	switch (slot) {
	case 1:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
			/* address range - clear slots 1 & 2 */
			child->thread.debug.iac2 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
		}
		child->thread.debug.iac1 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		break;
	case 2:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
			/* used in a range */
			return -EINVAL;
		child->thread.debug.iac2 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 3:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
			/* address range - clear slots 3 & 4 */
			child->thread.debug.iac4 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
		}
		child->thread.debug.iac3 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		break;
	case 4:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
			/* Used in a range */
			return -EINVAL;
		child->thread.debug.iac4 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		break;
#endif
	default:
		return -EINVAL;
	}
	return 0;
}
1214
/*
 * Allocate a DAC (data address compare) slot for an exact-address data
 * breakpoint, optionally with a DVC (data value compare) condition when
 * the hardware provides one.  Slot numbers returned to userspace are
 * offset by 4 so they do not collide with IAC slot numbers (see
 * ppc_del_hwdebug(), which subtracts 4 again).
 *
 * Returns slot+4 (i.e. 5 or 6), -EINVAL for an inconsistent condition,
 * -EIO for a bad address, or -ENOSPC when both DACs are busy.
 */
static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
	int byte_enable =
		(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
		& 0xf;
	int condition_mode =
		bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
	int slot;

	/* Byte enables are meaningless without a condition mode. */
	if (byte_enable && (condition_mode == 0))
		return -EINVAL;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
		slot = 1;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC1R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC1W;
		child->thread.debug.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc1 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC1BE_SHIFT) |
				 (condition_mode << DBCR2_DVC1M_SHIFT));
		}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
		/* Both dac1 and dac2 are part of a range */
		return -ENOSPC;
#endif
	} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
		slot = 2;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC2R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC2W;
		child->thread.debug.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc2 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC2BE_SHIFT) |
				 (condition_mode << DBCR2_DVC2M_SHIFT));
		}
#endif
	} else
		return -ENOSPC;
	/* Arm the debug unit for the child. */
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot + 4;
}
1274
/*
 * Free DAC slot @slot (1 or 2, i.e. the userspace handle minus 4).
 * Removing slot 1 while a DAC range is active tears down the whole
 * range; removing slot 2 of an active range is rejected.  Any attached
 * DVC condition is cleared with the slot.  Returns 0, -ENOENT, or
 * -EINVAL.
 */
static int del_dac(struct task_struct *child, int slot)
{
	if (slot == 1) {
		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
			return -ENOENT;

		child->thread.debug.dac1 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
			child->thread.debug.dac2 = 0;
			child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
		}
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc1 = 0;
#endif
	} else if (slot == 2) {
		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
			return -ENOENT;

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
			/* Part of a range */
			return -EINVAL;
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc2 = 0;
#endif
		child->thread.debug.dac2 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
	} else
		return -EINVAL;

	return 0;
}
1313#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1314
1315#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
/*
 * Program the DAC1/DAC2 pair as a data-address range or mask watchpoint
 * (inclusive range, exclusive range, or address mask depending on
 * @bp_info->addr_mode).  DVC conditions cannot be combined with ranges.
 * Returns the fixed userspace slot number 5, or -EINVAL/-EIO/-ENOSPC.
 */
static int set_dac_range(struct task_struct *child,
			 struct ppc_hw_breakpoint *bp_info)
{
	int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;

	/* We don't allow range watchpoints to be used with DVC */
	if (bp_info->condition_mode)
		return -EINVAL;

	/*
	 * Best effort to verify the address range.  The user/supervisor bits
	 * prevent trapping in kernel space, but let's fail on an obvious bad
	 * range.  The simple test on the mask is not fool-proof, and any
	 * exclusive range will spill over into kernel space.
	 */
	if (bp_info->addr >= TASK_SIZE)
		return -EIO;
	if (mode == PPC_BREAKPOINT_MODE_MASK) {
		/*
		 * dac2 is a bitmask.  Don't allow a mask that makes a
		 * kernel space address from a valid dac1 value
		 */
		if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
			return -EIO;
	} else {
		/*
		 * For range breakpoints, addr2 must also be a valid address
		 */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;
	}

	/* The range needs both DACs; fail if either is already in use. */
	if (child->thread.debug.dbcr0 &
	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
		return -ENOSPC;

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
	child->thread.debug.dac1 = bp_info->addr;
	child->thread.debug.dac2 = bp_info->addr2;
	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		child->thread.debug.dbcr2  |= DBCR2_DAC12M;
	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
		child->thread.debug.dbcr2  |= DBCR2_DAC12MX;
	else	/* PPC_BREAKPOINT_MODE_MASK */
		child->thread.debug.dbcr2  |= DBCR2_DAC12MM;
	child->thread.regs->msr |= MSR_DE;

	return 5;
}
1368#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
1369
/*
 * PPC_PTRACE_SETHWDEBUG backend: validate @bp_info and install the
 * requested hardware breakpoint/watchpoint, dispatching to the
 * IAC/DAC helpers on CONFIG_PPC_ADV_DEBUG_REGS hardware, or to the
 * single DABR-style slot (via perf when CONFIG_HAVE_HW_BREAKPOINT)
 * otherwise.  Returns a positive slot handle for ppc_del_hwdebug(),
 * or a negative errno.
 */
static long ppc_set_hwdebug(struct task_struct *child,
		     struct ppc_hw_breakpoint *bp_info)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int len = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	struct arch_hw_breakpoint brk;
#endif

	if (bp_info->version != 1)
		return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * Check for invalid flags and combinations
	 */
	if ((bp_info->trigger_type == 0) ||
	    (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
				       PPC_BREAKPOINT_TRIGGER_RW)) ||
	    (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
	    (bp_info->condition_mode &
	     ~(PPC_BREAKPOINT_CONDITION_MODE |
	       PPC_BREAKPOINT_CONDITION_BE_ALL)))
		return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
	if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;
#endif

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
		/* Execute triggers must be pure and unconditional. */
		if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
		    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
			return -EINVAL;
		return set_instruction_bp(child, bp_info);
	}
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		return set_dac(child, bp_info);

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	return set_dac_range(child, bp_info);
#else
	return -EINVAL;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
	/*
	 * We only support one data breakpoint
	 */
	if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
	    (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
	    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;

	if ((unsigned long)bp_info->addr >= TASK_SIZE)
		return -EIO;

	brk.address = bp_info->addr & ~7UL;
	brk.type = HW_BRK_TYPE_TRANSLATE;
	brk.len = 8;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		brk.type |= HW_BRK_TYPE_READ;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		brk.type |= HW_BRK_TYPE_WRITE;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/*
	 * Check if the request is for 'range' breakpoints. We can
	 * support it if range < 8 bytes.
	 */
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		len = bp_info->addr2 - bp_info->addr;
	else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		len = 1;
	else
		return -EINVAL;
	bp = thread->ptrace_bps[0];
	if (bp)
		return -ENOSPC;

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
	attr.bp_len = len;
	arch_bp_generic_fields(brk.type, &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, child);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

	return 1;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	/* Fallback path when the perf hw-breakpoint layer is not built:
	 * program thread.hw_brk directly (exact mode only, one slot). */
	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
		return -EINVAL;

	if (child->thread.hw_brk.address)
		return -ENOSPC;

	child->thread.hw_brk = brk;

	return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_REGS */
}
1477
/*
 * PPC_PTRACE_DELHWDEBUG backend: release the breakpoint slot handle
 * @data previously returned by ppc_set_hwdebug().  On ADV_DEBUG
 * hardware handles 1-4 are IAC slots and 5-6 are DAC slots (hence the
 * -4 below); the debug unit is disarmed when the last event goes away.
 * Otherwise only handle 1 (the single data breakpoint) is valid.
 */
static long ppc_del_hwdebug(struct task_struct *child, long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	int rc;

	if (data <= 4)
		rc = del_instruction_bp(child, (int)data);
	else
		rc = del_dac(child, (int)data - 4);

	if (!rc) {
		/* If that was the last active event, disarm the unit. */
		if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
					child->thread.debug.dbcr1)) {
			child->thread.debug.dbcr0 &= ~DBCR0_IDM;
			child->thread.regs->msr &= ~MSR_DE;
		}
	}
	return rc;
#else
	if (data != 1)
		return -EINVAL;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if (bp) {
		unregister_hw_breakpoint(bp);
		thread->ptrace_bps[0] = NULL;
	} else
		ret = -ENOENT;
	return ret;
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	if (child->thread.hw_brk.address == 0)
		return -ENOENT;

	child->thread.hw_brk.address = 0;
	child->thread.hw_brk.type = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	return 0;
#endif
}
1524
/*
 * PowerPC-specific ptrace request dispatcher.  Handles the PEEKUSR/
 * POKEUSR register window, the PPC hardware-debug extensions, and the
 * bulk regset transfers; everything else falls through to the generic
 * ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			/* GPR/special-register area. */
			ret = ptrace_get_reg(child, (int) index, &tmp);
			if (ret)
				break;
		} else {
			/* FP area: flush live FP state to the thread first. */
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
				       sizeof(long));
			else
				tmp = child->thread.fp_state.fpscr;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_put_reg(child, index, data);
		} else {
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&child->thread.TS_FPR(fpidx), &data,
				       sizeof(long));
			else
				child->thread.fp_state.fpscr = data;
			ret = 0;
		}
		break;
	}

	/* Describe the hardware debug facilities to the tracer. */
	case PPC_PTRACE_GETHWDBGINFO: {
		struct ppc_debug_info dbginfo;

		dbginfo.version = 1;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
		dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
		dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
		dbginfo.data_bp_alignment = 4;
		dbginfo.sizeof_condition = 4;
		dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
				   PPC_DEBUG_FEATURE_INSN_BP_MASK;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		dbginfo.features |=
				   PPC_DEBUG_FEATURE_DATA_BP_RANGE |
				   PPC_DEBUG_FEATURE_DATA_BP_MASK;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
		dbginfo.num_instruction_bps = 0;
		dbginfo.num_data_bps = 1;
		dbginfo.num_condition_regs = 0;
#ifdef CONFIG_PPC64
		dbginfo.data_bp_alignment = 8;
#else
		dbginfo.data_bp_alignment = 4;
#endif
		dbginfo.sizeof_condition = 0;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
		if (cpu_has_feature(CPU_FTR_DAWR))
			dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
#else
		dbginfo.features = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

		if (!access_ok(VERIFY_WRITE, datavp,
			       sizeof(struct ppc_debug_info)))
			return -EFAULT;
		ret = __copy_to_user(datavp, &dbginfo,
				     sizeof(struct ppc_debug_info)) ?
		      -EFAULT : 0;
		break;
	}

	case PPC_PTRACE_SETHWDEBUG: {
		struct ppc_hw_breakpoint bp_info;

		if (!access_ok(VERIFY_READ, datavp,
			       sizeof(struct ppc_hw_breakpoint)))
			return -EFAULT;
		ret = __copy_from_user(&bp_info, datavp,
				       sizeof(struct ppc_hw_breakpoint)) ?
		      -EFAULT : 0;
		if (!ret)
			ret = ppc_set_hwdebug(child, &bp_info);
		break;
	}

	case PPC_PTRACE_DELHWDEBUG: {
		ret = ppc_del_hwdebug(child, data);
		break;
	}

	case PTRACE_GET_DEBUGREG: {
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
		unsigned long dabr_fake;
#endif
		ret = -EINVAL;
		/* We only support one DABR and no IABRS at the moment */
		if (addr > 0)
			break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		ret = put_user(child->thread.debug.dac1, datalp);
#else
		/* Reconstruct a DABR-style value from the stored
		 * arch_hw_breakpoint for legacy tracers. */
		dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
			     (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
		ret = put_user(dabr_fake, datalp);
#endif
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

#ifdef CONFIG_PPC64
	case PTRACE_GETREGS64:
#endif
	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR,
					   0, sizeof(struct pt_regs),
					   datavp);

#ifdef CONFIG_PPC64
	case PTRACE_SETREGS64:
#endif
	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR,
					     0, sizeof(struct pt_regs),
					     datavp);

	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR,
					   0, sizeof(elf_fpregset_t),
					   datavp);

	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR,
					     0, sizeof(elf_fpregset_t),
					     datavp);

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VMX,
					   0, (33 * sizeof(vector128) +
					       sizeof(u32)),
					   datavp);

	case PTRACE_SETVRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VMX,
					     0, (33 * sizeof(vector128) +
						 sizeof(u32)),
					     datavp);
#endif
#ifdef CONFIG_VSX
	case PTRACE_GETVSRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VSX,
					   0, 32 * sizeof(double),
					   datavp);

	case PTRACE_SETVSRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VSX,
					     0, 32 * sizeof(double),
					     datavp);
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_SPE, 0, 35 * sizeof(u32),
					   datavp);

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_SPE, 0, 35 * sizeof(u32),
					     datavp);
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}
1765
#ifdef CONFIG_SECCOMP
/*
 * Run seccomp filtering for the syscall being entered.  Returns 0 if the
 * syscall may proceed, -1 if it was blocked — in which case r3 already
 * holds whatever value seccomp wants returned to userspace.
 */
static int do_seccomp(struct pt_regs *regs)
{
	if (!test_thread_flag(TIF_SECCOMP))
		return 0;

	/*
	 * The ABI we present to seccomp tracers is that r3 contains
	 * the syscall return value and orig_gpr3 contains the first
	 * syscall parameter. This is different to the ptrace ABI where
	 * both r3 and orig_gpr3 contain the first syscall parameter.
	 */
	regs->gpr[3] = -ENOSYS;

	/*
	 * We use the __ version here because we have already checked
	 * TIF_SECCOMP. If this fails, there is nothing left to do, we
	 * have already loaded -ENOSYS into r3, or seccomp has put
	 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
	 */
	if (__secure_computing())
		return -1;

	/*
	 * The syscall was allowed by seccomp, restore the register
	 * state to what ptrace and audit expect.
	 * Note that we use orig_gpr3, which means a seccomp tracer can
	 * modify the first syscall parameter (in orig_gpr3) and also
	 * allow the syscall to proceed.
	 */
	regs->gpr[3] = regs->orig_gpr3;

	return 0;
}
#else
static inline int do_seccomp(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SECCOMP */
1803
/**
 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
 * @regs: the pt_regs of the task to trace (current)
 *
 * Performs various types of tracing on syscall entry. This includes seccomp,
 * ptrace, syscall tracepoints and audit.
 *
 * The pt_regs are potentially visible to userspace via ptrace, so their
 * contents is ABI.
 *
 * One or more of the tracers may modify the contents of pt_regs, in particular
 * to modify arguments or even the syscall number itself.
 *
 * It's also possible that a tracer can choose to reject the system call. In
 * that case this function will return an illegal syscall number, and will put
 * an appropriate return value in regs->r3.
 *
 * Return: the (possibly changed) syscall number.
 */
long do_syscall_trace_enter(struct pt_regs *regs)
{
	bool abort = false;

	/* Tell context tracking we left userspace (NO_HZ_FULL). */
	user_exit();

	/* Seccomp may block the syscall outright; r3 is already set. */
	if (do_seccomp(regs))
		return -1;

	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		/*
		 * The tracer may decide to abort the syscall, if so tracehook
		 * will return !0. Note that the tracer may also just change
		 * regs->gpr[0] to an invalid syscall number, that is handled
		 * below on the exit path.
		 */
		abort = tracehook_report_syscall_entry(regs) != 0;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gpr[0]);

#ifdef CONFIG_PPC64
	if (!is_32bit_task())
		audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
				    regs->gpr[5], regs->gpr[6]);
	else
#endif
		/* 32-bit task: audit only the low 32 bits of each argument. */
		audit_syscall_entry(regs->gpr[0],
				    regs->gpr[3] & 0xffffffff,
				    regs->gpr[4] & 0xffffffff,
				    regs->gpr[5] & 0xffffffff,
				    regs->gpr[6] & 0xffffffff);

	if (abort || regs->gpr[0] >= NR_syscalls) {
		/*
		 * If we are aborting explicitly, or if the syscall number is
		 * now invalid, set the return value to -ENOSYS.
		 */
		regs->gpr[3] = -ENOSYS;
		return -1;
	}

	/* Return the possibly modified but valid syscall number */
	return regs->gpr[0];
}
1869
1870void do_syscall_trace_leave(struct pt_regs *regs)
1871{
1872	int step;
1873
1874	audit_syscall_exit(regs);
1875
1876	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1877		trace_sys_exit(regs, regs->result);
1878
1879	step = test_thread_flag(TIF_SINGLESTEP);
1880	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1881		tracehook_report_syscall_exit(regs, step);
1882
1883	user_enter();
1884}