Linux Audio

Check our new training course

Loading...
v3.1
   1/*
   2 * Kernel support for the ptrace() and syscall tracing interfaces.
   3 *
   4 * Copyright (C) 1999-2005 Hewlett-Packard Co
   5 *	David Mosberger-Tang <davidm@hpl.hp.com>
   6 * Copyright (C) 2006 Intel Co
   7 *  2006-08-12	- IA64 Native Utrace implementation support added by
   8 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
   9 *
  10 * Derived from the x86 and Alpha versions.
  11 */
  12#include <linux/kernel.h>
  13#include <linux/sched.h>
  14#include <linux/mm.h>
  15#include <linux/errno.h>
  16#include <linux/ptrace.h>
  17#include <linux/user.h>
  18#include <linux/security.h>
  19#include <linux/audit.h>
  20#include <linux/signal.h>
  21#include <linux/regset.h>
  22#include <linux/elf.h>
  23#include <linux/tracehook.h>
  24
  25#include <asm/pgtable.h>
  26#include <asm/processor.h>
  27#include <asm/ptrace_offsets.h>
  28#include <asm/rse.h>
  29#include <asm/system.h>
  30#include <asm/uaccess.h>
  31#include <asm/unwind.h>
  32#ifdef CONFIG_PERFMON
  33#include <asm/perfmon.h>
  34#endif
  35
  36#include "entry.h"
  37
  38/*
  39 * Bits in the PSR that we allow ptrace() to change:
  40 *	be, up, ac, mfl, mfh (the user mask; five bits total)
  41 *	db (debug breakpoint fault; one bit)
  42 *	id (instruction debug fault disable; one bit)
  43 *	dd (data debug fault disable; one bit)
  44 *	ri (restart instruction; two bits)
  45 *	is (instruction set; one bit)
  46 */
  47#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
  48		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
  49
  50#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
  51#define PFM_MASK	MASK(38)
  52
  53#define PTRACE_DEBUG	0
  54
  55#if PTRACE_DEBUG
  56# define dprintk(format...)	printk(format)
  57# define inline
  58#else
  59# define dprintk(format...)
  60#endif
  61
  62/* Return TRUE if PT was created due to kernel-entry via a system-call.  */
  63
  64static inline int
  65in_syscall (struct pt_regs *pt)
  66{
  67	return (long) pt->cr_ifs >= 0;
  68}
  69
  70/*
  71 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
  72 * bitset where bit i is set iff the NaT bit of register i is set.
  73 */
  74unsigned long
  75ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
  76{
  77#	define GET_BITS(first, last, unat)				\
  78	({								\
  79		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
  80		unsigned long nbits = (last - first + 1);		\
  81		unsigned long mask = MASK(nbits) << first;		\
  82		unsigned long dist;					\
  83		if (bit < first)					\
  84			dist = 64 + bit - first;			\
  85		else							\
  86			dist = bit - first;				\
  87		ia64_rotr(unat, dist) & mask;				\
  88	})
  89	unsigned long val;
  90
  91	/*
  92	 * Registers that are stored consecutively in struct pt_regs
  93	 * can be handled in parallel.  If the register order in
  94	 * struct_pt_regs changes, this code MUST be updated.
  95	 */
  96	val  = GET_BITS( 1,  1, scratch_unat);
  97	val |= GET_BITS( 2,  3, scratch_unat);
  98	val |= GET_BITS(12, 13, scratch_unat);
  99	val |= GET_BITS(14, 14, scratch_unat);
 100	val |= GET_BITS(15, 15, scratch_unat);
 101	val |= GET_BITS( 8, 11, scratch_unat);
 102	val |= GET_BITS(16, 31, scratch_unat);
 103	return val;
 104
 105#	undef GET_BITS
 106}
 107
 108/*
 109 * Set the NaT bits for the scratch registers according to NAT and
 110 * return the resulting unat (assuming the scratch registers are
 111 * stored in PT).
 112 */
 113unsigned long
 114ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
 115{
 116#	define PUT_BITS(first, last, nat)				\
 117	({								\
 118		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
 119		unsigned long nbits = (last - first + 1);		\
 120		unsigned long mask = MASK(nbits) << first;		\
 121		long dist;						\
 122		if (bit < first)					\
 123			dist = 64 + bit - first;			\
 124		else							\
 125			dist = bit - first;				\
 126		ia64_rotl(nat & mask, dist);				\
 127	})
 128	unsigned long scratch_unat;
 129
 130	/*
 131	 * Registers that are stored consecutively in struct pt_regs
 132	 * can be handled in parallel.  If the register order in
 133	 * struct_pt_regs changes, this code MUST be updated.
 134	 */
 135	scratch_unat  = PUT_BITS( 1,  1, nat);
 136	scratch_unat |= PUT_BITS( 2,  3, nat);
 137	scratch_unat |= PUT_BITS(12, 13, nat);
 138	scratch_unat |= PUT_BITS(14, 14, nat);
 139	scratch_unat |= PUT_BITS(15, 15, nat);
 140	scratch_unat |= PUT_BITS( 8, 11, nat);
 141	scratch_unat |= PUT_BITS(16, 31, nat);
 142
 143	return scratch_unat;
 144
 145#	undef PUT_BITS
 146}
 147
 148#define IA64_MLX_TEMPLATE	0x2
 149#define IA64_MOVL_OPCODE	6
 150
 151void
 152ia64_increment_ip (struct pt_regs *regs)
 153{
 154	unsigned long w0, ri = ia64_psr(regs)->ri + 1;
 155
 156	if (ri > 2) {
 157		ri = 0;
 158		regs->cr_iip += 16;
 159	} else if (ri == 2) {
 160		get_user(w0, (char __user *) regs->cr_iip + 0);
 161		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
 162			/*
 163			 * rfi'ing to slot 2 of an MLX bundle causes
 164			 * an illegal operation fault.  We don't want
 165			 * that to happen...
 166			 */
 167			ri = 0;
 168			regs->cr_iip += 16;
 169		}
 170	}
 171	ia64_psr(regs)->ri = ri;
 172}
 173
 174void
 175ia64_decrement_ip (struct pt_regs *regs)
 176{
 177	unsigned long w0, ri = ia64_psr(regs)->ri - 1;
 178
 179	if (ia64_psr(regs)->ri == 0) {
 180		regs->cr_iip -= 16;
 181		ri = 2;
 182		get_user(w0, (char __user *) regs->cr_iip + 0);
 183		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
 184			/*
 185			 * rfi'ing to slot 2 of an MLX bundle causes
 186			 * an illegal operation fault.  We don't want
 187			 * that to happen...
 188			 */
 189			ri = 1;
 190		}
 191	}
 192	ia64_psr(regs)->ri = ri;
 193}
 194
 195/*
 196 * This routine is used to read an rnat bits that are stored on the
 197 * kernel backing store.  Since, in general, the alignment of the user
 198 * and kernel are different, this is not completely trivial.  In
 199 * essence, we need to construct the user RNAT based on up to two
 200 * kernel RNAT values and/or the RNAT value saved in the child's
 201 * pt_regs.
 202 *
 203 * user rbs
 204 *
 205 * +--------+ <-- lowest address
 206 * | slot62 |
 207 * +--------+
 208 * |  rnat  | 0x....1f8
 209 * +--------+
 210 * | slot00 | \
 211 * +--------+ |
 212 * | slot01 | > child_regs->ar_rnat
 213 * +--------+ |
 214 * | slot02 | /				kernel rbs
 215 * +--------+				+--------+
 216 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 217 * +- - - - +				+--------+
 218 *					| slot62 |
 219 * +- - - - +				+--------+
 220 *					|  rnat	 |
 221 * +- - - - +				+--------+
 222 *   vrnat				| slot00 |
 223 * +- - - - +				+--------+
 224 *					=	 =
 225 *					+--------+
 226 *					| slot00 | \
 227 *					+--------+ |
 228 *					| slot01 | > child_stack->ar_rnat
 229 *					+--------+ |
 230 *					| slot02 | /
 231 *					+--------+
 232 *						  <--- child_stack->ar_bspstore
 233 *
 234 * The way to think of this code is as follows: bit 0 in the user rnat
 235 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 236 * value.  The kernel rnat value holding this bit is stored in
 237 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 238 * form the upper bits of the user rnat value.
 239 *
 240 * Boundary cases:
 241 *
 242 * o when reading the rnat "below" the first rnat slot on the kernel
 243 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 244 *   merged in from pt->ar_rnat.
 245 *
 246 * o when reading the rnat "above" the last rnat slot on the kernel
 247 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 248 */
 249static unsigned long
 250get_rnat (struct task_struct *task, struct switch_stack *sw,
 251	  unsigned long *krbs, unsigned long *urnat_addr,
 252	  unsigned long *urbs_end)
 253{
 254	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
 255	unsigned long umask = 0, mask, m;
 256	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
 257	long num_regs, nbits;
 258	struct pt_regs *pt;
 259
 260	pt = task_pt_regs(task);
 261	kbsp = (unsigned long *) sw->ar_bspstore;
 262	ubspstore = (unsigned long *) pt->ar_bspstore;
 263
 264	if (urbs_end < urnat_addr)
 265		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
 266	else
 267		nbits = 63;
 268	mask = MASK(nbits);
 269	/*
 270	 * First, figure out which bit number slot 0 in user-land maps
 271	 * to in the kernel rnat.  Do this by figuring out how many
 272	 * register slots we're beyond the user's backingstore and
 273	 * then computing the equivalent address in kernel space.
 274	 */
 275	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
 276	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
 277	shift = ia64_rse_slot_num(slot0_kaddr);
 278	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
 279	rnat0_kaddr = rnat1_kaddr - 64;
 280
 281	if (ubspstore + 63 > urnat_addr) {
 282		/* some bits need to be merged in from pt->ar_rnat */
 283		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
 284		urnat = (pt->ar_rnat & umask);
 285		mask &= ~umask;
 286		if (!mask)
 287			return urnat;
 288	}
 289
 290	m = mask << shift;
 291	if (rnat0_kaddr >= kbsp)
 292		rnat0 = sw->ar_rnat;
 293	else if (rnat0_kaddr > krbs)
 294		rnat0 = *rnat0_kaddr;
 295	urnat |= (rnat0 & m) >> shift;
 296
 297	m = mask >> (63 - shift);
 298	if (rnat1_kaddr >= kbsp)
 299		rnat1 = sw->ar_rnat;
 300	else if (rnat1_kaddr > krbs)
 301		rnat1 = *rnat1_kaddr;
 302	urnat |= (rnat1 & m) << (63 - shift);
 303	return urnat;
 304}
 305
 306/*
 307 * The reverse of get_rnat.
 308 */
 309static void
 310put_rnat (struct task_struct *task, struct switch_stack *sw,
 311	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
 312	  unsigned long *urbs_end)
 313{
 314	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
 315	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
 316	long num_regs, nbits;
 317	struct pt_regs *pt;
 318	unsigned long cfm, *urbs_kargs;
 319
 320	pt = task_pt_regs(task);
 321	kbsp = (unsigned long *) sw->ar_bspstore;
 322	ubspstore = (unsigned long *) pt->ar_bspstore;
 323
 324	urbs_kargs = urbs_end;
 325	if (in_syscall(pt)) {
 326		/*
 327		 * If entered via syscall, don't allow user to set rnat bits
 328		 * for syscall args.
 329		 */
 330		cfm = pt->cr_ifs;
 331		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
 332	}
 333
 334	if (urbs_kargs >= urnat_addr)
 335		nbits = 63;
 336	else {
 337		if ((urnat_addr - 63) >= urbs_kargs)
 338			return;
 339		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
 340	}
 341	mask = MASK(nbits);
 342
 343	/*
 344	 * First, figure out which bit number slot 0 in user-land maps
 345	 * to in the kernel rnat.  Do this by figuring out how many
 346	 * register slots we're beyond the user's backingstore and
 347	 * then computing the equivalent address in kernel space.
 348	 */
 349	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
 350	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
 351	shift = ia64_rse_slot_num(slot0_kaddr);
 352	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
 353	rnat0_kaddr = rnat1_kaddr - 64;
 354
 355	if (ubspstore + 63 > urnat_addr) {
 356		/* some bits need to be place in pt->ar_rnat: */
 357		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
 358		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
 359		mask &= ~umask;
 360		if (!mask)
 361			return;
 362	}
 363	/*
 364	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
 365	 * rnat slot is ignored. so we don't have to clear it here.
 366	 */
 367	rnat0 = (urnat << shift);
 368	m = mask << shift;
 369	if (rnat0_kaddr >= kbsp)
 370		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
 371	else if (rnat0_kaddr > krbs)
 372		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
 373
 374	rnat1 = (urnat >> (63 - shift));
 375	m = mask >> (63 - shift);
 376	if (rnat1_kaddr >= kbsp)
 377		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
 378	else if (rnat1_kaddr > krbs)
 379		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
 380}
 381
 382static inline int
 383on_kernel_rbs (unsigned long addr, unsigned long bspstore,
 384	       unsigned long urbs_end)
 385{
 386	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
 387						      urbs_end);
 388	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
 389}
 390
 391/*
 392 * Read a word from the user-level backing store of task CHILD.  ADDR
 393 * is the user-level address to read the word from, VAL a pointer to
 394 * the return value, and USER_BSP gives the end of the user-level
 395 * backing store (i.e., it's the address that would be in ar.bsp after
 396 * the user executed a "cover" instruction).
 397 *
 398 * This routine takes care of accessing the kernel register backing
 399 * store for those registers that got spilled there.  It also takes
 400 * care of calculating the appropriate RNaT collection words.
 401 */
 402long
 403ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
 404	   unsigned long user_rbs_end, unsigned long addr, long *val)
 405{
 406	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
 407	struct pt_regs *child_regs;
 408	size_t copied;
 409	long ret;
 410
 411	urbs_end = (long *) user_rbs_end;
 412	laddr = (unsigned long *) addr;
 413	child_regs = task_pt_regs(child);
 414	bspstore = (unsigned long *) child_regs->ar_bspstore;
 415	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 416	if (on_kernel_rbs(addr, (unsigned long) bspstore,
 417			  (unsigned long) urbs_end))
 418	{
 419		/*
 420		 * Attempt to read the RBS in an area that's actually
 421		 * on the kernel RBS => read the corresponding bits in
 422		 * the kernel RBS.
 423		 */
 424		rnat_addr = ia64_rse_rnat_addr(laddr);
 425		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
 426
 427		if (laddr == rnat_addr) {
 428			/* return NaT collection word itself */
 429			*val = ret;
 430			return 0;
 431		}
 432
 433		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
 434			/*
 435			 * It is implementation dependent whether the
 436			 * data portion of a NaT value gets saved on a
 437			 * st8.spill or RSE spill (e.g., see EAS 2.6,
 438			 * 4.4.4.6 Register Spill and Fill).  To get
 439			 * consistent behavior across all possible
 440			 * IA-64 implementations, we return zero in
 441			 * this case.
 442			 */
 443			*val = 0;
 444			return 0;
 445		}
 446
 447		if (laddr < urbs_end) {
 448			/*
 449			 * The desired word is on the kernel RBS and
 450			 * is not a NaT.
 451			 */
 452			regnum = ia64_rse_num_regs(bspstore, laddr);
 453			*val = *ia64_rse_skip_regs(krbs, regnum);
 454			return 0;
 455		}
 456	}
 457	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
 458	if (copied != sizeof(ret))
 459		return -EIO;
 460	*val = ret;
 461	return 0;
 462}
 463
 464long
 465ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
 466	   unsigned long user_rbs_end, unsigned long addr, long val)
 467{
 468	unsigned long *bspstore, *krbs, regnum, *laddr;
 469	unsigned long *urbs_end = (long *) user_rbs_end;
 470	struct pt_regs *child_regs;
 471
 472	laddr = (unsigned long *) addr;
 473	child_regs = task_pt_regs(child);
 474	bspstore = (unsigned long *) child_regs->ar_bspstore;
 475	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 476	if (on_kernel_rbs(addr, (unsigned long) bspstore,
 477			  (unsigned long) urbs_end))
 478	{
 479		/*
 480		 * Attempt to write the RBS in an area that's actually
 481		 * on the kernel RBS => write the corresponding bits
 482		 * in the kernel RBS.
 483		 */
 484		if (ia64_rse_is_rnat_slot(laddr))
 485			put_rnat(child, child_stack, krbs, laddr, val,
 486				 urbs_end);
 487		else {
 488			if (laddr < urbs_end) {
 489				regnum = ia64_rse_num_regs(bspstore, laddr);
 490				*ia64_rse_skip_regs(krbs, regnum) = val;
 491			}
 492		}
 493	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
 
 494		   != sizeof(val))
 495		return -EIO;
 496	return 0;
 497}
 498
 499/*
 500 * Calculate the address of the end of the user-level register backing
 501 * store.  This is the address that would have been stored in ar.bsp
 502 * if the user had executed a "cover" instruction right before
 503 * entering the kernel.  If CFMP is not NULL, it is used to return the
 504 * "current frame mask" that was active at the time the kernel was
 505 * entered.
 506 */
 507unsigned long
 508ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
 509		       unsigned long *cfmp)
 510{
 511	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
 512	long ndirty;
 513
 514	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 515	bspstore = (unsigned long *) pt->ar_bspstore;
 516	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
 517
 518	if (in_syscall(pt))
 519		ndirty += (cfm & 0x7f);
 520	else
 521		cfm &= ~(1UL << 63);	/* clear valid bit */
 522
 523	if (cfmp)
 524		*cfmp = cfm;
 525	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
 526}
 527
 528/*
 529 * Synchronize (i.e, write) the RSE backing store living in kernel
 530 * space to the VM of the CHILD task.  SW and PT are the pointers to
 531 * the switch_stack and pt_regs structures, respectively.
 532 * USER_RBS_END is the user-level address at which the backing store
 533 * ends.
 534 */
 535long
 536ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
 537		    unsigned long user_rbs_start, unsigned long user_rbs_end)
 538{
 539	unsigned long addr, val;
 540	long ret;
 541
 542	/* now copy word for word from kernel rbs to user rbs: */
 543	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
 544		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
 545		if (ret < 0)
 546			return ret;
 547		if (access_process_vm(child, addr, &val, sizeof(val), 1)
 
 548		    != sizeof(val))
 549			return -EIO;
 550	}
 551	return 0;
 552}
 553
 554static long
 555ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
 556		unsigned long user_rbs_start, unsigned long user_rbs_end)
 557{
 558	unsigned long addr, val;
 559	long ret;
 560
 561	/* now copy word for word from user rbs to kernel rbs: */
 562	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
 563		if (access_process_vm(child, addr, &val, sizeof(val), 0)
 
 564				!= sizeof(val))
 565			return -EIO;
 566
 567		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
 568		if (ret < 0)
 569			return ret;
 570	}
 571	return 0;
 572}
 573
 574typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
 575			    unsigned long, unsigned long);
 576
 577static void do_sync_rbs(struct unw_frame_info *info, void *arg)
 578{
 579	struct pt_regs *pt;
 580	unsigned long urbs_end;
 581	syncfunc_t fn = arg;
 582
 583	if (unw_unwind_to_user(info) < 0)
 584		return;
 585	pt = task_pt_regs(info->task);
 586	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);
 587
 588	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
 589}
 590
 591/*
 592 * when a thread is stopped (ptraced), debugger might change thread's user
 593 * stack (change memory directly), and we must avoid the RSE stored in kernel
 594 * to override user stack (user space's RSE is newer than kernel's in the
 595 * case). To workaround the issue, we copy kernel RSE to user RSE before the
 596 * task is stopped, so user RSE has updated data.  we then copy user RSE to
 597 * kernel after the task is resummed from traced stop and kernel will use the
 598 * newer RSE to return to user. TIF_RESTORE_RSE is the flag to indicate we need
 599 * synchronize user RSE to kernel.
 600 */
 601void ia64_ptrace_stop(void)
 602{
 603	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
 604		return;
 605	set_notify_resume(current);
 606	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
 607}
 608
 609/*
 610 * This is called to read back the register backing store.
 611 */
 612void ia64_sync_krbs(void)
 613{
 614	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
 615
 616	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
 617}
 618
 619/*
 620 * After PTRACE_ATTACH, a thread's register backing store area in user
 621 * space is assumed to contain correct data whenever the thread is
 622 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 623 * But if the child was already stopped for job control when we attach
 624 * to it, then it might not ever get into ptrace_stop by the time we
 625 * want to examine the user memory containing the RBS.
 626 */
 627void
 628ptrace_attach_sync_user_rbs (struct task_struct *child)
 629{
 630	int stopped = 0;
 631	struct unw_frame_info info;
 632
 633	/*
 634	 * If the child is in TASK_STOPPED, we need to change that to
 635	 * TASK_TRACED momentarily while we operate on it.  This ensures
 636	 * that the child won't be woken up and return to user mode while
 637	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
 638	 */
 639
 640	read_lock(&tasklist_lock);
 641	if (child->sighand) {
 642		spin_lock_irq(&child->sighand->siglock);
 643		if (child->state == TASK_STOPPED &&
 644		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
 645			set_notify_resume(child);
 646
 647			child->state = TASK_TRACED;
 648			stopped = 1;
 649		}
 650		spin_unlock_irq(&child->sighand->siglock);
 651	}
 652	read_unlock(&tasklist_lock);
 653
 654	if (!stopped)
 655		return;
 656
 657	unw_init_from_blocked_task(&info, child);
 658	do_sync_rbs(&info, ia64_sync_user_rbs);
 659
 660	/*
 661	 * Now move the child back into TASK_STOPPED if it should be in a
 662	 * job control stop, so that SIGCONT can be used to wake it up.
 663	 */
 664	read_lock(&tasklist_lock);
 665	if (child->sighand) {
 666		spin_lock_irq(&child->sighand->siglock);
 667		if (child->state == TASK_TRACED &&
 668		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
 669			child->state = TASK_STOPPED;
 670		}
 671		spin_unlock_irq(&child->sighand->siglock);
 672	}
 673	read_unlock(&tasklist_lock);
 674}
 675
 676static inline int
 677thread_matches (struct task_struct *thread, unsigned long addr)
 678{
 679	unsigned long thread_rbs_end;
 680	struct pt_regs *thread_regs;
 681
 682	if (ptrace_check_attach(thread, 0) < 0)
 683		/*
 684		 * If the thread is not in an attachable state, we'll
 685		 * ignore it.  The net effect is that if ADDR happens
 686		 * to overlap with the portion of the thread's
 687		 * register backing store that is currently residing
 688		 * on the thread's kernel stack, then ptrace() may end
 689		 * up accessing a stale value.  But if the thread
 690		 * isn't stopped, that's a problem anyhow, so we're
 691		 * doing as well as we can...
 692		 */
 693		return 0;
 694
 695	thread_regs = task_pt_regs(thread);
 696	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
 697	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
 698		return 0;
 699
 700	return 1;	/* looks like we've got a winner */
 701}
 702
 703/*
 704 * Write f32-f127 back to task->thread.fph if it has been modified.
 705 */
 706inline void
 707ia64_flush_fph (struct task_struct *task)
 708{
 709	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 710
 711	/*
 712	 * Prevent migrating this task while
 713	 * we're fiddling with the FPU state
 714	 */
 715	preempt_disable();
 716	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
 717		psr->mfh = 0;
 718		task->thread.flags |= IA64_THREAD_FPH_VALID;
 719		ia64_save_fpu(&task->thread.fph[0]);
 720	}
 721	preempt_enable();
 722}
 723
 724/*
 725 * Sync the fph state of the task so that it can be manipulated
 726 * through thread.fph.  If necessary, f32-f127 are written back to
 727 * thread.fph or, if the fph state hasn't been used before, thread.fph
 728 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 729 * ensure that the task picks up the state from thread.fph when it
 730 * executes again.
 731 */
 732void
 733ia64_sync_fph (struct task_struct *task)
 734{
 735	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 736
 737	ia64_flush_fph(task);
 738	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
 739		task->thread.flags |= IA64_THREAD_FPH_VALID;
 740		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
 741	}
 742	ia64_drop_fpu(task);
 743	psr->dfh = 1;
 744}
 745
 746/*
 747 * Change the machine-state of CHILD such that it will return via the normal
 748 * kernel exit-path, rather than the syscall-exit path.
 749 */
 750static void
 751convert_to_non_syscall (struct task_struct *child, struct pt_regs  *pt,
 752			unsigned long cfm)
 753{
 754	struct unw_frame_info info, prev_info;
 755	unsigned long ip, sp, pr;
 756
 757	unw_init_from_blocked_task(&info, child);
 758	while (1) {
 759		prev_info = info;
 760		if (unw_unwind(&info) < 0)
 761			return;
 762
 763		unw_get_sp(&info, &sp);
 764		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
 765		    < IA64_PT_REGS_SIZE) {
 766			dprintk("ptrace.%s: ran off the top of the kernel "
 767				"stack\n", __func__);
 768			return;
 769		}
 770		if (unw_get_pr (&prev_info, &pr) < 0) {
 771			unw_get_rp(&prev_info, &ip);
 772			dprintk("ptrace.%s: failed to read "
 773				"predicate register (ip=0x%lx)\n",
 774				__func__, ip);
 775			return;
 776		}
 777		if (unw_is_intr_frame(&info)
 778		    && (pr & (1UL << PRED_USER_STACK)))
 779			break;
 780	}
 781
 782	/*
 783	 * Note: at the time of this call, the target task is blocked
 784	 * in notify_resume_user() and by clearling PRED_LEAVE_SYSCALL
 785	 * (aka, "pLvSys") we redirect execution from
 786	 * .work_pending_syscall_end to .work_processed_kernel.
 787	 */
 788	unw_get_pr(&prev_info, &pr);
 789	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
 790	pr |=  (1UL << PRED_NON_SYSCALL);
 791	unw_set_pr(&prev_info, pr);
 792
 793	pt->cr_ifs = (1UL << 63) | cfm;
 794	/*
 795	 * Clear the memory that is NOT written on syscall-entry to
 796	 * ensure we do not leak kernel-state to user when execution
 797	 * resumes.
 798	 */
 799	pt->r2 = 0;
 800	pt->r3 = 0;
 801	pt->r14 = 0;
 802	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
 803	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
 804	pt->b7 = 0;
 805	pt->ar_ccv = 0;
 806	pt->ar_csd = 0;
 807	pt->ar_ssd = 0;
 808}
 809
 810static int
 811access_nat_bits (struct task_struct *child, struct pt_regs *pt,
 812		 struct unw_frame_info *info,
 813		 unsigned long *data, int write_access)
 814{
 815	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
 816	char nat = 0;
 817
 818	if (write_access) {
 819		nat_bits = *data;
 820		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
 821		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
 822			dprintk("ptrace: failed to set ar.unat\n");
 823			return -1;
 824		}
 825		for (regnum = 4; regnum <= 7; ++regnum) {
 826			unw_get_gr(info, regnum, &dummy, &nat);
 827			unw_set_gr(info, regnum, dummy,
 828				   (nat_bits >> regnum) & 1);
 829		}
 830	} else {
 831		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
 832			dprintk("ptrace: failed to read ar.unat\n");
 833			return -1;
 834		}
 835		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
 836		for (regnum = 4; regnum <= 7; ++regnum) {
 837			unw_get_gr(info, regnum, &dummy, &nat);
 838			nat_bits |= (nat != 0) << regnum;
 839		}
 840		*data = nat_bits;
 841	}
 842	return 0;
 843}
 844
 845static int
 846access_uarea (struct task_struct *child, unsigned long addr,
 847	      unsigned long *data, int write_access);
 848
 849static long
 850ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 851{
 852	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
 853	struct unw_frame_info info;
 854	struct ia64_fpreg fpval;
 855	struct switch_stack *sw;
 856	struct pt_regs *pt;
 857	long ret, retval = 0;
 858	char nat = 0;
 859	int i;
 860
 861	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
 862		return -EIO;
 863
 864	pt = task_pt_regs(child);
 865	sw = (struct switch_stack *) (child->thread.ksp + 16);
 866	unw_init_from_blocked_task(&info, child);
 867	if (unw_unwind_to_user(&info) < 0) {
 868		return -EIO;
 869	}
 870
 871	if (((unsigned long) ppr & 0x7) != 0) {
 872		dprintk("ptrace:unaligned register address %p\n", ppr);
 873		return -EIO;
 874	}
 875
 876	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
 877	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
 878	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
 879	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
 880	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
 881	    || access_uarea(child, PT_CFM, &cfm, 0)
 882	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
 883		return -EIO;
 884
 885	/* control regs */
 886
 887	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
 888	retval |= __put_user(psr, &ppr->cr_ipsr);
 889
 890	/* app regs */
 891
 892	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
 893	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
 894	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
 895	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
 896	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
 897	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
 898
 899	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
 900	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
 901	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
 902	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
 903	retval |= __put_user(cfm, &ppr->cfm);
 904
 905	/* gr1-gr3 */
 906
 907	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
 908	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
 909
 910	/* gr4-gr7 */
 911
 912	for (i = 4; i < 8; i++) {
 913		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
 914			return -EIO;
 915		retval |= __put_user(val, &ppr->gr[i]);
 916	}
 917
 918	/* gr8-gr11 */
 919
 920	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
 921
 922	/* gr12-gr15 */
 923
 924	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
 925	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
 926	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
 927
 928	/* gr16-gr31 */
 929
 930	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
 931
 932	/* b0 */
 933
 934	retval |= __put_user(pt->b0, &ppr->br[0]);
 935
 936	/* b1-b5 */
 937
 938	for (i = 1; i < 6; i++) {
 939		if (unw_access_br(&info, i, &val, 0) < 0)
 940			return -EIO;
 941		__put_user(val, &ppr->br[i]);
 942	}
 943
 944	/* b6-b7 */
 945
 946	retval |= __put_user(pt->b6, &ppr->br[6]);
 947	retval |= __put_user(pt->b7, &ppr->br[7]);
 948
 949	/* fr2-fr5 */
 950
 951	for (i = 2; i < 6; i++) {
 952		if (unw_get_fr(&info, i, &fpval) < 0)
 953			return -EIO;
 954		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
 955	}
 956
 957	/* fr6-fr11 */
 958
 959	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
 960				 sizeof(struct ia64_fpreg) * 6);
 961
 962	/* fp scratch regs(12-15) */
 963
 964	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
 965				 sizeof(struct ia64_fpreg) * 4);
 966
 967	/* fr16-fr31 */
 968
 969	for (i = 16; i < 32; i++) {
 970		if (unw_get_fr(&info, i, &fpval) < 0)
 971			return -EIO;
 972		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
 973	}
 974
 975	/* fph */
 976
 977	ia64_flush_fph(child);
 978	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
 979				 sizeof(ppr->fr[32]) * 96);
 980
 981	/*  preds */
 982
 983	retval |= __put_user(pt->pr, &ppr->pr);
 984
 985	/* nat bits */
 986
 987	retval |= __put_user(nat_bits, &ppr->nat);
 988
 989	ret = retval ? -EIO : 0;
 990	return ret;
 991}
 992
 993static long
 994ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 995{
 996	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
 997	struct unw_frame_info info;
 998	struct switch_stack *sw;
 999	struct ia64_fpreg fpval;
1000	struct pt_regs *pt;
1001	long ret, retval = 0;
1002	int i;
1003
1004	memset(&fpval, 0, sizeof(fpval));
1005
1006	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
1007		return -EIO;
1008
1009	pt = task_pt_regs(child);
1010	sw = (struct switch_stack *) (child->thread.ksp + 16);
1011	unw_init_from_blocked_task(&info, child);
1012	if (unw_unwind_to_user(&info) < 0) {
1013		return -EIO;
1014	}
1015
1016	if (((unsigned long) ppr & 0x7) != 0) {
1017		dprintk("ptrace:unaligned register address %p\n", ppr);
1018		return -EIO;
1019	}
1020
1021	/* control regs */
1022
1023	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
1024	retval |= __get_user(psr, &ppr->cr_ipsr);
1025
1026	/* app regs */
1027
1028	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
1029	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
1030	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1031	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1032	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1033	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1034
1035	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
1036	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
1037	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
1038	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
1039	retval |= __get_user(cfm, &ppr->cfm);
1040
1041	/* gr1-gr3 */
1042
1043	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
1044	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
1045
1046	/* gr4-gr7 */
1047
1048	for (i = 4; i < 8; i++) {
1049		retval |= __get_user(val, &ppr->gr[i]);
1050		/* NaT bit will be set via PT_NAT_BITS: */
1051		if (unw_set_gr(&info, i, val, 0) < 0)
1052			return -EIO;
1053	}
1054
1055	/* gr8-gr11 */
1056
1057	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
1058
1059	/* gr12-gr15 */
1060
1061	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
1062	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
1063	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
1064
1065	/* gr16-gr31 */
1066
1067	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
1068
1069	/* b0 */
1070
1071	retval |= __get_user(pt->b0, &ppr->br[0]);
1072
1073	/* b1-b5 */
1074
1075	for (i = 1; i < 6; i++) {
1076		retval |= __get_user(val, &ppr->br[i]);
1077		unw_set_br(&info, i, val);
1078	}
1079
1080	/* b6-b7 */
1081
1082	retval |= __get_user(pt->b6, &ppr->br[6]);
1083	retval |= __get_user(pt->b7, &ppr->br[7]);
1084
1085	/* fr2-fr5 */
1086
1087	for (i = 2; i < 6; i++) {
1088		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
1089		if (unw_set_fr(&info, i, fpval) < 0)
1090			return -EIO;
1091	}
1092
1093	/* fr6-fr11 */
1094
1095	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
1096				   sizeof(ppr->fr[6]) * 6);
1097
1098	/* fp scratch regs(12-15) */
1099
1100	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
1101				   sizeof(ppr->fr[12]) * 4);
1102
1103	/* fr16-fr31 */
1104
1105	for (i = 16; i < 32; i++) {
1106		retval |= __copy_from_user(&fpval, &ppr->fr[i],
1107					   sizeof(fpval));
1108		if (unw_set_fr(&info, i, fpval) < 0)
1109			return -EIO;
1110	}
1111
1112	/* fph */
1113
1114	ia64_sync_fph(child);
1115	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
1116				   sizeof(ppr->fr[32]) * 96);
1117
1118	/* preds */
1119
1120	retval |= __get_user(pt->pr, &ppr->pr);
1121
1122	/* nat bits */
1123
1124	retval |= __get_user(nat_bits, &ppr->nat);
1125
1126	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
1127	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
1128	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
1129	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
1130	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
1131	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
1132	retval |= access_uarea(child, PT_CFM, &cfm, 1);
1133	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
1134
1135	ret = retval ? -EIO : 0;
1136	return ret;
1137}
1138
1139void
1140user_enable_single_step (struct task_struct *child)
1141{
1142	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1143
1144	set_tsk_thread_flag(child, TIF_SINGLESTEP);
1145	child_psr->ss = 1;
1146}
1147
1148void
1149user_enable_block_step (struct task_struct *child)
1150{
1151	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1152
1153	set_tsk_thread_flag(child, TIF_SINGLESTEP);
1154	child_psr->tb = 1;
1155}
1156
1157void
1158user_disable_single_step (struct task_struct *child)
1159{
1160	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1161
1162	/* make sure the single step/taken-branch trap bits are not set: */
1163	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1164	child_psr->ss = 0;
1165	child_psr->tb = 0;
1166}
1167
1168/*
1169 * Called by kernel/ptrace.c when detaching..
1170 *
1171 * Make sure the single step bit is not set.
1172 */
1173void
1174ptrace_disable (struct task_struct *child)
1175{
1176	user_disable_single_step(child);
1177}
1178
1179long
1180arch_ptrace (struct task_struct *child, long request,
1181	     unsigned long addr, unsigned long data)
1182{
1183	switch (request) {
1184	case PTRACE_PEEKTEXT:
1185	case PTRACE_PEEKDATA:
1186		/* read word at location addr */
1187		if (access_process_vm(child, addr, &data, sizeof(data), 0)
 
1188		    != sizeof(data))
1189			return -EIO;
1190		/* ensure return value is not mistaken for error code */
1191		force_successful_syscall_return();
1192		return data;
1193
1194	/* PTRACE_POKETEXT and PTRACE_POKEDATA is handled
1195	 * by the generic ptrace_request().
1196	 */
1197
1198	case PTRACE_PEEKUSR:
1199		/* read the word at addr in the USER area */
1200		if (access_uarea(child, addr, &data, 0) < 0)
1201			return -EIO;
1202		/* ensure return value is not mistaken for error code */
1203		force_successful_syscall_return();
1204		return data;
1205
1206	case PTRACE_POKEUSR:
1207		/* write the word at addr in the USER area */
1208		if (access_uarea(child, addr, &data, 1) < 0)
1209			return -EIO;
1210		return 0;
1211
1212	case PTRACE_OLD_GETSIGINFO:
1213		/* for backwards-compatibility */
1214		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
1215
1216	case PTRACE_OLD_SETSIGINFO:
1217		/* for backwards-compatibility */
1218		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
1219
1220	case PTRACE_GETREGS:
1221		return ptrace_getregs(child,
1222				      (struct pt_all_user_regs __user *) data);
1223
1224	case PTRACE_SETREGS:
1225		return ptrace_setregs(child,
1226				      (struct pt_all_user_regs __user *) data);
1227
1228	default:
1229		return ptrace_request(child, request, addr, data);
1230	}
1231}
1232
1233
1234/* "asmlinkage" so the input arguments are preserved... */
1235
1236asmlinkage long
1237syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1238		     long arg4, long arg5, long arg6, long arg7,
1239		     struct pt_regs regs)
1240{
1241	if (test_thread_flag(TIF_SYSCALL_TRACE))
1242		if (tracehook_report_syscall_entry(&regs))
1243			return -ENOSYS;
1244
1245	/* copy user rbs to kernel rbs */
1246	if (test_thread_flag(TIF_RESTORE_RSE))
1247		ia64_sync_krbs();
1248
1249	if (unlikely(current->audit_context)) {
1250		long syscall;
1251		int arch;
1252
1253		syscall = regs.r15;
1254		arch = AUDIT_ARCH_IA64;
1255
1256		audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
1257	}
1258
1259	return 0;
1260}
1261
1262/* "asmlinkage" so the input arguments are preserved... */
1263
1264asmlinkage void
1265syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1266		     long arg4, long arg5, long arg6, long arg7,
1267		     struct pt_regs regs)
1268{
1269	int step;
1270
1271	if (unlikely(current->audit_context)) {
1272		int success = AUDITSC_RESULT(regs.r10);
1273		long result = regs.r8;
1274
1275		if (success != AUDITSC_SUCCESS)
1276			result = -result;
1277		audit_syscall_exit(success, result);
1278	}
1279
1280	step = test_thread_flag(TIF_SINGLESTEP);
1281	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1282		tracehook_report_syscall_exit(&regs, step);
1283
1284	/* copy user rbs to kernel rbs */
1285	if (test_thread_flag(TIF_RESTORE_RSE))
1286		ia64_sync_krbs();
1287}
1288
1289/* Utrace implementation starts here */
1290struct regset_get {
1291	void *kbuf;
1292	void __user *ubuf;
1293};
1294
1295struct regset_set {
1296	const void *kbuf;
1297	const void __user *ubuf;
1298};
1299
1300struct regset_getset {
1301	struct task_struct *target;
1302	const struct user_regset *regset;
1303	union {
1304		struct regset_get get;
1305		struct regset_set set;
1306	} u;
1307	unsigned int pos;
1308	unsigned int count;
1309	int ret;
1310};
1311
1312static int
1313access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
1314		unsigned long addr, unsigned long *data, int write_access)
1315{
1316	struct pt_regs *pt;
1317	unsigned long *ptr = NULL;
1318	int ret;
1319	char nat = 0;
1320
1321	pt = task_pt_regs(target);
1322	switch (addr) {
1323	case ELF_GR_OFFSET(1):
1324		ptr = &pt->r1;
1325		break;
1326	case ELF_GR_OFFSET(2):
1327	case ELF_GR_OFFSET(3):
1328		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
1329		break;
1330	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
1331		if (write_access) {
1332			/* read NaT bit first: */
1333			unsigned long dummy;
1334
1335			ret = unw_get_gr(info, addr/8, &dummy, &nat);
1336			if (ret < 0)
1337				return ret;
1338		}
1339		return unw_access_gr(info, addr/8, data, &nat, write_access);
1340	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
1341		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
1342		break;
1343	case ELF_GR_OFFSET(12):
1344	case ELF_GR_OFFSET(13):
1345		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
1346		break;
1347	case ELF_GR_OFFSET(14):
1348		ptr = &pt->r14;
1349		break;
1350	case ELF_GR_OFFSET(15):
1351		ptr = &pt->r15;
1352	}
1353	if (write_access)
1354		*ptr = *data;
1355	else
1356		*data = *ptr;
1357	return 0;
1358}
1359
1360static int
1361access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
1362		unsigned long addr, unsigned long *data, int write_access)
1363{
1364	struct pt_regs *pt;
1365	unsigned long *ptr = NULL;
1366
1367	pt = task_pt_regs(target);
1368	switch (addr) {
1369	case ELF_BR_OFFSET(0):
1370		ptr = &pt->b0;
1371		break;
1372	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
1373		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
1374				     data, write_access);
1375	case ELF_BR_OFFSET(6):
1376		ptr = &pt->b6;
1377		break;
1378	case ELF_BR_OFFSET(7):
1379		ptr = &pt->b7;
1380	}
1381	if (write_access)
1382		*ptr = *data;
1383	else
1384		*data = *ptr;
1385	return 0;
1386}
1387
1388static int
1389access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
1390		unsigned long addr, unsigned long *data, int write_access)
1391{
1392	struct pt_regs *pt;
1393	unsigned long cfm, urbs_end;
1394	unsigned long *ptr = NULL;
1395
1396	pt = task_pt_regs(target);
1397	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
1398		switch (addr) {
1399		case ELF_AR_RSC_OFFSET:
1400			/* force PL3 */
1401			if (write_access)
1402				pt->ar_rsc = *data | (3 << 2);
1403			else
1404				*data = pt->ar_rsc;
1405			return 0;
1406		case ELF_AR_BSP_OFFSET:
1407			/*
1408			 * By convention, we use PT_AR_BSP to refer to
1409			 * the end of the user-level backing store.
1410			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
1411			 * to get the real value of ar.bsp at the time
1412			 * the kernel was entered.
1413			 *
1414			 * Furthermore, when changing the contents of
1415			 * PT_AR_BSP (or PT_CFM) while the task is
1416			 * blocked in a system call, convert the state
1417			 * so that the non-system-call exit
1418			 * path is used.  This ensures that the proper
1419			 * state will be picked up when resuming
1420			 * execution.  However, it *also* means that
1421			 * once we write PT_AR_BSP/PT_CFM, it won't be
1422			 * possible to modify the syscall arguments of
1423			 * the pending system call any longer.  This
1424			 * shouldn't be an issue because modifying
1425			 * PT_AR_BSP/PT_CFM generally implies that
1426			 * we're either abandoning the pending system
1427			 * call or that we defer it's re-execution
1428			 * (e.g., due to GDB doing an inferior
1429			 * function call).
1430			 */
1431			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1432			if (write_access) {
1433				if (*data != urbs_end) {
1434					if (in_syscall(pt))
1435						convert_to_non_syscall(target,
1436								       pt,
1437								       cfm);
1438					/*
1439					 * Simulate user-level write
1440					 * of ar.bsp:
1441					 */
1442					pt->loadrs = 0;
1443					pt->ar_bspstore = *data;
1444				}
1445			} else
1446				*data = urbs_end;
1447			return 0;
1448		case ELF_AR_BSPSTORE_OFFSET:
1449			ptr = &pt->ar_bspstore;
1450			break;
1451		case ELF_AR_RNAT_OFFSET:
1452			ptr = &pt->ar_rnat;
1453			break;
1454		case ELF_AR_CCV_OFFSET:
1455			ptr = &pt->ar_ccv;
1456			break;
1457		case ELF_AR_UNAT_OFFSET:
1458			ptr = &pt->ar_unat;
1459			break;
1460		case ELF_AR_FPSR_OFFSET:
1461			ptr = &pt->ar_fpsr;
1462			break;
1463		case ELF_AR_PFS_OFFSET:
1464			ptr = &pt->ar_pfs;
1465			break;
1466		case ELF_AR_LC_OFFSET:
1467			return unw_access_ar(info, UNW_AR_LC, data,
1468					     write_access);
1469		case ELF_AR_EC_OFFSET:
1470			return unw_access_ar(info, UNW_AR_EC, data,
1471					     write_access);
1472		case ELF_AR_CSD_OFFSET:
1473			ptr = &pt->ar_csd;
1474			break;
1475		case ELF_AR_SSD_OFFSET:
1476			ptr = &pt->ar_ssd;
1477		}
1478	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
1479		switch (addr) {
1480		case ELF_CR_IIP_OFFSET:
1481			ptr = &pt->cr_iip;
1482			break;
1483		case ELF_CFM_OFFSET:
1484			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1485			if (write_access) {
1486				if (((cfm ^ *data) & PFM_MASK) != 0) {
1487					if (in_syscall(pt))
1488						convert_to_non_syscall(target,
1489								       pt,
1490								       cfm);
1491					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
1492						      | (*data & PFM_MASK));
1493				}
1494			} else
1495				*data = cfm;
1496			return 0;
1497		case ELF_CR_IPSR_OFFSET:
1498			if (write_access) {
1499				unsigned long tmp = *data;
1500				/* psr.ri==3 is a reserved value: SDM 2:25 */
1501				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
1502					tmp &= ~IA64_PSR_RI;
1503				pt->cr_ipsr = ((tmp & IPSR_MASK)
1504					       | (pt->cr_ipsr & ~IPSR_MASK));
1505			} else
1506				*data = (pt->cr_ipsr & IPSR_MASK);
1507			return 0;
1508		}
1509	} else if (addr == ELF_NAT_OFFSET)
1510		return access_nat_bits(target, pt, info,
1511				       data, write_access);
1512	else if (addr == ELF_PR_OFFSET)
1513		ptr = &pt->pr;
1514	else
1515		return -1;
1516
1517	if (write_access)
1518		*ptr = *data;
1519	else
1520		*data = *ptr;
1521
1522	return 0;
1523}
1524
1525static int
1526access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
1527		unsigned long addr, unsigned long *data, int write_access)
1528{
1529	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
1530		return access_elf_gpreg(target, info, addr, data, write_access);
1531	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
1532		return access_elf_breg(target, info, addr, data, write_access);
1533	else
1534		return access_elf_areg(target, info, addr, data, write_access);
1535}
1536
1537void do_gpregs_get(struct unw_frame_info *info, void *arg)
1538{
1539	struct pt_regs *pt;
1540	struct regset_getset *dst = arg;
1541	elf_greg_t tmp[16];
1542	unsigned int i, index, min_copy;
1543
1544	if (unw_unwind_to_user(info) < 0)
1545		return;
1546
1547	/*
1548	 * coredump format:
1549	 *      r0-r31
1550	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
1551	 *      predicate registers (p0-p63)
1552	 *      b0-b7
1553	 *      ip cfm user-mask
1554	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
1555	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
1556	 */
1557
1558
1559	/* Skip r0 */
1560	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1561		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1562						      &dst->u.get.kbuf,
1563						      &dst->u.get.ubuf,
1564						      0, ELF_GR_OFFSET(1));
1565		if (dst->ret || dst->count == 0)
1566			return;
1567	}
1568
1569	/* gr1 - gr15 */
1570	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1571		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1572		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
1573			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
1574		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1575				index++)
1576			if (access_elf_reg(dst->target, info, i,
1577						&tmp[index], 0) < 0) {
1578				dst->ret = -EIO;
1579				return;
1580			}
1581		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1582				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1583				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1584		if (dst->ret || dst->count == 0)
1585			return;
1586	}
1587
1588	/* r16-r31 */
1589	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1590		pt = task_pt_regs(dst->target);
1591		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1592				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
1593				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1594		if (dst->ret || dst->count == 0)
1595			return;
1596	}
1597
1598	/* nat, pr, b0 - b7 */
1599	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1600		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1601		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
1602			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
1603		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1604				index++)
1605			if (access_elf_reg(dst->target, info, i,
1606						&tmp[index], 0) < 0) {
1607				dst->ret = -EIO;
1608				return;
1609			}
1610		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1611				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1612				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1613		if (dst->ret || dst->count == 0)
1614			return;
1615	}
1616
1617	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1618	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1619	 */
1620	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1621		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1622		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
1623			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
1624		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1625				index++)
1626			if (access_elf_reg(dst->target, info, i,
1627						&tmp[index], 0) < 0) {
1628				dst->ret = -EIO;
1629				return;
1630			}
1631		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1632				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1633				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1634	}
1635}
1636
1637void do_gpregs_set(struct unw_frame_info *info, void *arg)
1638{
1639	struct pt_regs *pt;
1640	struct regset_getset *dst = arg;
1641	elf_greg_t tmp[16];
1642	unsigned int i, index;
1643
1644	if (unw_unwind_to_user(info) < 0)
1645		return;
1646
1647	/* Skip r0 */
1648	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1649		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1650						       &dst->u.set.kbuf,
1651						       &dst->u.set.ubuf,
1652						       0, ELF_GR_OFFSET(1));
1653		if (dst->ret || dst->count == 0)
1654			return;
1655	}
1656
1657	/* gr1-gr15 */
1658	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1659		i = dst->pos;
1660		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1661		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1662				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1663				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1664		if (dst->ret)
1665			return;
1666		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1667			if (access_elf_reg(dst->target, info, i,
1668						&tmp[index], 1) < 0) {
1669				dst->ret = -EIO;
1670				return;
1671			}
1672		if (dst->count == 0)
1673			return;
1674	}
1675
1676	/* gr16-gr31 */
1677	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1678		pt = task_pt_regs(dst->target);
1679		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1680				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
1681				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1682		if (dst->ret || dst->count == 0)
1683			return;
1684	}
1685
1686	/* nat, pr, b0 - b7 */
1687	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1688		i = dst->pos;
1689		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1690		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1691				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1692				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1693		if (dst->ret)
1694			return;
1695		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
1696			if (access_elf_reg(dst->target, info, i,
1697						&tmp[index], 1) < 0) {
1698				dst->ret = -EIO;
1699				return;
1700			}
1701		if (dst->count == 0)
1702			return;
1703	}
1704
1705	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1706	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1707	 */
1708	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1709		i = dst->pos;
1710		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1711		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1712				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1713				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1714		if (dst->ret)
1715			return;
1716		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1717			if (access_elf_reg(dst->target, info, i,
1718						&tmp[index], 1) < 0) {
1719				dst->ret = -EIO;
1720				return;
1721			}
1722	}
1723}
1724
1725#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))
1726
1727void do_fpregs_get(struct unw_frame_info *info, void *arg)
1728{
1729	struct regset_getset *dst = arg;
1730	struct task_struct *task = dst->target;
1731	elf_fpreg_t tmp[30];
1732	int index, min_copy, i;
1733
1734	if (unw_unwind_to_user(info) < 0)
1735		return;
1736
1737	/* Skip pos 0 and 1 */
1738	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1739		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1740						      &dst->u.get.kbuf,
1741						      &dst->u.get.ubuf,
1742						      0, ELF_FP_OFFSET(2));
1743		if (dst->count == 0 || dst->ret)
1744			return;
1745	}
1746
1747	/* fr2-fr31 */
1748	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1749		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
1750
1751		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
1752				dst->pos + dst->count);
1753		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
1754				index++)
1755			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
1756					 &tmp[index])) {
1757				dst->ret = -EIO;
1758				return;
1759			}
1760		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1761				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1762				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1763		if (dst->count == 0 || dst->ret)
1764			return;
1765	}
1766
1767	/* fph */
1768	if (dst->count > 0) {
1769		ia64_flush_fph(dst->target);
1770		if (task->thread.flags & IA64_THREAD_FPH_VALID)
1771			dst->ret = user_regset_copyout(
1772				&dst->pos, &dst->count,
1773				&dst->u.get.kbuf, &dst->u.get.ubuf,
1774				&dst->target->thread.fph,
1775				ELF_FP_OFFSET(32), -1);
1776		else
1777			/* Zero fill instead.  */
1778			dst->ret = user_regset_copyout_zero(
1779				&dst->pos, &dst->count,
1780				&dst->u.get.kbuf, &dst->u.get.ubuf,
1781				ELF_FP_OFFSET(32), -1);
1782	}
1783}
1784
1785void do_fpregs_set(struct unw_frame_info *info, void *arg)
1786{
1787	struct regset_getset *dst = arg;
1788	elf_fpreg_t fpreg, tmp[30];
1789	int index, start, end;
1790
1791	if (unw_unwind_to_user(info) < 0)
1792		return;
1793
1794	/* Skip pos 0 and 1 */
1795	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1796		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1797						       &dst->u.set.kbuf,
1798						       &dst->u.set.ubuf,
1799						       0, ELF_FP_OFFSET(2));
1800		if (dst->count == 0 || dst->ret)
1801			return;
1802	}
1803
1804	/* fr2-fr31 */
1805	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1806		start = dst->pos;
1807		end = min(((unsigned int)ELF_FP_OFFSET(32)),
1808			 dst->pos + dst->count);
1809		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1810				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1811				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1812		if (dst->ret)
1813			return;
1814
1815		if (start & 0xF) { /* only write high part */
1816			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
1817					 &fpreg)) {
1818				dst->ret = -EIO;
1819				return;
1820			}
1821			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
1822				= fpreg.u.bits[0];
1823			start &= ~0xFUL;
1824		}
1825		if (end & 0xF) { /* only write low part */
1826			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
1827					&fpreg)) {
1828				dst->ret = -EIO;
1829				return;
1830			}
1831			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
1832				= fpreg.u.bits[1];
1833			end = (end + 0xF) & ~0xFUL;
1834		}
1835
1836		for ( ;	start < end ; start += sizeof(elf_fpreg_t)) {
1837			index = start / sizeof(elf_fpreg_t);
1838			if (unw_set_fr(info, index, tmp[index - 2])) {
1839				dst->ret = -EIO;
1840				return;
1841			}
1842		}
1843		if (dst->ret || dst->count == 0)
1844			return;
1845	}
1846
1847	/* fph */
1848	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
1849		ia64_sync_fph(dst->target);
1850		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1851						&dst->u.set.kbuf,
1852						&dst->u.set.ubuf,
1853						&dst->target->thread.fph,
1854						ELF_FP_OFFSET(32), -1);
1855	}
1856}
1857
1858static int
1859do_regset_call(void (*call)(struct unw_frame_info *, void *),
1860	       struct task_struct *target,
1861	       const struct user_regset *regset,
1862	       unsigned int pos, unsigned int count,
1863	       const void *kbuf, const void __user *ubuf)
1864{
1865	struct regset_getset info = { .target = target, .regset = regset,
1866				 .pos = pos, .count = count,
1867				 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
1868				 .ret = 0 };
1869
1870	if (target == current)
1871		unw_init_running(call, &info);
1872	else {
1873		struct unw_frame_info ufi;
1874		memset(&ufi, 0, sizeof(ufi));
1875		unw_init_from_blocked_task(&ufi, target);
1876		(*call)(&ufi, &info);
1877	}
1878
1879	return info.ret;
1880}
1881
1882static int
1883gpregs_get(struct task_struct *target,
1884	   const struct user_regset *regset,
1885	   unsigned int pos, unsigned int count,
1886	   void *kbuf, void __user *ubuf)
1887{
1888	return do_regset_call(do_gpregs_get, target, regset, pos, count,
1889		kbuf, ubuf);
1890}
1891
1892static int gpregs_set(struct task_struct *target,
1893		const struct user_regset *regset,
1894		unsigned int pos, unsigned int count,
1895		const void *kbuf, const void __user *ubuf)
1896{
1897	return do_regset_call(do_gpregs_set, target, regset, pos, count,
1898		kbuf, ubuf);
1899}
1900
1901static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
1902{
1903	do_sync_rbs(info, ia64_sync_user_rbs);
1904}
1905
1906/*
1907 * This is called to write back the register backing store.
1908 * ptrace does this before it stops, so that a tracer reading the user
1909 * memory after the thread stops will get the current register data.
1910 */
1911static int
1912gpregs_writeback(struct task_struct *target,
1913		 const struct user_regset *regset,
1914		 int now)
1915{
1916	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
1917		return 0;
1918	set_notify_resume(target);
1919	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
1920		NULL, NULL);
1921}
1922
1923static int
1924fpregs_active(struct task_struct *target, const struct user_regset *regset)
1925{
1926	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
1927}
1928
1929static int fpregs_get(struct task_struct *target,
1930		const struct user_regset *regset,
1931		unsigned int pos, unsigned int count,
1932		void *kbuf, void __user *ubuf)
1933{
1934	return do_regset_call(do_fpregs_get, target, regset, pos, count,
1935		kbuf, ubuf);
1936}
1937
1938static int fpregs_set(struct task_struct *target,
1939		const struct user_regset *regset,
1940		unsigned int pos, unsigned int count,
1941		const void *kbuf, const void __user *ubuf)
1942{
1943	return do_regset_call(do_fpregs_set, target, regset, pos, count,
1944		kbuf, ubuf);
1945}
1946
1947static int
1948access_uarea(struct task_struct *child, unsigned long addr,
1949	      unsigned long *data, int write_access)
1950{
1951	unsigned int pos = -1; /* an invalid value */
1952	int ret;
1953	unsigned long *ptr, regnum;
1954
1955	if ((addr & 0x7) != 0) {
1956		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
1957		return -1;
1958	}
1959	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
1960		(addr >= PT_R7 + 8 && addr < PT_B1) ||
1961		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
1962		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
1963		dprintk("ptrace: rejecting access to register "
1964					"address 0x%lx\n", addr);
1965		return -1;
1966	}
1967
1968	switch (addr) {
1969	case PT_F32 ... (PT_F127 + 15):
1970		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
1971		break;
1972	case PT_F2 ... (PT_F5 + 15):
1973		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
1974		break;
1975	case PT_F10 ... (PT_F31 + 15):
1976		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
1977		break;
1978	case PT_F6 ... (PT_F9 + 15):
1979		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
1980		break;
1981	}
1982
1983	if (pos != -1) {
1984		if (write_access)
1985			ret = fpregs_set(child, NULL, pos,
1986				sizeof(unsigned long), data, NULL);
1987		else
1988			ret = fpregs_get(child, NULL, pos,
1989				sizeof(unsigned long), data, NULL);
1990		if (ret != 0)
1991			return -1;
1992		return 0;
1993	}
1994
1995	switch (addr) {
1996	case PT_NAT_BITS:
1997		pos = ELF_NAT_OFFSET;
1998		break;
1999	case PT_R4 ... PT_R7:
2000		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
2001		break;
2002	case PT_B1 ... PT_B5:
2003		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
2004		break;
2005	case PT_AR_EC:
2006		pos = ELF_AR_EC_OFFSET;
2007		break;
2008	case PT_AR_LC:
2009		pos = ELF_AR_LC_OFFSET;
2010		break;
2011	case PT_CR_IPSR:
2012		pos = ELF_CR_IPSR_OFFSET;
2013		break;
2014	case PT_CR_IIP:
2015		pos = ELF_CR_IIP_OFFSET;
2016		break;
2017	case PT_CFM:
2018		pos = ELF_CFM_OFFSET;
2019		break;
2020	case PT_AR_UNAT:
2021		pos = ELF_AR_UNAT_OFFSET;
2022		break;
2023	case PT_AR_PFS:
2024		pos = ELF_AR_PFS_OFFSET;
2025		break;
2026	case PT_AR_RSC:
2027		pos = ELF_AR_RSC_OFFSET;
2028		break;
2029	case PT_AR_RNAT:
2030		pos = ELF_AR_RNAT_OFFSET;
2031		break;
2032	case PT_AR_BSPSTORE:
2033		pos = ELF_AR_BSPSTORE_OFFSET;
2034		break;
2035	case PT_PR:
2036		pos = ELF_PR_OFFSET;
2037		break;
2038	case PT_B6:
2039		pos = ELF_BR_OFFSET(6);
2040		break;
2041	case PT_AR_BSP:
2042		pos = ELF_AR_BSP_OFFSET;
2043		break;
2044	case PT_R1 ... PT_R3:
2045		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
2046		break;
2047	case PT_R12 ... PT_R15:
2048		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
2049		break;
2050	case PT_R8 ... PT_R11:
2051		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
2052		break;
2053	case PT_R16 ... PT_R31:
2054		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
2055		break;
2056	case PT_AR_CCV:
2057		pos = ELF_AR_CCV_OFFSET;
2058		break;
2059	case PT_AR_FPSR:
2060		pos = ELF_AR_FPSR_OFFSET;
2061		break;
2062	case PT_B0:
2063		pos = ELF_BR_OFFSET(0);
2064		break;
2065	case PT_B7:
2066		pos = ELF_BR_OFFSET(7);
2067		break;
2068	case PT_AR_CSD:
2069		pos = ELF_AR_CSD_OFFSET;
2070		break;
2071	case PT_AR_SSD:
2072		pos = ELF_AR_SSD_OFFSET;
2073		break;
2074	}
2075
2076	if (pos != -1) {
2077		if (write_access)
2078			ret = gpregs_set(child, NULL, pos,
2079				sizeof(unsigned long), data, NULL);
2080		else
2081			ret = gpregs_get(child, NULL, pos,
2082				sizeof(unsigned long), data, NULL);
2083		if (ret != 0)
2084			return -1;
2085		return 0;
2086	}
2087
2088	/* access debug registers */
2089	if (addr >= PT_IBR) {
2090		regnum = (addr - PT_IBR) >> 3;
2091		ptr = &child->thread.ibr[0];
2092	} else {
2093		regnum = (addr - PT_DBR) >> 3;
2094		ptr = &child->thread.dbr[0];
2095	}
2096
2097	if (regnum >= 8) {
2098		dprintk("ptrace: rejecting access to register "
2099				"address 0x%lx\n", addr);
2100		return -1;
2101	}
2102#ifdef CONFIG_PERFMON
2103	/*
2104	 * Check if debug registers are used by perfmon. This
2105	 * test must be done once we know that we can do the
2106	 * operation, i.e. the arguments are all valid, but
2107	 * before we start modifying the state.
2108	 *
2109	 * Perfmon needs to keep a count of how many processes
2110	 * are trying to modify the debug registers for system
2111	 * wide monitoring sessions.
2112	 *
2113	 * We also include read access here, because they may
2114	 * cause the PMU-installed debug register state
2115	 * (dbr[], ibr[]) to be reset. The two arrays are also
2116	 * used by perfmon, but we do not use
2117	 * IA64_THREAD_DBG_VALID. The registers are restored
2118	 * by the PMU context switch code.
2119	 */
2120	if (pfm_use_debug_registers(child))
2121		return -1;
2122#endif
2123
2124	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
2125		child->thread.flags |= IA64_THREAD_DBG_VALID;
2126		memset(child->thread.dbr, 0,
2127				sizeof(child->thread.dbr));
2128		memset(child->thread.ibr, 0,
2129				sizeof(child->thread.ibr));
2130	}
2131
2132	ptr += regnum;
2133
2134	if ((regnum & 1) && write_access) {
2135		/* don't let the user set kernel-level breakpoints: */
2136		*ptr = *data & ~(7UL << 56);
2137		return 0;
2138	}
2139	if (write_access)
2140		*ptr = *data;
2141	else
2142		*data = *ptr;
2143	return 0;
2144}
2145
2146static const struct user_regset native_regsets[] = {
2147	{
2148		.core_note_type = NT_PRSTATUS,
2149		.n = ELF_NGREG,
2150		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
2151		.get = gpregs_get, .set = gpregs_set,
2152		.writeback = gpregs_writeback
2153	},
2154	{
2155		.core_note_type = NT_PRFPREG,
2156		.n = ELF_NFPREG,
2157		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
2158		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
2159	},
2160};
2161
2162static const struct user_regset_view user_ia64_view = {
2163	.name = "ia64",
2164	.e_machine = EM_IA_64,
2165	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2166};
2167
2168const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
2169{
2170	return &user_ia64_view;
2171}
2172
2173struct syscall_get_set_args {
2174	unsigned int i;
2175	unsigned int n;
2176	unsigned long *args;
2177	struct pt_regs *regs;
2178	int rw;
2179};
2180
2181static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
2182{
2183	struct syscall_get_set_args *args = data;
2184	struct pt_regs *pt = args->regs;
2185	unsigned long *krbs, cfm, ndirty;
2186	int i, count;
2187
2188	if (unw_unwind_to_user(info) < 0)
2189		return;
2190
2191	cfm = pt->cr_ifs;
2192	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
2193	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
2194
2195	count = 0;
2196	if (in_syscall(pt))
2197		count = min_t(int, args->n, cfm & 0x7f);
2198
2199	for (i = 0; i < count; i++) {
2200		if (args->rw)
2201			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
2202				args->args[i];
2203		else
2204			args->args[i] = *ia64_rse_skip_regs(krbs,
2205				ndirty + i + args->i);
2206	}
2207
2208	if (!args->rw) {
2209		while (i < args->n) {
2210			args->args[i] = 0;
2211			i++;
2212		}
2213	}
2214}
2215
2216void ia64_syscall_get_set_arguments(struct task_struct *task,
2217	struct pt_regs *regs, unsigned int i, unsigned int n,
2218	unsigned long *args, int rw)
2219{
2220	struct syscall_get_set_args data = {
2221		.i = i,
2222		.n = n,
2223		.args = args,
2224		.regs = regs,
2225		.rw = rw,
2226	};
2227
2228	if (task == current)
2229		unw_init_running(syscall_get_set_args_cb, &data);
2230	else {
2231		struct unw_frame_info ufi;
2232		memset(&ufi, 0, sizeof(ufi));
2233		unw_init_from_blocked_task(&ufi, task);
2234		syscall_get_set_args_cb(&ufi, &data);
2235	}
2236}
v4.10.11
   1/*
   2 * Kernel support for the ptrace() and syscall tracing interfaces.
   3 *
   4 * Copyright (C) 1999-2005 Hewlett-Packard Co
   5 *	David Mosberger-Tang <davidm@hpl.hp.com>
   6 * Copyright (C) 2006 Intel Co
   7 *  2006-08-12	- IA64 Native Utrace implementation support added by
   8 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
   9 *
  10 * Derived from the x86 and Alpha versions.
  11 */
  12#include <linux/kernel.h>
  13#include <linux/sched.h>
  14#include <linux/mm.h>
  15#include <linux/errno.h>
  16#include <linux/ptrace.h>
  17#include <linux/user.h>
  18#include <linux/security.h>
  19#include <linux/audit.h>
  20#include <linux/signal.h>
  21#include <linux/regset.h>
  22#include <linux/elf.h>
  23#include <linux/tracehook.h>
  24
  25#include <asm/pgtable.h>
  26#include <asm/processor.h>
  27#include <asm/ptrace_offsets.h>
  28#include <asm/rse.h>
  29#include <linux/uaccess.h>
 
  30#include <asm/unwind.h>
  31#ifdef CONFIG_PERFMON
  32#include <asm/perfmon.h>
  33#endif
  34
  35#include "entry.h"
  36
  37/*
  38 * Bits in the PSR that we allow ptrace() to change:
  39 *	be, up, ac, mfl, mfh (the user mask; five bits total)
  40 *	db (debug breakpoint fault; one bit)
  41 *	id (instruction debug fault disable; one bit)
  42 *	dd (data debug fault disable; one bit)
  43 *	ri (restart instruction; two bits)
  44 *	is (instruction set; one bit)
  45 */
  46#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
  47		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
  48
  49#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
  50#define PFM_MASK	MASK(38)
  51
  52#define PTRACE_DEBUG	0
  53
  54#if PTRACE_DEBUG
  55# define dprintk(format...)	printk(format)
  56# define inline
  57#else
  58# define dprintk(format...)
  59#endif
  60
  61/* Return TRUE if PT was created due to kernel-entry via a system-call.  */
  62
  63static inline int
  64in_syscall (struct pt_regs *pt)
  65{
  66	return (long) pt->cr_ifs >= 0;
  67}
  68
  69/*
  70 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
  71 * bitset where bit i is set iff the NaT bit of register i is set.
  72 */
  73unsigned long
  74ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
  75{
  76#	define GET_BITS(first, last, unat)				\
  77	({								\
  78		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
  79		unsigned long nbits = (last - first + 1);		\
  80		unsigned long mask = MASK(nbits) << first;		\
  81		unsigned long dist;					\
  82		if (bit < first)					\
  83			dist = 64 + bit - first;			\
  84		else							\
  85			dist = bit - first;				\
  86		ia64_rotr(unat, dist) & mask;				\
  87	})
  88	unsigned long val;
  89
  90	/*
  91	 * Registers that are stored consecutively in struct pt_regs
  92	 * can be handled in parallel.  If the register order in
  93	 * struct_pt_regs changes, this code MUST be updated.
  94	 */
  95	val  = GET_BITS( 1,  1, scratch_unat);
  96	val |= GET_BITS( 2,  3, scratch_unat);
  97	val |= GET_BITS(12, 13, scratch_unat);
  98	val |= GET_BITS(14, 14, scratch_unat);
  99	val |= GET_BITS(15, 15, scratch_unat);
 100	val |= GET_BITS( 8, 11, scratch_unat);
 101	val |= GET_BITS(16, 31, scratch_unat);
 102	return val;
 103
 104#	undef GET_BITS
 105}
 106
 107/*
 108 * Set the NaT bits for the scratch registers according to NAT and
 109 * return the resulting unat (assuming the scratch registers are
 110 * stored in PT).
 111 */
 112unsigned long
 113ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
 114{
 115#	define PUT_BITS(first, last, nat)				\
 116	({								\
 117		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
 118		unsigned long nbits = (last - first + 1);		\
 119		unsigned long mask = MASK(nbits) << first;		\
 120		long dist;						\
 121		if (bit < first)					\
 122			dist = 64 + bit - first;			\
 123		else							\
 124			dist = bit - first;				\
 125		ia64_rotl(nat & mask, dist);				\
 126	})
 127	unsigned long scratch_unat;
 128
 129	/*
 130	 * Registers that are stored consecutively in struct pt_regs
 131	 * can be handled in parallel.  If the register order in
 132	 * struct_pt_regs changes, this code MUST be updated.
 133	 */
 134	scratch_unat  = PUT_BITS( 1,  1, nat);
 135	scratch_unat |= PUT_BITS( 2,  3, nat);
 136	scratch_unat |= PUT_BITS(12, 13, nat);
 137	scratch_unat |= PUT_BITS(14, 14, nat);
 138	scratch_unat |= PUT_BITS(15, 15, nat);
 139	scratch_unat |= PUT_BITS( 8, 11, nat);
 140	scratch_unat |= PUT_BITS(16, 31, nat);
 141
 142	return scratch_unat;
 143
 144#	undef PUT_BITS
 145}
 146
 147#define IA64_MLX_TEMPLATE	0x2
 148#define IA64_MOVL_OPCODE	6
 149
 150void
 151ia64_increment_ip (struct pt_regs *regs)
 152{
 153	unsigned long w0, ri = ia64_psr(regs)->ri + 1;
 154
 155	if (ri > 2) {
 156		ri = 0;
 157		regs->cr_iip += 16;
 158	} else if (ri == 2) {
 159		get_user(w0, (char __user *) regs->cr_iip + 0);
 160		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
 161			/*
 162			 * rfi'ing to slot 2 of an MLX bundle causes
 163			 * an illegal operation fault.  We don't want
 164			 * that to happen...
 165			 */
 166			ri = 0;
 167			regs->cr_iip += 16;
 168		}
 169	}
 170	ia64_psr(regs)->ri = ri;
 171}
 172
 173void
 174ia64_decrement_ip (struct pt_regs *regs)
 175{
 176	unsigned long w0, ri = ia64_psr(regs)->ri - 1;
 177
 178	if (ia64_psr(regs)->ri == 0) {
 179		regs->cr_iip -= 16;
 180		ri = 2;
 181		get_user(w0, (char __user *) regs->cr_iip + 0);
 182		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
 183			/*
 184			 * rfi'ing to slot 2 of an MLX bundle causes
 185			 * an illegal operation fault.  We don't want
 186			 * that to happen...
 187			 */
 188			ri = 1;
 189		}
 190	}
 191	ia64_psr(regs)->ri = ri;
 192}
 193
 194/*
 195 * This routine is used to read an rnat bits that are stored on the
 196 * kernel backing store.  Since, in general, the alignment of the user
 197 * and kernel are different, this is not completely trivial.  In
 198 * essence, we need to construct the user RNAT based on up to two
 199 * kernel RNAT values and/or the RNAT value saved in the child's
 200 * pt_regs.
 201 *
 202 * user rbs
 203 *
 204 * +--------+ <-- lowest address
 205 * | slot62 |
 206 * +--------+
 207 * |  rnat  | 0x....1f8
 208 * +--------+
 209 * | slot00 | \
 210 * +--------+ |
 211 * | slot01 | > child_regs->ar_rnat
 212 * +--------+ |
 213 * | slot02 | /				kernel rbs
 214 * +--------+				+--------+
 215 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 216 * +- - - - +				+--------+
 217 *					| slot62 |
 218 * +- - - - +				+--------+
 219 *					|  rnat	 |
 220 * +- - - - +				+--------+
 221 *   vrnat				| slot00 |
 222 * +- - - - +				+--------+
 223 *					=	 =
 224 *					+--------+
 225 *					| slot00 | \
 226 *					+--------+ |
 227 *					| slot01 | > child_stack->ar_rnat
 228 *					+--------+ |
 229 *					| slot02 | /
 230 *					+--------+
 231 *						  <--- child_stack->ar_bspstore
 232 *
 233 * The way to think of this code is as follows: bit 0 in the user rnat
 234 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 235 * value.  The kernel rnat value holding this bit is stored in
 236 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 237 * form the upper bits of the user rnat value.
 238 *
 239 * Boundary cases:
 240 *
 241 * o when reading the rnat "below" the first rnat slot on the kernel
 242 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 243 *   merged in from pt->ar_rnat.
 244 *
 245 * o when reading the rnat "above" the last rnat slot on the kernel
 246 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 247 */
 248static unsigned long
 249get_rnat (struct task_struct *task, struct switch_stack *sw,
 250	  unsigned long *krbs, unsigned long *urnat_addr,
 251	  unsigned long *urbs_end)
 252{
 253	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
 254	unsigned long umask = 0, mask, m;
 255	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
 256	long num_regs, nbits;
 257	struct pt_regs *pt;
 258
 259	pt = task_pt_regs(task);
 260	kbsp = (unsigned long *) sw->ar_bspstore;
 261	ubspstore = (unsigned long *) pt->ar_bspstore;
 262
 263	if (urbs_end < urnat_addr)
 264		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
 265	else
 266		nbits = 63;
 267	mask = MASK(nbits);
 268	/*
 269	 * First, figure out which bit number slot 0 in user-land maps
 270	 * to in the kernel rnat.  Do this by figuring out how many
 271	 * register slots we're beyond the user's backingstore and
 272	 * then computing the equivalent address in kernel space.
 273	 */
 274	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
 275	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
 276	shift = ia64_rse_slot_num(slot0_kaddr);
 277	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
 278	rnat0_kaddr = rnat1_kaddr - 64;
 279
 280	if (ubspstore + 63 > urnat_addr) {
 281		/* some bits need to be merged in from pt->ar_rnat */
 282		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
 283		urnat = (pt->ar_rnat & umask);
 284		mask &= ~umask;
 285		if (!mask)
 286			return urnat;
 287	}
 288
 289	m = mask << shift;
 290	if (rnat0_kaddr >= kbsp)
 291		rnat0 = sw->ar_rnat;
 292	else if (rnat0_kaddr > krbs)
 293		rnat0 = *rnat0_kaddr;
 294	urnat |= (rnat0 & m) >> shift;
 295
 296	m = mask >> (63 - shift);
 297	if (rnat1_kaddr >= kbsp)
 298		rnat1 = sw->ar_rnat;
 299	else if (rnat1_kaddr > krbs)
 300		rnat1 = *rnat1_kaddr;
 301	urnat |= (rnat1 & m) << (63 - shift);
 302	return urnat;
 303}
 304
 305/*
 306 * The reverse of get_rnat.
 307 */
 308static void
 309put_rnat (struct task_struct *task, struct switch_stack *sw,
 310	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
 311	  unsigned long *urbs_end)
 312{
 313	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
 314	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
 315	long num_regs, nbits;
 316	struct pt_regs *pt;
 317	unsigned long cfm, *urbs_kargs;
 318
 319	pt = task_pt_regs(task);
 320	kbsp = (unsigned long *) sw->ar_bspstore;
 321	ubspstore = (unsigned long *) pt->ar_bspstore;
 322
 323	urbs_kargs = urbs_end;
 324	if (in_syscall(pt)) {
 325		/*
 326		 * If entered via syscall, don't allow user to set rnat bits
 327		 * for syscall args.
 328		 */
 329		cfm = pt->cr_ifs;
 330		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
 331	}
 332
 333	if (urbs_kargs >= urnat_addr)
 334		nbits = 63;
 335	else {
 336		if ((urnat_addr - 63) >= urbs_kargs)
 337			return;
 338		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
 339	}
 340	mask = MASK(nbits);
 341
 342	/*
 343	 * First, figure out which bit number slot 0 in user-land maps
 344	 * to in the kernel rnat.  Do this by figuring out how many
 345	 * register slots we're beyond the user's backingstore and
 346	 * then computing the equivalent address in kernel space.
 347	 */
 348	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
 349	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
 350	shift = ia64_rse_slot_num(slot0_kaddr);
 351	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
 352	rnat0_kaddr = rnat1_kaddr - 64;
 353
 354	if (ubspstore + 63 > urnat_addr) {
 355		/* some bits need to be place in pt->ar_rnat: */
 356		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
 357		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
 358		mask &= ~umask;
 359		if (!mask)
 360			return;
 361	}
 362	/*
 363	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
 364	 * rnat slot is ignored. so we don't have to clear it here.
 365	 */
 366	rnat0 = (urnat << shift);
 367	m = mask << shift;
 368	if (rnat0_kaddr >= kbsp)
 369		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
 370	else if (rnat0_kaddr > krbs)
 371		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
 372
 373	rnat1 = (urnat >> (63 - shift));
 374	m = mask >> (63 - shift);
 375	if (rnat1_kaddr >= kbsp)
 376		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
 377	else if (rnat1_kaddr > krbs)
 378		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
 379}
 380
 381static inline int
 382on_kernel_rbs (unsigned long addr, unsigned long bspstore,
 383	       unsigned long urbs_end)
 384{
 385	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
 386						      urbs_end);
 387	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
 388}
 389
 390/*
 391 * Read a word from the user-level backing store of task CHILD.  ADDR
 392 * is the user-level address to read the word from, VAL a pointer to
 393 * the return value, and USER_BSP gives the end of the user-level
 394 * backing store (i.e., it's the address that would be in ar.bsp after
 395 * the user executed a "cover" instruction).
 396 *
 397 * This routine takes care of accessing the kernel register backing
 398 * store for those registers that got spilled there.  It also takes
 399 * care of calculating the appropriate RNaT collection words.
 400 */
 401long
 402ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
 403	   unsigned long user_rbs_end, unsigned long addr, long *val)
 404{
 405	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
 406	struct pt_regs *child_regs;
 407	size_t copied;
 408	long ret;
 409
 410	urbs_end = (long *) user_rbs_end;
 411	laddr = (unsigned long *) addr;
 412	child_regs = task_pt_regs(child);
 413	bspstore = (unsigned long *) child_regs->ar_bspstore;
 414	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 415	if (on_kernel_rbs(addr, (unsigned long) bspstore,
 416			  (unsigned long) urbs_end))
 417	{
 418		/*
 419		 * Attempt to read the RBS in an area that's actually
 420		 * on the kernel RBS => read the corresponding bits in
 421		 * the kernel RBS.
 422		 */
 423		rnat_addr = ia64_rse_rnat_addr(laddr);
 424		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
 425
 426		if (laddr == rnat_addr) {
 427			/* return NaT collection word itself */
 428			*val = ret;
 429			return 0;
 430		}
 431
 432		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
 433			/*
 434			 * It is implementation dependent whether the
 435			 * data portion of a NaT value gets saved on a
 436			 * st8.spill or RSE spill (e.g., see EAS 2.6,
 437			 * 4.4.4.6 Register Spill and Fill).  To get
 438			 * consistent behavior across all possible
 439			 * IA-64 implementations, we return zero in
 440			 * this case.
 441			 */
 442			*val = 0;
 443			return 0;
 444		}
 445
 446		if (laddr < urbs_end) {
 447			/*
 448			 * The desired word is on the kernel RBS and
 449			 * is not a NaT.
 450			 */
 451			regnum = ia64_rse_num_regs(bspstore, laddr);
 452			*val = *ia64_rse_skip_regs(krbs, regnum);
 453			return 0;
 454		}
 455	}
 456	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
 457	if (copied != sizeof(ret))
 458		return -EIO;
 459	*val = ret;
 460	return 0;
 461}
 462
 463long
 464ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
 465	   unsigned long user_rbs_end, unsigned long addr, long val)
 466{
 467	unsigned long *bspstore, *krbs, regnum, *laddr;
 468	unsigned long *urbs_end = (long *) user_rbs_end;
 469	struct pt_regs *child_regs;
 470
 471	laddr = (unsigned long *) addr;
 472	child_regs = task_pt_regs(child);
 473	bspstore = (unsigned long *) child_regs->ar_bspstore;
 474	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 475	if (on_kernel_rbs(addr, (unsigned long) bspstore,
 476			  (unsigned long) urbs_end))
 477	{
 478		/*
 479		 * Attempt to write the RBS in an area that's actually
 480		 * on the kernel RBS => write the corresponding bits
 481		 * in the kernel RBS.
 482		 */
 483		if (ia64_rse_is_rnat_slot(laddr))
 484			put_rnat(child, child_stack, krbs, laddr, val,
 485				 urbs_end);
 486		else {
 487			if (laddr < urbs_end) {
 488				regnum = ia64_rse_num_regs(bspstore, laddr);
 489				*ia64_rse_skip_regs(krbs, regnum) = val;
 490			}
 491		}
 492	} else if (access_process_vm(child, addr, &val, sizeof(val),
 493				FOLL_FORCE | FOLL_WRITE)
 494		   != sizeof(val))
 495		return -EIO;
 496	return 0;
 497}
 498
 499/*
 500 * Calculate the address of the end of the user-level register backing
 501 * store.  This is the address that would have been stored in ar.bsp
 502 * if the user had executed a "cover" instruction right before
 503 * entering the kernel.  If CFMP is not NULL, it is used to return the
 504 * "current frame mask" that was active at the time the kernel was
 505 * entered.
 506 */
 507unsigned long
 508ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
 509		       unsigned long *cfmp)
 510{
 511	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
 512	long ndirty;
 513
 514	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 515	bspstore = (unsigned long *) pt->ar_bspstore;
 516	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
 517
 518	if (in_syscall(pt))
 519		ndirty += (cfm & 0x7f);
 520	else
 521		cfm &= ~(1UL << 63);	/* clear valid bit */
 522
 523	if (cfmp)
 524		*cfmp = cfm;
 525	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
 526}
 527
 528/*
 529 * Synchronize (i.e, write) the RSE backing store living in kernel
 530 * space to the VM of the CHILD task.  SW and PT are the pointers to
 531 * the switch_stack and pt_regs structures, respectively.
 532 * USER_RBS_END is the user-level address at which the backing store
 533 * ends.
 534 */
 535long
 536ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
 537		    unsigned long user_rbs_start, unsigned long user_rbs_end)
 538{
 539	unsigned long addr, val;
 540	long ret;
 541
 542	/* now copy word for word from kernel rbs to user rbs: */
 543	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
 544		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
 545		if (ret < 0)
 546			return ret;
 547		if (access_process_vm(child, addr, &val, sizeof(val),
 548				FOLL_FORCE | FOLL_WRITE)
 549		    != sizeof(val))
 550			return -EIO;
 551	}
 552	return 0;
 553}
 554
 555static long
 556ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
 557		unsigned long user_rbs_start, unsigned long user_rbs_end)
 558{
 559	unsigned long addr, val;
 560	long ret;
 561
 562	/* now copy word for word from user rbs to kernel rbs: */
 563	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
 564		if (access_process_vm(child, addr, &val, sizeof(val),
 565				FOLL_FORCE)
 566				!= sizeof(val))
 567			return -EIO;
 568
 569		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
 570		if (ret < 0)
 571			return ret;
 572	}
 573	return 0;
 574}
 575
 576typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
 577			    unsigned long, unsigned long);
 578
 579static void do_sync_rbs(struct unw_frame_info *info, void *arg)
 580{
 581	struct pt_regs *pt;
 582	unsigned long urbs_end;
 583	syncfunc_t fn = arg;
 584
 585	if (unw_unwind_to_user(info) < 0)
 586		return;
 587	pt = task_pt_regs(info->task);
 588	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);
 589
 590	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
 591}
 592
 593/*
 594 * when a thread is stopped (ptraced), debugger might change thread's user
 595 * stack (change memory directly), and we must avoid the RSE stored in kernel
 596 * to override user stack (user space's RSE is newer than kernel's in the
 597 * case). To workaround the issue, we copy kernel RSE to user RSE before the
 598 * task is stopped, so user RSE has updated data.  we then copy user RSE to
 599 * kernel after the task is resummed from traced stop and kernel will use the
 600 * newer RSE to return to user. TIF_RESTORE_RSE is the flag to indicate we need
 601 * synchronize user RSE to kernel.
 602 */
 603void ia64_ptrace_stop(void)
 604{
 605	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
 606		return;
 607	set_notify_resume(current);
 608	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
 609}
 610
 611/*
 612 * This is called to read back the register backing store.
 613 */
 614void ia64_sync_krbs(void)
 615{
 616	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
 617
 618	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
 619}
 620
 621/*
 622 * After PTRACE_ATTACH, a thread's register backing store area in user
 623 * space is assumed to contain correct data whenever the thread is
 624 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 625 * But if the child was already stopped for job control when we attach
 626 * to it, then it might not ever get into ptrace_stop by the time we
 627 * want to examine the user memory containing the RBS.
 628 */
 629void
 630ptrace_attach_sync_user_rbs (struct task_struct *child)
 631{
 632	int stopped = 0;
 633	struct unw_frame_info info;
 634
 635	/*
 636	 * If the child is in TASK_STOPPED, we need to change that to
 637	 * TASK_TRACED momentarily while we operate on it.  This ensures
 638	 * that the child won't be woken up and return to user mode while
 639	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
 640	 */
 641
 642	read_lock(&tasklist_lock);
 643	if (child->sighand) {
 644		spin_lock_irq(&child->sighand->siglock);
 645		if (child->state == TASK_STOPPED &&
 646		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
 647			set_notify_resume(child);
 648
 649			child->state = TASK_TRACED;
 650			stopped = 1;
 651		}
 652		spin_unlock_irq(&child->sighand->siglock);
 653	}
 654	read_unlock(&tasklist_lock);
 655
 656	if (!stopped)
 657		return;
 658
 659	unw_init_from_blocked_task(&info, child);
 660	do_sync_rbs(&info, ia64_sync_user_rbs);
 661
 662	/*
 663	 * Now move the child back into TASK_STOPPED if it should be in a
 664	 * job control stop, so that SIGCONT can be used to wake it up.
 665	 */
 666	read_lock(&tasklist_lock);
 667	if (child->sighand) {
 668		spin_lock_irq(&child->sighand->siglock);
 669		if (child->state == TASK_TRACED &&
 670		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
 671			child->state = TASK_STOPPED;
 672		}
 673		spin_unlock_irq(&child->sighand->siglock);
 674	}
 675	read_unlock(&tasklist_lock);
 676}
 677
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 678/*
 679 * Write f32-f127 back to task->thread.fph if it has been modified.
 680 */
 681inline void
 682ia64_flush_fph (struct task_struct *task)
 683{
 684	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 685
 686	/*
 687	 * Prevent migrating this task while
 688	 * we're fiddling with the FPU state
 689	 */
 690	preempt_disable();
 691	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
 692		psr->mfh = 0;
 693		task->thread.flags |= IA64_THREAD_FPH_VALID;
 694		ia64_save_fpu(&task->thread.fph[0]);
 695	}
 696	preempt_enable();
 697}
 698
 699/*
 700 * Sync the fph state of the task so that it can be manipulated
 701 * through thread.fph.  If necessary, f32-f127 are written back to
 702 * thread.fph or, if the fph state hasn't been used before, thread.fph
 703 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 704 * ensure that the task picks up the state from thread.fph when it
 705 * executes again.
 706 */
 707void
 708ia64_sync_fph (struct task_struct *task)
 709{
 710	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 711
 712	ia64_flush_fph(task);
 713	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
 714		task->thread.flags |= IA64_THREAD_FPH_VALID;
 715		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
 716	}
 717	ia64_drop_fpu(task);
 718	psr->dfh = 1;
 719}
 720
 721/*
 722 * Change the machine-state of CHILD such that it will return via the normal
 723 * kernel exit-path, rather than the syscall-exit path.
 724 */
 725static void
 726convert_to_non_syscall (struct task_struct *child, struct pt_regs  *pt,
 727			unsigned long cfm)
 728{
 729	struct unw_frame_info info, prev_info;
 730	unsigned long ip, sp, pr;
 731
 732	unw_init_from_blocked_task(&info, child);
 733	while (1) {
 734		prev_info = info;
 735		if (unw_unwind(&info) < 0)
 736			return;
 737
 738		unw_get_sp(&info, &sp);
 739		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
 740		    < IA64_PT_REGS_SIZE) {
 741			dprintk("ptrace.%s: ran off the top of the kernel "
 742				"stack\n", __func__);
 743			return;
 744		}
 745		if (unw_get_pr (&prev_info, &pr) < 0) {
 746			unw_get_rp(&prev_info, &ip);
 747			dprintk("ptrace.%s: failed to read "
 748				"predicate register (ip=0x%lx)\n",
 749				__func__, ip);
 750			return;
 751		}
 752		if (unw_is_intr_frame(&info)
 753		    && (pr & (1UL << PRED_USER_STACK)))
 754			break;
 755	}
 756
 757	/*
 758	 * Note: at the time of this call, the target task is blocked
 759	 * in notify_resume_user() and by clearling PRED_LEAVE_SYSCALL
 760	 * (aka, "pLvSys") we redirect execution from
 761	 * .work_pending_syscall_end to .work_processed_kernel.
 762	 */
 763	unw_get_pr(&prev_info, &pr);
 764	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
 765	pr |=  (1UL << PRED_NON_SYSCALL);
 766	unw_set_pr(&prev_info, pr);
 767
 768	pt->cr_ifs = (1UL << 63) | cfm;
 769	/*
 770	 * Clear the memory that is NOT written on syscall-entry to
 771	 * ensure we do not leak kernel-state to user when execution
 772	 * resumes.
 773	 */
 774	pt->r2 = 0;
 775	pt->r3 = 0;
 776	pt->r14 = 0;
 777	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
 778	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
 779	pt->b7 = 0;
 780	pt->ar_ccv = 0;
 781	pt->ar_csd = 0;
 782	pt->ar_ssd = 0;
 783}
 784
 785static int
 786access_nat_bits (struct task_struct *child, struct pt_regs *pt,
 787		 struct unw_frame_info *info,
 788		 unsigned long *data, int write_access)
 789{
 790	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
 791	char nat = 0;
 792
 793	if (write_access) {
 794		nat_bits = *data;
 795		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
 796		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
 797			dprintk("ptrace: failed to set ar.unat\n");
 798			return -1;
 799		}
 800		for (regnum = 4; regnum <= 7; ++regnum) {
 801			unw_get_gr(info, regnum, &dummy, &nat);
 802			unw_set_gr(info, regnum, dummy,
 803				   (nat_bits >> regnum) & 1);
 804		}
 805	} else {
 806		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
 807			dprintk("ptrace: failed to read ar.unat\n");
 808			return -1;
 809		}
 810		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
 811		for (regnum = 4; regnum <= 7; ++regnum) {
 812			unw_get_gr(info, regnum, &dummy, &nat);
 813			nat_bits |= (nat != 0) << regnum;
 814		}
 815		*data = nat_bits;
 816	}
 817	return 0;
 818}
 819
 820static int
 821access_uarea (struct task_struct *child, unsigned long addr,
 822	      unsigned long *data, int write_access);
 823
 824static long
 825ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 826{
 827	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
 828	struct unw_frame_info info;
 829	struct ia64_fpreg fpval;
 830	struct switch_stack *sw;
 831	struct pt_regs *pt;
 832	long ret, retval = 0;
 833	char nat = 0;
 834	int i;
 835
 836	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
 837		return -EIO;
 838
 839	pt = task_pt_regs(child);
 840	sw = (struct switch_stack *) (child->thread.ksp + 16);
 841	unw_init_from_blocked_task(&info, child);
 842	if (unw_unwind_to_user(&info) < 0) {
 843		return -EIO;
 844	}
 845
 846	if (((unsigned long) ppr & 0x7) != 0) {
 847		dprintk("ptrace:unaligned register address %p\n", ppr);
 848		return -EIO;
 849	}
 850
 851	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
 852	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
 853	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
 854	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
 855	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
 856	    || access_uarea(child, PT_CFM, &cfm, 0)
 857	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
 858		return -EIO;
 859
 860	/* control regs */
 861
 862	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
 863	retval |= __put_user(psr, &ppr->cr_ipsr);
 864
 865	/* app regs */
 866
 867	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
 868	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
 869	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
 870	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
 871	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
 872	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
 873
 874	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
 875	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
 876	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
 877	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
 878	retval |= __put_user(cfm, &ppr->cfm);
 879
 880	/* gr1-gr3 */
 881
 882	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
 883	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
 884
 885	/* gr4-gr7 */
 886
 887	for (i = 4; i < 8; i++) {
 888		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
 889			return -EIO;
 890		retval |= __put_user(val, &ppr->gr[i]);
 891	}
 892
 893	/* gr8-gr11 */
 894
 895	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
 896
 897	/* gr12-gr15 */
 898
 899	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
 900	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
 901	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
 902
 903	/* gr16-gr31 */
 904
 905	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
 906
 907	/* b0 */
 908
 909	retval |= __put_user(pt->b0, &ppr->br[0]);
 910
 911	/* b1-b5 */
 912
 913	for (i = 1; i < 6; i++) {
 914		if (unw_access_br(&info, i, &val, 0) < 0)
 915			return -EIO;
 916		__put_user(val, &ppr->br[i]);
 917	}
 918
 919	/* b6-b7 */
 920
 921	retval |= __put_user(pt->b6, &ppr->br[6]);
 922	retval |= __put_user(pt->b7, &ppr->br[7]);
 923
 924	/* fr2-fr5 */
 925
 926	for (i = 2; i < 6; i++) {
 927		if (unw_get_fr(&info, i, &fpval) < 0)
 928			return -EIO;
 929		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
 930	}
 931
 932	/* fr6-fr11 */
 933
 934	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
 935				 sizeof(struct ia64_fpreg) * 6);
 936
 937	/* fp scratch regs(12-15) */
 938
 939	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
 940				 sizeof(struct ia64_fpreg) * 4);
 941
 942	/* fr16-fr31 */
 943
 944	for (i = 16; i < 32; i++) {
 945		if (unw_get_fr(&info, i, &fpval) < 0)
 946			return -EIO;
 947		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
 948	}
 949
 950	/* fph */
 951
 952	ia64_flush_fph(child);
 953	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
 954				 sizeof(ppr->fr[32]) * 96);
 955
 956	/*  preds */
 957
 958	retval |= __put_user(pt->pr, &ppr->pr);
 959
 960	/* nat bits */
 961
 962	retval |= __put_user(nat_bits, &ppr->nat);
 963
 964	ret = retval ? -EIO : 0;
 965	return ret;
 966}
 967
 968static long
 969ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 970{
 971	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
 972	struct unw_frame_info info;
 973	struct switch_stack *sw;
 974	struct ia64_fpreg fpval;
 975	struct pt_regs *pt;
 976	long ret, retval = 0;
 977	int i;
 978
 979	memset(&fpval, 0, sizeof(fpval));
 980
 981	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
 982		return -EIO;
 983
 984	pt = task_pt_regs(child);
 985	sw = (struct switch_stack *) (child->thread.ksp + 16);
 986	unw_init_from_blocked_task(&info, child);
 987	if (unw_unwind_to_user(&info) < 0) {
 988		return -EIO;
 989	}
 990
 991	if (((unsigned long) ppr & 0x7) != 0) {
 992		dprintk("ptrace:unaligned register address %p\n", ppr);
 993		return -EIO;
 994	}
 995
 996	/* control regs */
 997
 998	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
 999	retval |= __get_user(psr, &ppr->cr_ipsr);
1000
1001	/* app regs */
1002
1003	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
1004	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
1005	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1006	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1007	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1008	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1009
1010	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
1011	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
1012	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
1013	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
1014	retval |= __get_user(cfm, &ppr->cfm);
1015
1016	/* gr1-gr3 */
1017
1018	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
1019	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
1020
1021	/* gr4-gr7 */
1022
1023	for (i = 4; i < 8; i++) {
1024		retval |= __get_user(val, &ppr->gr[i]);
1025		/* NaT bit will be set via PT_NAT_BITS: */
1026		if (unw_set_gr(&info, i, val, 0) < 0)
1027			return -EIO;
1028	}
1029
1030	/* gr8-gr11 */
1031
1032	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
1033
1034	/* gr12-gr15 */
1035
1036	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
1037	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
1038	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
1039
1040	/* gr16-gr31 */
1041
1042	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
1043
1044	/* b0 */
1045
1046	retval |= __get_user(pt->b0, &ppr->br[0]);
1047
1048	/* b1-b5 */
1049
1050	for (i = 1; i < 6; i++) {
1051		retval |= __get_user(val, &ppr->br[i]);
1052		unw_set_br(&info, i, val);
1053	}
1054
1055	/* b6-b7 */
1056
1057	retval |= __get_user(pt->b6, &ppr->br[6]);
1058	retval |= __get_user(pt->b7, &ppr->br[7]);
1059
1060	/* fr2-fr5 */
1061
1062	for (i = 2; i < 6; i++) {
1063		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
1064		if (unw_set_fr(&info, i, fpval) < 0)
1065			return -EIO;
1066	}
1067
1068	/* fr6-fr11 */
1069
1070	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
1071				   sizeof(ppr->fr[6]) * 6);
1072
1073	/* fp scratch regs(12-15) */
1074
1075	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
1076				   sizeof(ppr->fr[12]) * 4);
1077
1078	/* fr16-fr31 */
1079
1080	for (i = 16; i < 32; i++) {
1081		retval |= __copy_from_user(&fpval, &ppr->fr[i],
1082					   sizeof(fpval));
1083		if (unw_set_fr(&info, i, fpval) < 0)
1084			return -EIO;
1085	}
1086
1087	/* fph */
1088
1089	ia64_sync_fph(child);
1090	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
1091				   sizeof(ppr->fr[32]) * 96);
1092
1093	/* preds */
1094
1095	retval |= __get_user(pt->pr, &ppr->pr);
1096
1097	/* nat bits */
1098
1099	retval |= __get_user(nat_bits, &ppr->nat);
1100
1101	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
1102	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
1103	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
1104	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
1105	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
1106	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
1107	retval |= access_uarea(child, PT_CFM, &cfm, 1);
1108	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
1109
1110	ret = retval ? -EIO : 0;
1111	return ret;
1112}
1113
1114void
1115user_enable_single_step (struct task_struct *child)
1116{
1117	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1118
1119	set_tsk_thread_flag(child, TIF_SINGLESTEP);
1120	child_psr->ss = 1;
1121}
1122
1123void
1124user_enable_block_step (struct task_struct *child)
1125{
1126	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1127
1128	set_tsk_thread_flag(child, TIF_SINGLESTEP);
1129	child_psr->tb = 1;
1130}
1131
1132void
1133user_disable_single_step (struct task_struct *child)
1134{
1135	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1136
1137	/* make sure the single step/taken-branch trap bits are not set: */
1138	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1139	child_psr->ss = 0;
1140	child_psr->tb = 0;
1141}
1142
1143/*
1144 * Called by kernel/ptrace.c when detaching..
1145 *
1146 * Make sure the single step bit is not set.
1147 */
1148void
1149ptrace_disable (struct task_struct *child)
1150{
1151	user_disable_single_step(child);
1152}
1153
1154long
1155arch_ptrace (struct task_struct *child, long request,
1156	     unsigned long addr, unsigned long data)
1157{
1158	switch (request) {
1159	case PTRACE_PEEKTEXT:
1160	case PTRACE_PEEKDATA:
1161		/* read word at location addr */
1162		if (ptrace_access_vm(child, addr, &data, sizeof(data),
1163				FOLL_FORCE)
1164		    != sizeof(data))
1165			return -EIO;
1166		/* ensure return value is not mistaken for error code */
1167		force_successful_syscall_return();
1168		return data;
1169
1170	/* PTRACE_POKETEXT and PTRACE_POKEDATA is handled
1171	 * by the generic ptrace_request().
1172	 */
1173
1174	case PTRACE_PEEKUSR:
1175		/* read the word at addr in the USER area */
1176		if (access_uarea(child, addr, &data, 0) < 0)
1177			return -EIO;
1178		/* ensure return value is not mistaken for error code */
1179		force_successful_syscall_return();
1180		return data;
1181
1182	case PTRACE_POKEUSR:
1183		/* write the word at addr in the USER area */
1184		if (access_uarea(child, addr, &data, 1) < 0)
1185			return -EIO;
1186		return 0;
1187
1188	case PTRACE_OLD_GETSIGINFO:
1189		/* for backwards-compatibility */
1190		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
1191
1192	case PTRACE_OLD_SETSIGINFO:
1193		/* for backwards-compatibility */
1194		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
1195
1196	case PTRACE_GETREGS:
1197		return ptrace_getregs(child,
1198				      (struct pt_all_user_regs __user *) data);
1199
1200	case PTRACE_SETREGS:
1201		return ptrace_setregs(child,
1202				      (struct pt_all_user_regs __user *) data);
1203
1204	default:
1205		return ptrace_request(child, request, addr, data);
1206	}
1207}
1208
1209
1210/* "asmlinkage" so the input arguments are preserved... */
1211
1212asmlinkage long
1213syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1214		     long arg4, long arg5, long arg6, long arg7,
1215		     struct pt_regs regs)
1216{
1217	if (test_thread_flag(TIF_SYSCALL_TRACE))
1218		if (tracehook_report_syscall_entry(&regs))
1219			return -ENOSYS;
1220
1221	/* copy user rbs to kernel rbs */
1222	if (test_thread_flag(TIF_RESTORE_RSE))
1223		ia64_sync_krbs();
1224
 
 
 
1225
1226	audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3);
 
 
 
 
1227
1228	return 0;
1229}
1230
1231/* "asmlinkage" so the input arguments are preserved... */
1232
1233asmlinkage void
1234syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1235		     long arg4, long arg5, long arg6, long arg7,
1236		     struct pt_regs regs)
1237{
1238	int step;
1239
1240	audit_syscall_exit(&regs);
 
 
 
 
 
 
 
1241
1242	step = test_thread_flag(TIF_SINGLESTEP);
1243	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1244		tracehook_report_syscall_exit(&regs, step);
1245
1246	/* copy user rbs to kernel rbs */
1247	if (test_thread_flag(TIF_RESTORE_RSE))
1248		ia64_sync_krbs();
1249}
1250
1251/* Utrace implementation starts here */
1252struct regset_get {
1253	void *kbuf;
1254	void __user *ubuf;
1255};
1256
1257struct regset_set {
1258	const void *kbuf;
1259	const void __user *ubuf;
1260};
1261
1262struct regset_getset {
1263	struct task_struct *target;
1264	const struct user_regset *regset;
1265	union {
1266		struct regset_get get;
1267		struct regset_set set;
1268	} u;
1269	unsigned int pos;
1270	unsigned int count;
1271	int ret;
1272};
1273
1274static int
1275access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
1276		unsigned long addr, unsigned long *data, int write_access)
1277{
1278	struct pt_regs *pt;
1279	unsigned long *ptr = NULL;
1280	int ret;
1281	char nat = 0;
1282
1283	pt = task_pt_regs(target);
1284	switch (addr) {
1285	case ELF_GR_OFFSET(1):
1286		ptr = &pt->r1;
1287		break;
1288	case ELF_GR_OFFSET(2):
1289	case ELF_GR_OFFSET(3):
1290		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
1291		break;
1292	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
1293		if (write_access) {
1294			/* read NaT bit first: */
1295			unsigned long dummy;
1296
1297			ret = unw_get_gr(info, addr/8, &dummy, &nat);
1298			if (ret < 0)
1299				return ret;
1300		}
1301		return unw_access_gr(info, addr/8, data, &nat, write_access);
1302	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
1303		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
1304		break;
1305	case ELF_GR_OFFSET(12):
1306	case ELF_GR_OFFSET(13):
1307		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
1308		break;
1309	case ELF_GR_OFFSET(14):
1310		ptr = &pt->r14;
1311		break;
1312	case ELF_GR_OFFSET(15):
1313		ptr = &pt->r15;
1314	}
1315	if (write_access)
1316		*ptr = *data;
1317	else
1318		*data = *ptr;
1319	return 0;
1320}
1321
1322static int
1323access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
1324		unsigned long addr, unsigned long *data, int write_access)
1325{
1326	struct pt_regs *pt;
1327	unsigned long *ptr = NULL;
1328
1329	pt = task_pt_regs(target);
1330	switch (addr) {
1331	case ELF_BR_OFFSET(0):
1332		ptr = &pt->b0;
1333		break;
1334	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
1335		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
1336				     data, write_access);
1337	case ELF_BR_OFFSET(6):
1338		ptr = &pt->b6;
1339		break;
1340	case ELF_BR_OFFSET(7):
1341		ptr = &pt->b7;
1342	}
1343	if (write_access)
1344		*ptr = *data;
1345	else
1346		*data = *ptr;
1347	return 0;
1348}
1349
1350static int
1351access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
1352		unsigned long addr, unsigned long *data, int write_access)
1353{
1354	struct pt_regs *pt;
1355	unsigned long cfm, urbs_end;
1356	unsigned long *ptr = NULL;
1357
1358	pt = task_pt_regs(target);
1359	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
1360		switch (addr) {
1361		case ELF_AR_RSC_OFFSET:
1362			/* force PL3 */
1363			if (write_access)
1364				pt->ar_rsc = *data | (3 << 2);
1365			else
1366				*data = pt->ar_rsc;
1367			return 0;
1368		case ELF_AR_BSP_OFFSET:
1369			/*
1370			 * By convention, we use PT_AR_BSP to refer to
1371			 * the end of the user-level backing store.
1372			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
1373			 * to get the real value of ar.bsp at the time
1374			 * the kernel was entered.
1375			 *
1376			 * Furthermore, when changing the contents of
1377			 * PT_AR_BSP (or PT_CFM) while the task is
1378			 * blocked in a system call, convert the state
1379			 * so that the non-system-call exit
1380			 * path is used.  This ensures that the proper
1381			 * state will be picked up when resuming
1382			 * execution.  However, it *also* means that
1383			 * once we write PT_AR_BSP/PT_CFM, it won't be
1384			 * possible to modify the syscall arguments of
1385			 * the pending system call any longer.  This
1386			 * shouldn't be an issue because modifying
1387			 * PT_AR_BSP/PT_CFM generally implies that
1388			 * we're either abandoning the pending system
1389			 * call or that we defer it's re-execution
1390			 * (e.g., due to GDB doing an inferior
1391			 * function call).
1392			 */
1393			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1394			if (write_access) {
1395				if (*data != urbs_end) {
1396					if (in_syscall(pt))
1397						convert_to_non_syscall(target,
1398								       pt,
1399								       cfm);
1400					/*
1401					 * Simulate user-level write
1402					 * of ar.bsp:
1403					 */
1404					pt->loadrs = 0;
1405					pt->ar_bspstore = *data;
1406				}
1407			} else
1408				*data = urbs_end;
1409			return 0;
1410		case ELF_AR_BSPSTORE_OFFSET:
1411			ptr = &pt->ar_bspstore;
1412			break;
1413		case ELF_AR_RNAT_OFFSET:
1414			ptr = &pt->ar_rnat;
1415			break;
1416		case ELF_AR_CCV_OFFSET:
1417			ptr = &pt->ar_ccv;
1418			break;
1419		case ELF_AR_UNAT_OFFSET:
1420			ptr = &pt->ar_unat;
1421			break;
1422		case ELF_AR_FPSR_OFFSET:
1423			ptr = &pt->ar_fpsr;
1424			break;
1425		case ELF_AR_PFS_OFFSET:
1426			ptr = &pt->ar_pfs;
1427			break;
1428		case ELF_AR_LC_OFFSET:
1429			return unw_access_ar(info, UNW_AR_LC, data,
1430					     write_access);
1431		case ELF_AR_EC_OFFSET:
1432			return unw_access_ar(info, UNW_AR_EC, data,
1433					     write_access);
1434		case ELF_AR_CSD_OFFSET:
1435			ptr = &pt->ar_csd;
1436			break;
1437		case ELF_AR_SSD_OFFSET:
1438			ptr = &pt->ar_ssd;
1439		}
1440	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
1441		switch (addr) {
1442		case ELF_CR_IIP_OFFSET:
1443			ptr = &pt->cr_iip;
1444			break;
1445		case ELF_CFM_OFFSET:
1446			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1447			if (write_access) {
1448				if (((cfm ^ *data) & PFM_MASK) != 0) {
1449					if (in_syscall(pt))
1450						convert_to_non_syscall(target,
1451								       pt,
1452								       cfm);
1453					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
1454						      | (*data & PFM_MASK));
1455				}
1456			} else
1457				*data = cfm;
1458			return 0;
1459		case ELF_CR_IPSR_OFFSET:
1460			if (write_access) {
1461				unsigned long tmp = *data;
1462				/* psr.ri==3 is a reserved value: SDM 2:25 */
1463				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
1464					tmp &= ~IA64_PSR_RI;
1465				pt->cr_ipsr = ((tmp & IPSR_MASK)
1466					       | (pt->cr_ipsr & ~IPSR_MASK));
1467			} else
1468				*data = (pt->cr_ipsr & IPSR_MASK);
1469			return 0;
1470		}
1471	} else if (addr == ELF_NAT_OFFSET)
1472		return access_nat_bits(target, pt, info,
1473				       data, write_access);
1474	else if (addr == ELF_PR_OFFSET)
1475		ptr = &pt->pr;
1476	else
1477		return -1;
1478
1479	if (write_access)
1480		*ptr = *data;
1481	else
1482		*data = *ptr;
1483
1484	return 0;
1485}
1486
1487static int
1488access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
1489		unsigned long addr, unsigned long *data, int write_access)
1490{
1491	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
1492		return access_elf_gpreg(target, info, addr, data, write_access);
1493	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
1494		return access_elf_breg(target, info, addr, data, write_access);
1495	else
1496		return access_elf_areg(target, info, addr, data, write_access);
1497}
1498
1499void do_gpregs_get(struct unw_frame_info *info, void *arg)
1500{
1501	struct pt_regs *pt;
1502	struct regset_getset *dst = arg;
1503	elf_greg_t tmp[16];
1504	unsigned int i, index, min_copy;
1505
1506	if (unw_unwind_to_user(info) < 0)
1507		return;
1508
1509	/*
1510	 * coredump format:
1511	 *      r0-r31
1512	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
1513	 *      predicate registers (p0-p63)
1514	 *      b0-b7
1515	 *      ip cfm user-mask
1516	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
1517	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
1518	 */
1519
1520
1521	/* Skip r0 */
1522	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1523		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1524						      &dst->u.get.kbuf,
1525						      &dst->u.get.ubuf,
1526						      0, ELF_GR_OFFSET(1));
1527		if (dst->ret || dst->count == 0)
1528			return;
1529	}
1530
1531	/* gr1 - gr15 */
1532	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1533		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1534		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
1535			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
1536		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1537				index++)
1538			if (access_elf_reg(dst->target, info, i,
1539						&tmp[index], 0) < 0) {
1540				dst->ret = -EIO;
1541				return;
1542			}
1543		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1544				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1545				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1546		if (dst->ret || dst->count == 0)
1547			return;
1548	}
1549
1550	/* r16-r31 */
1551	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1552		pt = task_pt_regs(dst->target);
1553		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1554				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
1555				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1556		if (dst->ret || dst->count == 0)
1557			return;
1558	}
1559
1560	/* nat, pr, b0 - b7 */
1561	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1562		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1563		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
1564			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
1565		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1566				index++)
1567			if (access_elf_reg(dst->target, info, i,
1568						&tmp[index], 0) < 0) {
1569				dst->ret = -EIO;
1570				return;
1571			}
1572		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1573				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1574				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1575		if (dst->ret || dst->count == 0)
1576			return;
1577	}
1578
1579	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1580	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1581	 */
1582	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1583		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1584		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
1585			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
1586		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1587				index++)
1588			if (access_elf_reg(dst->target, info, i,
1589						&tmp[index], 0) < 0) {
1590				dst->ret = -EIO;
1591				return;
1592			}
1593		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1594				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1595				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1596	}
1597}
1598
1599void do_gpregs_set(struct unw_frame_info *info, void *arg)
1600{
1601	struct pt_regs *pt;
1602	struct regset_getset *dst = arg;
1603	elf_greg_t tmp[16];
1604	unsigned int i, index;
1605
1606	if (unw_unwind_to_user(info) < 0)
1607		return;
1608
1609	/* Skip r0 */
1610	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1611		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1612						       &dst->u.set.kbuf,
1613						       &dst->u.set.ubuf,
1614						       0, ELF_GR_OFFSET(1));
1615		if (dst->ret || dst->count == 0)
1616			return;
1617	}
1618
1619	/* gr1-gr15 */
1620	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1621		i = dst->pos;
1622		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1623		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1624				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1625				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1626		if (dst->ret)
1627			return;
1628		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1629			if (access_elf_reg(dst->target, info, i,
1630						&tmp[index], 1) < 0) {
1631				dst->ret = -EIO;
1632				return;
1633			}
1634		if (dst->count == 0)
1635			return;
1636	}
1637
1638	/* gr16-gr31 */
1639	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1640		pt = task_pt_regs(dst->target);
1641		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1642				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
1643				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1644		if (dst->ret || dst->count == 0)
1645			return;
1646	}
1647
1648	/* nat, pr, b0 - b7 */
1649	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1650		i = dst->pos;
1651		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1652		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1653				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1654				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1655		if (dst->ret)
1656			return;
1657		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
1658			if (access_elf_reg(dst->target, info, i,
1659						&tmp[index], 1) < 0) {
1660				dst->ret = -EIO;
1661				return;
1662			}
1663		if (dst->count == 0)
1664			return;
1665	}
1666
1667	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1668	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1669	 */
1670	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1671		i = dst->pos;
1672		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1673		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1674				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1675				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1676		if (dst->ret)
1677			return;
1678		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1679			if (access_elf_reg(dst->target, info, i,
1680						&tmp[index], 1) < 0) {
1681				dst->ret = -EIO;
1682				return;
1683			}
1684	}
1685}
1686
1687#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))
1688
1689void do_fpregs_get(struct unw_frame_info *info, void *arg)
1690{
1691	struct regset_getset *dst = arg;
1692	struct task_struct *task = dst->target;
1693	elf_fpreg_t tmp[30];
1694	int index, min_copy, i;
1695
1696	if (unw_unwind_to_user(info) < 0)
1697		return;
1698
1699	/* Skip pos 0 and 1 */
1700	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1701		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1702						      &dst->u.get.kbuf,
1703						      &dst->u.get.ubuf,
1704						      0, ELF_FP_OFFSET(2));
1705		if (dst->count == 0 || dst->ret)
1706			return;
1707	}
1708
1709	/* fr2-fr31 */
1710	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1711		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
1712
1713		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
1714				dst->pos + dst->count);
1715		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
1716				index++)
1717			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
1718					 &tmp[index])) {
1719				dst->ret = -EIO;
1720				return;
1721			}
1722		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1723				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1724				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1725		if (dst->count == 0 || dst->ret)
1726			return;
1727	}
1728
1729	/* fph */
1730	if (dst->count > 0) {
1731		ia64_flush_fph(dst->target);
1732		if (task->thread.flags & IA64_THREAD_FPH_VALID)
1733			dst->ret = user_regset_copyout(
1734				&dst->pos, &dst->count,
1735				&dst->u.get.kbuf, &dst->u.get.ubuf,
1736				&dst->target->thread.fph,
1737				ELF_FP_OFFSET(32), -1);
1738		else
1739			/* Zero fill instead.  */
1740			dst->ret = user_regset_copyout_zero(
1741				&dst->pos, &dst->count,
1742				&dst->u.get.kbuf, &dst->u.get.ubuf,
1743				ELF_FP_OFFSET(32), -1);
1744	}
1745}
1746
1747void do_fpregs_set(struct unw_frame_info *info, void *arg)
1748{
1749	struct regset_getset *dst = arg;
1750	elf_fpreg_t fpreg, tmp[30];
1751	int index, start, end;
1752
1753	if (unw_unwind_to_user(info) < 0)
1754		return;
1755
1756	/* Skip pos 0 and 1 */
1757	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1758		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1759						       &dst->u.set.kbuf,
1760						       &dst->u.set.ubuf,
1761						       0, ELF_FP_OFFSET(2));
1762		if (dst->count == 0 || dst->ret)
1763			return;
1764	}
1765
1766	/* fr2-fr31 */
1767	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1768		start = dst->pos;
1769		end = min(((unsigned int)ELF_FP_OFFSET(32)),
1770			 dst->pos + dst->count);
1771		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1772				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1773				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1774		if (dst->ret)
1775			return;
1776
1777		if (start & 0xF) { /* only write high part */
1778			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
1779					 &fpreg)) {
1780				dst->ret = -EIO;
1781				return;
1782			}
1783			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
1784				= fpreg.u.bits[0];
1785			start &= ~0xFUL;
1786		}
1787		if (end & 0xF) { /* only write low part */
1788			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
1789					&fpreg)) {
1790				dst->ret = -EIO;
1791				return;
1792			}
1793			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
1794				= fpreg.u.bits[1];
1795			end = (end + 0xF) & ~0xFUL;
1796		}
1797
1798		for ( ;	start < end ; start += sizeof(elf_fpreg_t)) {
1799			index = start / sizeof(elf_fpreg_t);
1800			if (unw_set_fr(info, index, tmp[index - 2])) {
1801				dst->ret = -EIO;
1802				return;
1803			}
1804		}
1805		if (dst->ret || dst->count == 0)
1806			return;
1807	}
1808
1809	/* fph */
1810	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
1811		ia64_sync_fph(dst->target);
1812		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1813						&dst->u.set.kbuf,
1814						&dst->u.set.ubuf,
1815						&dst->target->thread.fph,
1816						ELF_FP_OFFSET(32), -1);
1817	}
1818}
1819
1820static int
1821do_regset_call(void (*call)(struct unw_frame_info *, void *),
1822	       struct task_struct *target,
1823	       const struct user_regset *regset,
1824	       unsigned int pos, unsigned int count,
1825	       const void *kbuf, const void __user *ubuf)
1826{
1827	struct regset_getset info = { .target = target, .regset = regset,
1828				 .pos = pos, .count = count,
1829				 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
1830				 .ret = 0 };
1831
1832	if (target == current)
1833		unw_init_running(call, &info);
1834	else {
1835		struct unw_frame_info ufi;
1836		memset(&ufi, 0, sizeof(ufi));
1837		unw_init_from_blocked_task(&ufi, target);
1838		(*call)(&ufi, &info);
1839	}
1840
1841	return info.ret;
1842}
1843
1844static int
1845gpregs_get(struct task_struct *target,
1846	   const struct user_regset *regset,
1847	   unsigned int pos, unsigned int count,
1848	   void *kbuf, void __user *ubuf)
1849{
1850	return do_regset_call(do_gpregs_get, target, regset, pos, count,
1851		kbuf, ubuf);
1852}
1853
1854static int gpregs_set(struct task_struct *target,
1855		const struct user_regset *regset,
1856		unsigned int pos, unsigned int count,
1857		const void *kbuf, const void __user *ubuf)
1858{
1859	return do_regset_call(do_gpregs_set, target, regset, pos, count,
1860		kbuf, ubuf);
1861}
1862
1863static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
1864{
1865	do_sync_rbs(info, ia64_sync_user_rbs);
1866}
1867
1868/*
1869 * This is called to write back the register backing store.
1870 * ptrace does this before it stops, so that a tracer reading the user
1871 * memory after the thread stops will get the current register data.
1872 */
1873static int
1874gpregs_writeback(struct task_struct *target,
1875		 const struct user_regset *regset,
1876		 int now)
1877{
1878	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
1879		return 0;
1880	set_notify_resume(target);
1881	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
1882		NULL, NULL);
1883}
1884
1885static int
1886fpregs_active(struct task_struct *target, const struct user_regset *regset)
1887{
1888	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
1889}
1890
1891static int fpregs_get(struct task_struct *target,
1892		const struct user_regset *regset,
1893		unsigned int pos, unsigned int count,
1894		void *kbuf, void __user *ubuf)
1895{
1896	return do_regset_call(do_fpregs_get, target, regset, pos, count,
1897		kbuf, ubuf);
1898}
1899
1900static int fpregs_set(struct task_struct *target,
1901		const struct user_regset *regset,
1902		unsigned int pos, unsigned int count,
1903		const void *kbuf, const void __user *ubuf)
1904{
1905	return do_regset_call(do_fpregs_set, target, regset, pos, count,
1906		kbuf, ubuf);
1907}
1908
1909static int
1910access_uarea(struct task_struct *child, unsigned long addr,
1911	      unsigned long *data, int write_access)
1912{
1913	unsigned int pos = -1; /* an invalid value */
1914	int ret;
1915	unsigned long *ptr, regnum;
1916
1917	if ((addr & 0x7) != 0) {
1918		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
1919		return -1;
1920	}
1921	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
1922		(addr >= PT_R7 + 8 && addr < PT_B1) ||
1923		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
1924		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
1925		dprintk("ptrace: rejecting access to register "
1926					"address 0x%lx\n", addr);
1927		return -1;
1928	}
1929
1930	switch (addr) {
1931	case PT_F32 ... (PT_F127 + 15):
1932		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
1933		break;
1934	case PT_F2 ... (PT_F5 + 15):
1935		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
1936		break;
1937	case PT_F10 ... (PT_F31 + 15):
1938		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
1939		break;
1940	case PT_F6 ... (PT_F9 + 15):
1941		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
1942		break;
1943	}
1944
1945	if (pos != -1) {
1946		if (write_access)
1947			ret = fpregs_set(child, NULL, pos,
1948				sizeof(unsigned long), data, NULL);
1949		else
1950			ret = fpregs_get(child, NULL, pos,
1951				sizeof(unsigned long), data, NULL);
1952		if (ret != 0)
1953			return -1;
1954		return 0;
1955	}
1956
1957	switch (addr) {
1958	case PT_NAT_BITS:
1959		pos = ELF_NAT_OFFSET;
1960		break;
1961	case PT_R4 ... PT_R7:
1962		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
1963		break;
1964	case PT_B1 ... PT_B5:
1965		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
1966		break;
1967	case PT_AR_EC:
1968		pos = ELF_AR_EC_OFFSET;
1969		break;
1970	case PT_AR_LC:
1971		pos = ELF_AR_LC_OFFSET;
1972		break;
1973	case PT_CR_IPSR:
1974		pos = ELF_CR_IPSR_OFFSET;
1975		break;
1976	case PT_CR_IIP:
1977		pos = ELF_CR_IIP_OFFSET;
1978		break;
1979	case PT_CFM:
1980		pos = ELF_CFM_OFFSET;
1981		break;
1982	case PT_AR_UNAT:
1983		pos = ELF_AR_UNAT_OFFSET;
1984		break;
1985	case PT_AR_PFS:
1986		pos = ELF_AR_PFS_OFFSET;
1987		break;
1988	case PT_AR_RSC:
1989		pos = ELF_AR_RSC_OFFSET;
1990		break;
1991	case PT_AR_RNAT:
1992		pos = ELF_AR_RNAT_OFFSET;
1993		break;
1994	case PT_AR_BSPSTORE:
1995		pos = ELF_AR_BSPSTORE_OFFSET;
1996		break;
1997	case PT_PR:
1998		pos = ELF_PR_OFFSET;
1999		break;
2000	case PT_B6:
2001		pos = ELF_BR_OFFSET(6);
2002		break;
2003	case PT_AR_BSP:
2004		pos = ELF_AR_BSP_OFFSET;
2005		break;
2006	case PT_R1 ... PT_R3:
2007		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
2008		break;
2009	case PT_R12 ... PT_R15:
2010		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
2011		break;
2012	case PT_R8 ... PT_R11:
2013		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
2014		break;
2015	case PT_R16 ... PT_R31:
2016		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
2017		break;
2018	case PT_AR_CCV:
2019		pos = ELF_AR_CCV_OFFSET;
2020		break;
2021	case PT_AR_FPSR:
2022		pos = ELF_AR_FPSR_OFFSET;
2023		break;
2024	case PT_B0:
2025		pos = ELF_BR_OFFSET(0);
2026		break;
2027	case PT_B7:
2028		pos = ELF_BR_OFFSET(7);
2029		break;
2030	case PT_AR_CSD:
2031		pos = ELF_AR_CSD_OFFSET;
2032		break;
2033	case PT_AR_SSD:
2034		pos = ELF_AR_SSD_OFFSET;
2035		break;
2036	}
2037
2038	if (pos != -1) {
2039		if (write_access)
2040			ret = gpregs_set(child, NULL, pos,
2041				sizeof(unsigned long), data, NULL);
2042		else
2043			ret = gpregs_get(child, NULL, pos,
2044				sizeof(unsigned long), data, NULL);
2045		if (ret != 0)
2046			return -1;
2047		return 0;
2048	}
2049
2050	/* access debug registers */
2051	if (addr >= PT_IBR) {
2052		regnum = (addr - PT_IBR) >> 3;
2053		ptr = &child->thread.ibr[0];
2054	} else {
2055		regnum = (addr - PT_DBR) >> 3;
2056		ptr = &child->thread.dbr[0];
2057	}
2058
2059	if (regnum >= 8) {
2060		dprintk("ptrace: rejecting access to register "
2061				"address 0x%lx\n", addr);
2062		return -1;
2063	}
2064#ifdef CONFIG_PERFMON
2065	/*
2066	 * Check if debug registers are used by perfmon. This
2067	 * test must be done once we know that we can do the
2068	 * operation, i.e. the arguments are all valid, but
2069	 * before we start modifying the state.
2070	 *
2071	 * Perfmon needs to keep a count of how many processes
2072	 * are trying to modify the debug registers for system
2073	 * wide monitoring sessions.
2074	 *
2075	 * We also include read access here, because they may
2076	 * cause the PMU-installed debug register state
2077	 * (dbr[], ibr[]) to be reset. The two arrays are also
2078	 * used by perfmon, but we do not use
2079	 * IA64_THREAD_DBG_VALID. The registers are restored
2080	 * by the PMU context switch code.
2081	 */
2082	if (pfm_use_debug_registers(child))
2083		return -1;
2084#endif
2085
2086	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
2087		child->thread.flags |= IA64_THREAD_DBG_VALID;
2088		memset(child->thread.dbr, 0,
2089				sizeof(child->thread.dbr));
2090		memset(child->thread.ibr, 0,
2091				sizeof(child->thread.ibr));
2092	}
2093
2094	ptr += regnum;
2095
2096	if ((regnum & 1) && write_access) {
2097		/* don't let the user set kernel-level breakpoints: */
2098		*ptr = *data & ~(7UL << 56);
2099		return 0;
2100	}
2101	if (write_access)
2102		*ptr = *data;
2103	else
2104		*data = *ptr;
2105	return 0;
2106}
2107
2108static const struct user_regset native_regsets[] = {
2109	{
2110		.core_note_type = NT_PRSTATUS,
2111		.n = ELF_NGREG,
2112		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
2113		.get = gpregs_get, .set = gpregs_set,
2114		.writeback = gpregs_writeback
2115	},
2116	{
2117		.core_note_type = NT_PRFPREG,
2118		.n = ELF_NFPREG,
2119		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
2120		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
2121	},
2122};
2123
2124static const struct user_regset_view user_ia64_view = {
2125	.name = "ia64",
2126	.e_machine = EM_IA_64,
2127	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2128};
2129
2130const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
2131{
2132	return &user_ia64_view;
2133}
2134
2135struct syscall_get_set_args {
2136	unsigned int i;
2137	unsigned int n;
2138	unsigned long *args;
2139	struct pt_regs *regs;
2140	int rw;
2141};
2142
2143static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
2144{
2145	struct syscall_get_set_args *args = data;
2146	struct pt_regs *pt = args->regs;
2147	unsigned long *krbs, cfm, ndirty;
2148	int i, count;
2149
2150	if (unw_unwind_to_user(info) < 0)
2151		return;
2152
2153	cfm = pt->cr_ifs;
2154	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
2155	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
2156
2157	count = 0;
2158	if (in_syscall(pt))
2159		count = min_t(int, args->n, cfm & 0x7f);
2160
2161	for (i = 0; i < count; i++) {
2162		if (args->rw)
2163			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
2164				args->args[i];
2165		else
2166			args->args[i] = *ia64_rse_skip_regs(krbs,
2167				ndirty + i + args->i);
2168	}
2169
2170	if (!args->rw) {
2171		while (i < args->n) {
2172			args->args[i] = 0;
2173			i++;
2174		}
2175	}
2176}
2177
2178void ia64_syscall_get_set_arguments(struct task_struct *task,
2179	struct pt_regs *regs, unsigned int i, unsigned int n,
2180	unsigned long *args, int rw)
2181{
2182	struct syscall_get_set_args data = {
2183		.i = i,
2184		.n = n,
2185		.args = args,
2186		.regs = regs,
2187		.rw = rw,
2188	};
2189
2190	if (task == current)
2191		unw_init_running(syscall_get_set_args_cb, &data);
2192	else {
2193		struct unw_frame_info ufi;
2194		memset(&ufi, 0, sizeof(ufi));
2195		unw_init_from_blocked_task(&ufi, task);
2196		syscall_get_set_args_cb(&ufi, &data);
2197	}
2198}