   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Kernel support for the ptrace() and syscall tracing interfaces.
   4 *
   5 * Copyright (C) 1999-2005 Hewlett-Packard Co
   6 *	David Mosberger-Tang <davidm@hpl.hp.com>
   7 * Copyright (C) 2006 Intel Co
   8 *  2006-08-12	- IA64 Native Utrace implementation support added by
   9 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  10 *
  11 * Derived from the x86 and Alpha versions.
  12 */
  13#include <linux/kernel.h>
  14#include <linux/sched.h>
  15#include <linux/sched/task.h>
  16#include <linux/sched/task_stack.h>
  17#include <linux/mm.h>
  18#include <linux/errno.h>
  19#include <linux/ptrace.h>
  20#include <linux/user.h>
  21#include <linux/security.h>
  22#include <linux/audit.h>
  23#include <linux/signal.h>
  24#include <linux/regset.h>
  25#include <linux/elf.h>
  26#include <linux/tracehook.h>
  27
  28#include <asm/pgtable.h>
  29#include <asm/processor.h>
  30#include <asm/ptrace_offsets.h>
  31#include <asm/rse.h>
  32#include <linux/uaccess.h>
  33#include <asm/unwind.h>
  34#ifdef CONFIG_PERFMON
  35#include <asm/perfmon.h>
  36#endif
  37
  38#include "entry.h"
  39
  40/*
  41 * Bits in the PSR that we allow ptrace() to change:
  42 *	be, up, ac, mfl, mfh (the user mask; five bits total)
  43 *	db (debug breakpoint fault; one bit)
  44 *	id (instruction debug fault disable; one bit)
  45 *	dd (data debug fault disable; one bit)
  46 *	ri (restart instruction; two bits)
  47 *	is (instruction set; one bit)
  48 */
  49#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
  50		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
  51
  52#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
  53#define PFM_MASK	MASK(38)
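
/*
 * Example (illustrative): MASK(3) == 0x7 and MASK(38) == 0x3fffffffff;
 * PFM_MASK covers the 38 architected bits of a CFM value.
 */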
  54
  55#define PTRACE_DEBUG	0
  56
  57#if PTRACE_DEBUG
  58# define dprintk(format...)	printk(format)
  59# define inline
  60#else
  61# define dprintk(format...)
  62#endif
  63
  64/* Return TRUE if PT was created due to kernel-entry via a system-call.  */
  65
  66static inline int
  67in_syscall (struct pt_regs *pt)
  68{
  69	return (long) pt->cr_ifs >= 0;
  70}
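
/*
 * Illustrative note: bit 63 of cr.ifs is its "valid" bit.  The syscall
 * entry path saves the current frame marker in pt->cr_ifs with that bit
 * clear, while interruption frames (and convert_to_non_syscall() below)
 * have it set, so the sign test above decodes as:
 *
 *	(long) pt->cr_ifs < 0	=> interruption frame (bit 63 set)
 *	(long) pt->cr_ifs >= 0	=> system-call entry (bit 63 clear)
 */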
  71
  72/*
  73 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
  74 * bitset where bit i is set iff the NaT bit of register i is set.
  75 */
  76unsigned long
  77ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
  78{
  79#	define GET_BITS(first, last, unat)				\
  80	({								\
  81		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
  82		unsigned long nbits = (last - first + 1);		\
  83		unsigned long mask = MASK(nbits) << first;		\
  84		unsigned long dist;					\
  85		if (bit < first)					\
  86			dist = 64 + bit - first;			\
  87		else							\
  88			dist = bit - first;				\
  89		ia64_rotr(unat, dist) & mask;				\
  90	})
  91	unsigned long val;
  92
  93	/*
  94	 * Registers that are stored consecutively in struct pt_regs
  95	 * can be handled in parallel.  If the register order in
  96	 * struct pt_regs changes, this code MUST be updated.
  97	 */
  98	val  = GET_BITS( 1,  1, scratch_unat);
  99	val |= GET_BITS( 2,  3, scratch_unat);
 100	val |= GET_BITS(12, 13, scratch_unat);
 101	val |= GET_BITS(14, 14, scratch_unat);
 102	val |= GET_BITS(15, 15, scratch_unat);
 103	val |= GET_BITS( 8, 11, scratch_unat);
 104	val |= GET_BITS(16, 31, scratch_unat);
 105	return val;
 106
 107#	undef GET_BITS
 108}
 109
 110/*
 111 * Set the NaT bits for the scratch registers according to NAT and
 112 * return the resulting unat (assuming the scratch registers are
 113 * stored in PT).
 114 */
 115unsigned long
 116ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
 117{
 118#	define PUT_BITS(first, last, nat)				\
 119	({								\
 120		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
 121		unsigned long nbits = (last - first + 1);		\
 122		unsigned long mask = MASK(nbits) << first;		\
 123		long dist;						\
 124		if (bit < first)					\
 125			dist = 64 + bit - first;			\
 126		else							\
 127			dist = bit - first;				\
 128		ia64_rotl(nat & mask, dist);				\
 129	})
 130	unsigned long scratch_unat;
 131
 132	/*
 133	 * Registers that are stored consecutively in struct pt_regs
 134	 * can be handled in parallel.  If the register order in
 135	 * struct pt_regs changes, this code MUST be updated.
 136	 */
 137	scratch_unat  = PUT_BITS( 1,  1, nat);
 138	scratch_unat |= PUT_BITS( 2,  3, nat);
 139	scratch_unat |= PUT_BITS(12, 13, nat);
 140	scratch_unat |= PUT_BITS(14, 14, nat);
 141	scratch_unat |= PUT_BITS(15, 15, nat);
 142	scratch_unat |= PUT_BITS( 8, 11, nat);
 143	scratch_unat |= PUT_BITS(16, 31, nat);
 144
 145	return scratch_unat;
 146
 147#	undef PUT_BITS
 148}
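
/*
 * Illustrative note: ar.unat records a register's NaT bit by the
 * *address* the register was spilled to -- st8.spill to address A sets
 * unat bit (A >> 3) & 0x3f, which is what ia64_unat_pos() computes from
 * the register's offset inside struct pt_regs.  GET_BITS/PUT_BITS above
 * just rotate between that address-keyed bitset and one keyed by
 * register number; a sketch for a single register:
 *
 *	bit = ia64_unat_pos(&pt->r16);		// depends on pt_regs layout
 *	nat_r16 = (scratch_unat >> bit) & 1;	// the bit GET_BITS(16, 31, ...)
 *						// delivers at position 16
 */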
 149
 150#define IA64_MLX_TEMPLATE	0x2
 151#define IA64_MOVL_OPCODE	6
 152
 153void
 154ia64_increment_ip (struct pt_regs *regs)
 155{
 156	unsigned long w0, ri = ia64_psr(regs)->ri + 1;
 157
 158	if (ri > 2) {
 159		ri = 0;
 160		regs->cr_iip += 16;
 161	} else if (ri == 2) {
 162		get_user(w0, (char __user *) regs->cr_iip + 0);
 163		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
 164			/*
 165			 * rfi'ing to slot 2 of an MLX bundle causes
 166			 * an illegal operation fault.  We don't want
 167			 * that to happen...
 168			 */
 169			ri = 0;
 170			regs->cr_iip += 16;
 171		}
 172	}
 173	ia64_psr(regs)->ri = ri;
 174}
 175
 176void
 177ia64_decrement_ip (struct pt_regs *regs)
 178{
 179	unsigned long w0, ri = ia64_psr(regs)->ri - 1;
 180
 181	if (ia64_psr(regs)->ri == 0) {
 182		regs->cr_iip -= 16;
 183		ri = 2;
 184		get_user(w0, (char __user *) regs->cr_iip + 0);
 185		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
 186			/*
 187			 * rfi'ing to slot 2 of an MLX bundle causes
 188			 * an illegal operation fault.  We don't want
 189			 * that to happen...
 190			 */
 191			ri = 1;
 192		}
 193	}
 194	ia64_psr(regs)->ri = ri;
 195}
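
/*
 * Illustrative example: IA-64 instructions live in 16-byte bundles of
 * three slots, addressed by the pair (cr.iip, psr.ri), so stepping
 * forward goes
 *
 *	(iip, ri=0) -> (iip, ri=1) -> (iip, ri=2) -> (iip+16, ri=0)
 *
 * except that an MLX-template bundle holds one long instruction in
 * slots 1+2, making slot 2 an invalid restart point; the template
 * checks above skip over it in both directions.
 */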
 196
 197/*
 198 * This routine is used to read the rnat bits that are stored on the
 199 * kernel backing store.  Since, in general, the alignments of the user
 200 * and kernel backing stores differ, this is not completely trivial.  In
 201 * essence, we need to construct the user RNAT based on up to two
 202 * kernel RNAT values and/or the RNAT value saved in the child's
 203 * pt_regs.
 204 *
 205 * user rbs
 206 *
 207 * +--------+ <-- lowest address
 208 * | slot62 |
 209 * +--------+
 210 * |  rnat  | 0x....1f8
 211 * +--------+
 212 * | slot00 | \
 213 * +--------+ |
 214 * | slot01 | > child_regs->ar_rnat
 215 * +--------+ |
 216 * | slot02 | /				kernel rbs
 217 * +--------+				+--------+
 218 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 219 * +- - - - +				+--------+
 220 *					| slot62 |
 221 * +- - - - +				+--------+
 222 *					|  rnat	 |
 223 * +- - - - +				+--------+
 224 *   vrnat				| slot00 |
 225 * +- - - - +				+--------+
 226 *					=	 =
 227 *					+--------+
 228 *					| slot00 | \
 229 *					+--------+ |
 230 *					| slot01 | > child_stack->ar_rnat
 231 *					+--------+ |
 232 *					| slot02 | /
 233 *					+--------+
 234 *						  <--- child_stack->ar_bspstore
 235 *
 236 * The way to think of this code is as follows: bit 0 in the user rnat
 237 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 238 * values.  The kernel rnat value holding this bit is stored in
 239 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 240 * forms the upper bits of the user rnat value.
 241 *
 242 * Boundary cases:
 243 *
 244 * o when reading the rnat "below" the first rnat slot on the kernel
 245 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 246 *   merged in from pt->ar_rnat.
 247 *
 248 * o when reading the rnat "above" the last rnat slot on the kernel
 249 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 250 */
 251static unsigned long
 252get_rnat (struct task_struct *task, struct switch_stack *sw,
 253	  unsigned long *krbs, unsigned long *urnat_addr,
 254	  unsigned long *urbs_end)
 255{
 256	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
 257	unsigned long umask = 0, mask, m;
 258	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
 259	long num_regs, nbits;
 260	struct pt_regs *pt;
 261
 262	pt = task_pt_regs(task);
 263	kbsp = (unsigned long *) sw->ar_bspstore;
 264	ubspstore = (unsigned long *) pt->ar_bspstore;
 265
 266	if (urbs_end < urnat_addr)
 267		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
 268	else
 269		nbits = 63;
 270	mask = MASK(nbits);
 271	/*
 272	 * First, figure out which bit number slot 0 in user-land maps
 273	 * to in the kernel rnat.  Do this by figuring out how many
 274	 * register slots we're beyond the user's backing store and
 275	 * then computing the equivalent address in kernel space.
 276	 */
 277	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
 278	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
 279	shift = ia64_rse_slot_num(slot0_kaddr);
 280	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
 281	rnat0_kaddr = rnat1_kaddr - 64;
 282
 283	if (ubspstore + 63 > urnat_addr) {
 284		/* some bits need to be merged in from pt->ar_rnat */
 285		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
 286		urnat = (pt->ar_rnat & umask);
 287		mask &= ~umask;
 288		if (!mask)
 289			return urnat;
 290	}
 291
 292	m = mask << shift;
 293	if (rnat0_kaddr >= kbsp)
 294		rnat0 = sw->ar_rnat;
 295	else if (rnat0_kaddr > krbs)
 296		rnat0 = *rnat0_kaddr;
 297	urnat |= (rnat0 & m) >> shift;
 298
 299	m = mask >> (63 - shift);
 300	if (rnat1_kaddr >= kbsp)
 301		rnat1 = sw->ar_rnat;
 302	else if (rnat1_kaddr > krbs)
 303		rnat1 = *rnat1_kaddr;
 304	urnat |= (rnat1 & m) << (63 - shift);
 305	return urnat;
 306}
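
/*
 * Worked example (illustrative): the RSE places one RNaT collection
 * slot after every 63 register slots, at each address whose bits 8:3
 * are all ones (the "0x....1f8" addresses in the diagram above).  Thus,
 * for a backing-store slot at address A, ia64_rse_rnat_addr(A) is
 * A | 0x1f8, and ia64_rse_slot_num(A) == (A >> 3) & 0x3f is the bit
 * that slot's NaT occupies within its collection word.
 */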
 307
 308/*
 309 * The reverse of get_rnat.
 310 */
 311static void
 312put_rnat (struct task_struct *task, struct switch_stack *sw,
 313	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
 314	  unsigned long *urbs_end)
 315{
 316	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
 317	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
 318	long num_regs, nbits;
 319	struct pt_regs *pt;
 320	unsigned long cfm, *urbs_kargs;
 321
 322	pt = task_pt_regs(task);
 323	kbsp = (unsigned long *) sw->ar_bspstore;
 324	ubspstore = (unsigned long *) pt->ar_bspstore;
 325
 326	urbs_kargs = urbs_end;
 327	if (in_syscall(pt)) {
 328		/*
 329		 * If entered via syscall, don't allow user to set rnat bits
 330		 * for syscall args.
 331		 */
 332		cfm = pt->cr_ifs;
 333		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
 334	}
 335
 336	if (urbs_kargs >= urnat_addr)
 337		nbits = 63;
 338	else {
 339		if ((urnat_addr - 63) >= urbs_kargs)
 340			return;
 341		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
 342	}
 343	mask = MASK(nbits);
 344
 345	/*
 346	 * First, figure out which bit number slot 0 in user-land maps
 347	 * to in the kernel rnat.  Do this by figuring out how many
 348	 * register slots we're beyond the user's backing store and
 349	 * then computing the equivalent address in kernel space.
 350	 */
 351	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
 352	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
 353	shift = ia64_rse_slot_num(slot0_kaddr);
 354	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
 355	rnat0_kaddr = rnat1_kaddr - 64;
 356
 357	if (ubspstore + 63 > urnat_addr) {
 358		/* some bits need to be placed in pt->ar_rnat: */
 359		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
 360		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
 361		mask &= ~umask;
 362		if (!mask)
 363			return;
 364	}
 365	/*
 366	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
 367	 * rnat slot is ignored, so we don't have to clear it here.
 368	 */
 369	rnat0 = (urnat << shift);
 370	m = mask << shift;
 371	if (rnat0_kaddr >= kbsp)
 372		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
 373	else if (rnat0_kaddr > krbs)
 374		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
 375
 376	rnat1 = (urnat >> (63 - shift));
 377	m = mask >> (63 - shift);
 378	if (rnat1_kaddr >= kbsp)
 379		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
 380	else if (rnat1_kaddr > krbs)
 381		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
 382}
 383
 384static inline int
 385on_kernel_rbs (unsigned long addr, unsigned long bspstore,
 386	       unsigned long urbs_end)
 387{
 388	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
 389						      urbs_end);
 390	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
 391}
 392
 393/*
 394 * Read a word from the user-level backing store of task CHILD.  ADDR
 395 * is the user-level address to read the word from, VAL a pointer to
 396 * the return value, and USER_RBS_END gives the end of the user-level
 397 * backing store (i.e., it's the address that would be in ar.bsp after
 398 * the user executed a "cover" instruction).
 399 *
 400 * This routine takes care of accessing the kernel register backing
 401 * store for those registers that got spilled there.  It also takes
 402 * care of calculating the appropriate RNaT collection words.
 403 */
 404long
 405ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
 406	   unsigned long user_rbs_end, unsigned long addr, long *val)
 407{
 408	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
 409	struct pt_regs *child_regs;
 410	size_t copied;
 411	long ret;
 412
 413	urbs_end = (long *) user_rbs_end;
 414	laddr = (unsigned long *) addr;
 415	child_regs = task_pt_regs(child);
 416	bspstore = (unsigned long *) child_regs->ar_bspstore;
 417	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 418	if (on_kernel_rbs(addr, (unsigned long) bspstore,
 419			  (unsigned long) urbs_end))
 420	{
 421		/*
 422		 * Attempt to read the RBS in an area that's actually
 423		 * on the kernel RBS => read the corresponding bits in
 424		 * the kernel RBS.
 425		 */
 426		rnat_addr = ia64_rse_rnat_addr(laddr);
 427		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
 428
 429		if (laddr == rnat_addr) {
 430			/* return NaT collection word itself */
 431			*val = ret;
 432			return 0;
 433		}
 434
 435		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
 436			/*
 437			 * It is implementation dependent whether the
 438			 * data portion of a NaT value gets saved on a
 439			 * st8.spill or RSE spill (e.g., see EAS 2.6,
 440			 * 4.4.4.6 Register Spill and Fill).  To get
 441			 * consistent behavior across all possible
 442			 * IA-64 implementations, we return zero in
 443			 * this case.
 444			 */
 445			*val = 0;
 446			return 0;
 447		}
 448
 449		if (laddr < urbs_end) {
 450			/*
 451			 * The desired word is on the kernel RBS and
 452			 * is not a NaT.
 453			 */
 454			regnum = ia64_rse_num_regs(bspstore, laddr);
 455			*val = *ia64_rse_skip_regs(krbs, regnum);
 456			return 0;
 457		}
 458	}
 459	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
 460	if (copied != sizeof(ret))
 461		return -EIO;
 462	*val = ret;
 463	return 0;
 464}
 465
 466long
 467ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
 468	   unsigned long user_rbs_end, unsigned long addr, long val)
 469{
 470	unsigned long *bspstore, *krbs, regnum, *laddr;
 471	unsigned long *urbs_end = (long *) user_rbs_end;
 472	struct pt_regs *child_regs;
 473
 474	laddr = (unsigned long *) addr;
 475	child_regs = task_pt_regs(child);
 476	bspstore = (unsigned long *) child_regs->ar_bspstore;
 477	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 478	if (on_kernel_rbs(addr, (unsigned long) bspstore,
 479			  (unsigned long) urbs_end))
 480	{
 481		/*
 482		 * Attempt to write the RBS in an area that's actually
 483		 * on the kernel RBS => write the corresponding bits
 484		 * in the kernel RBS.
 485		 */
 486		if (ia64_rse_is_rnat_slot(laddr))
 487			put_rnat(child, child_stack, krbs, laddr, val,
 488				 urbs_end);
 489		else {
 490			if (laddr < urbs_end) {
 491				regnum = ia64_rse_num_regs(bspstore, laddr);
 492				*ia64_rse_skip_regs(krbs, regnum) = val;
 493			}
 494		}
 495	} else if (access_process_vm(child, addr, &val, sizeof(val),
 496				FOLL_FORCE | FOLL_WRITE)
 497		   != sizeof(val))
 498		return -EIO;
 499	return 0;
 500}
 501
 502/*
 503 * Calculate the address of the end of the user-level register backing
 504 * store.  This is the address that would have been stored in ar.bsp
 505 * if the user had executed a "cover" instruction right before
 506 * entering the kernel.  If CFMP is not NULL, it is used to return the
 507 * "current frame mask" that was active at the time the kernel was
 508 * entered.
 509 */
 510unsigned long
 511ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
 512		       unsigned long *cfmp)
 513{
 514	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
 515	long ndirty;
 516
 517	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 518	bspstore = (unsigned long *) pt->ar_bspstore;
 519	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
 520
 521	if (in_syscall(pt))
 522		ndirty += (cfm & 0x7f);
 523	else
 524		cfm &= ~(1UL << 63);	/* clear valid bit */
 525
 526	if (cfmp)
 527		*cfmp = cfm;
 528	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
 529}
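
/*
 * A sketch of the arithmetic above (illustrative): pt->loadrs keeps the
 * ar.rsc.loadrs field in place at bit 16, and loadrs measures the dirty
 * partition in bytes, so "pt->loadrs >> 19" is (loadrs >> 16) / 8 --
 * the dirty size in 8-byte slots -- which ia64_rse_num_regs() then
 * converts to a register count by discounting the interleaved RNaT
 * slots.
 */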
 530
 531/*
 532 * Synchronize (i.e., write) the RSE backing store living in kernel
 533 * space to the VM of the CHILD task.  SW and PT are the pointers to
 534 * the switch_stack and pt_regs structures, respectively.
 535 * USER_RBS_END is the user-level address at which the backing store
 536 * ends.
 537 */
 538long
 539ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
 540		    unsigned long user_rbs_start, unsigned long user_rbs_end)
 541{
 542	unsigned long addr, val;
 543	long ret;
 544
 545	/* now copy word for word from kernel rbs to user rbs: */
 546	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
 547		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
 548		if (ret < 0)
 549			return ret;
 550		if (access_process_vm(child, addr, &val, sizeof(val),
 551				FOLL_FORCE | FOLL_WRITE)
 552		    != sizeof(val))
 553			return -EIO;
 554	}
 555	return 0;
 556}
 557
 558static long
 559ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
 560		unsigned long user_rbs_start, unsigned long user_rbs_end)
 561{
 562	unsigned long addr, val;
 563	long ret;
 564
 565	/* now copy word for word from user rbs to kernel rbs: */
 566	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
 567		if (access_process_vm(child, addr, &val, sizeof(val),
 568				FOLL_FORCE)
 569				!= sizeof(val))
 570			return -EIO;
 571
 572		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
 573		if (ret < 0)
 574			return ret;
 575	}
 576	return 0;
 577}
 578
 579typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
 580			    unsigned long, unsigned long);
 581
 582static void do_sync_rbs(struct unw_frame_info *info, void *arg)
 583{
 584	struct pt_regs *pt;
 585	unsigned long urbs_end;
 586	syncfunc_t fn = arg;
 587
 588	if (unw_unwind_to_user(info) < 0)
 589		return;
 590	pt = task_pt_regs(info->task);
 591	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);
 592
 593	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
 594}
 595
 596/*
 597 * When a thread is stopped (ptraced), the debugger might change the
 598 * thread's user stack by writing its memory directly, and the RSE state
 599 * saved in the kernel must not overwrite it (the user-space copy is the
 600 * newer one in that case).  To work around this, we copy the kernel RSE
 601 * to the user RBS before the task stops, so the user RBS holds current
 602 * data.  After the task resumes from the traced stop, we copy the user
 603 * RBS back to the kernel, and the kernel uses the newer state to return
 604 * to user mode.  TIF_RESTORE_RSE indicates that this sync is needed.
 605 */
 606void ia64_ptrace_stop(void)
 607{
 608	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
 609		return;
 610	set_notify_resume(current);
 611	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
 612}
 613
 614/*
 615 * This is called to read back the register backing store.
 616 */
 617void ia64_sync_krbs(void)
 618{
 619	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
 620
 621	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
 622}
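
/*
 * Sketch of the overall flow (illustrative):
 *
 *	tracee stopping:  ia64_ptrace_stop()
 *	                    -> ia64_sync_user_rbs():   kernel RBS -> user memory
 *	(debugger reads/writes the user backing store via ptrace)
 *	tracee resuming:  TIF_RESTORE_RSE -> ia64_sync_krbs()
 *	                    -> ia64_sync_kernel_rbs(): user memory -> kernel RBS
 */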
 623
 624/*
 625 * After PTRACE_ATTACH, a thread's register backing store area in user
 626 * space is assumed to contain correct data whenever the thread is
 627 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 628 * But if the child was already stopped for job control when we attach
 629 * to it, then it might not ever get into ptrace_stop by the time we
 630 * want to examine the user memory containing the RBS.
 631 */
 632void
 633ptrace_attach_sync_user_rbs (struct task_struct *child)
 634{
 635	int stopped = 0;
 636	struct unw_frame_info info;
 637
 638	/*
 639	 * If the child is in TASK_STOPPED, we need to change that to
 640	 * TASK_TRACED momentarily while we operate on it.  This ensures
 641	 * that the child won't be woken up and return to user mode while
 642	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
 643	 */
 644
 645	read_lock(&tasklist_lock);
 646	if (child->sighand) {
 647		spin_lock_irq(&child->sighand->siglock);
 648		if (child->state == TASK_STOPPED &&
 649		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
 650			set_notify_resume(child);
 651
 652			child->state = TASK_TRACED;
 653			stopped = 1;
 654		}
 655		spin_unlock_irq(&child->sighand->siglock);
 656	}
 657	read_unlock(&tasklist_lock);
 658
 659	if (!stopped)
 660		return;
 661
 662	unw_init_from_blocked_task(&info, child);
 663	do_sync_rbs(&info, ia64_sync_user_rbs);
 664
 665	/*
 666	 * Now move the child back into TASK_STOPPED if it should be in a
 667	 * job control stop, so that SIGCONT can be used to wake it up.
 668	 */
 669	read_lock(&tasklist_lock);
 670	if (child->sighand) {
 671		spin_lock_irq(&child->sighand->siglock);
 672		if (child->state == TASK_TRACED &&
 673		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
 674			child->state = TASK_STOPPED;
 675		}
 676		spin_unlock_irq(&child->sighand->siglock);
 677	}
 678	read_unlock(&tasklist_lock);
 679}
 680
 681/*
 682 * Write f32-f127 back to task->thread.fph if it has been modified.
 683 */
 684inline void
 685ia64_flush_fph (struct task_struct *task)
 686{
 687	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 688
 689	/*
 690	 * Prevent migrating this task while
 691	 * we're fiddling with the FPU state
 692	 */
 693	preempt_disable();
 694	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
 695		psr->mfh = 0;
 696		task->thread.flags |= IA64_THREAD_FPH_VALID;
 697		ia64_save_fpu(&task->thread.fph[0]);
 698	}
 699	preempt_enable();
 700}
 701
 702/*
 703 * Sync the fph state of the task so that it can be manipulated
 704 * through thread.fph.  If necessary, f32-f127 are written back to
 705 * thread.fph or, if the fph state hasn't been used before, thread.fph
 706 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 707 * ensure that the task picks up the state from thread.fph when it
 708 * executes again.
 709 */
 710void
 711ia64_sync_fph (struct task_struct *task)
 712{
 713	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 714
 715	ia64_flush_fph(task);
 716	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
 717		task->thread.flags |= IA64_THREAD_FPH_VALID;
 718		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
 719	}
 720	ia64_drop_fpu(task);
 721	psr->dfh = 1;
 722}
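
/*
 * Illustrative summary of the lazy fph (f32-f127) bookkeeping used by
 * the two functions above:
 *
 *	psr.mfh set		task modified fph on the CPU that owns it;
 *				ia64_flush_fph() writes it back to thread.fph
 *	IA64_THREAD_FPH_VALID	thread.fph holds the authoritative copy
 *	psr.dfh set		the task's next access to f32-f127 faults,
 *				so it picks the state up from thread.fph
 */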
 723
 724/*
 725 * Change the machine-state of CHILD such that it will return via the normal
 726 * kernel exit-path, rather than the syscall-exit path.
 727 */
 728static void
 729convert_to_non_syscall (struct task_struct *child, struct pt_regs  *pt,
 730			unsigned long cfm)
 731{
 732	struct unw_frame_info info, prev_info;
 733	unsigned long ip, sp, pr;
 734
 735	unw_init_from_blocked_task(&info, child);
 736	while (1) {
 737		prev_info = info;
 738		if (unw_unwind(&info) < 0)
 739			return;
 740
 741		unw_get_sp(&info, &sp);
 742		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
 743		    < IA64_PT_REGS_SIZE) {
 744			dprintk("ptrace.%s: ran off the top of the kernel "
 745				"stack\n", __func__);
 746			return;
 747		}
 748		if (unw_get_pr (&prev_info, &pr) < 0) {
 749			unw_get_rp(&prev_info, &ip);
 750			dprintk("ptrace.%s: failed to read "
 751				"predicate register (ip=0x%lx)\n",
 752				__func__, ip);
 753			return;
 754		}
 755		if (unw_is_intr_frame(&info)
 756		    && (pr & (1UL << PRED_USER_STACK)))
 757			break;
 758	}
 759
 760	/*
 761	 * Note: at the time of this call, the target task is blocked
 762	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
 763	 * (aka, "pLvSys") we redirect execution from
 764	 * .work_pending_syscall_end to .work_processed_kernel.
 765	 */
 766	unw_get_pr(&prev_info, &pr);
 767	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
 768	pr |=  (1UL << PRED_NON_SYSCALL);
 769	unw_set_pr(&prev_info, pr);
 770
 771	pt->cr_ifs = (1UL << 63) | cfm;
 772	/*
 773	 * Clear the memory that is NOT written on syscall-entry to
 774	 * ensure we do not leak kernel-state to user when execution
 775	 * resumes.
 776	 */
 777	pt->r2 = 0;
 778	pt->r3 = 0;
 779	pt->r14 = 0;
 780	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
 781	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
 782	pt->b7 = 0;
 783	pt->ar_ccv = 0;
 784	pt->ar_csd = 0;
 785	pt->ar_ssd = 0;
 786}
 787
 788static int
 789access_nat_bits (struct task_struct *child, struct pt_regs *pt,
 790		 struct unw_frame_info *info,
 791		 unsigned long *data, int write_access)
 792{
 793	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
 794	char nat = 0;
 795
 796	if (write_access) {
 797		nat_bits = *data;
 798		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
 799		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
 800			dprintk("ptrace: failed to set ar.unat\n");
 801			return -1;
 802		}
 803		for (regnum = 4; regnum <= 7; ++regnum) {
 804			unw_get_gr(info, regnum, &dummy, &nat);
 805			unw_set_gr(info, regnum, dummy,
 806				   (nat_bits >> regnum) & 1);
 807		}
 808	} else {
 809		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
 810			dprintk("ptrace: failed to read ar.unat\n");
 811			return -1;
 812		}
 813		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
 814		for (regnum = 4; regnum <= 7; ++regnum) {
 815			unw_get_gr(info, regnum, &dummy, &nat);
 816			nat_bits |= (nat != 0) << regnum;
 817		}
 818		*data = nat_bits;
 819	}
 820	return 0;
 821}
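
/*
 * Illustrative note: the scratch registers' NaT bits are recovered from
 * the rotated ar.unat image via ia64_get/put_scratch_nat_bits(), while
 * r4-r7 are preserved registers whose current home (switch_stack or a
 * kernel stack frame) only the unwinder can locate -- hence the
 * two-part handling above.
 */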
 822
 823static int
 824access_uarea (struct task_struct *child, unsigned long addr,
 825	      unsigned long *data, int write_access);
 826
 827static long
 828ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 829{
 830	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
 831	struct unw_frame_info info;
 832	struct ia64_fpreg fpval;
 833	struct switch_stack *sw;
 834	struct pt_regs *pt;
 835	long ret, retval = 0;
 836	char nat = 0;
 837	int i;
 838
 839	if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
 840		return -EIO;
 841
 842	pt = task_pt_regs(child);
 843	sw = (struct switch_stack *) (child->thread.ksp + 16);
 844	unw_init_from_blocked_task(&info, child);
 845	if (unw_unwind_to_user(&info) < 0) {
 846		return -EIO;
 847	}
 848
 849	if (((unsigned long) ppr & 0x7) != 0) {
 850		dprintk("ptrace: unaligned register address %p\n", ppr);
 851		return -EIO;
 852	}
 853
 854	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
 855	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
 856	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
 857	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
 858	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
 859	    || access_uarea(child, PT_CFM, &cfm, 0)
 860	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
 861		return -EIO;
 862
 863	/* control regs */
 864
 865	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
 866	retval |= __put_user(psr, &ppr->cr_ipsr);
 867
 868	/* app regs */
 869
 870	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
 871	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
 872	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
 873	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
 874	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
 875	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
 876
 877	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
 878	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
 879	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
 880	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
 881	retval |= __put_user(cfm, &ppr->cfm);
 882
 883	/* gr1-gr3 */
 884
 885	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
 886	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
 887
 888	/* gr4-gr7 */
 889
 890	for (i = 4; i < 8; i++) {
 891		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
 892			return -EIO;
 893		retval |= __put_user(val, &ppr->gr[i]);
 894	}
 895
 896	/* gr8-gr11 */
 897
 898	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
 899
 900	/* gr12-gr15 */
 901
 902	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
 903	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
 904	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
 905
 906	/* gr16-gr31 */
 907
 908	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
 909
 910	/* b0 */
 911
 912	retval |= __put_user(pt->b0, &ppr->br[0]);
 913
 914	/* b1-b5 */
 915
 916	for (i = 1; i < 6; i++) {
 917		if (unw_access_br(&info, i, &val, 0) < 0)
 918			return -EIO;
 919		__put_user(val, &ppr->br[i]);
 920	}
 921
 922	/* b6-b7 */
 923
 924	retval |= __put_user(pt->b6, &ppr->br[6]);
 925	retval |= __put_user(pt->b7, &ppr->br[7]);
 926
 927	/* fr2-fr5 */
 928
 929	for (i = 2; i < 6; i++) {
 930		if (unw_get_fr(&info, i, &fpval) < 0)
 931			return -EIO;
 932		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
 933	}
 934
 935	/* fr6-fr11 */
 936
 937	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
 938				 sizeof(struct ia64_fpreg) * 6);
 939
 940	/* fp scratch regs (12-15) */
 941
 942	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
 943				 sizeof(struct ia64_fpreg) * 4);
 944
 945	/* fr16-fr31 */
 946
 947	for (i = 16; i < 32; i++) {
 948		if (unw_get_fr(&info, i, &fpval) < 0)
 949			return -EIO;
 950		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
 951	}
 952
 953	/* fph */
 954
 955	ia64_flush_fph(child);
 956	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
 957				 sizeof(ppr->fr[32]) * 96);
 958
 959	/*  preds */
 960
 961	retval |= __put_user(pt->pr, &ppr->pr);
 962
 963	/* nat bits */
 964
 965	retval |= __put_user(nat_bits, &ppr->nat);
 966
 967	ret = retval ? -EIO : 0;
 968	return ret;
 969}
 970
 971static long
 972ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 973{
 974	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
 975	struct unw_frame_info info;
 976	struct switch_stack *sw;
 977	struct ia64_fpreg fpval;
 978	struct pt_regs *pt;
 979	long ret, retval = 0;
 980	int i;
 981
 982	memset(&fpval, 0, sizeof(fpval));
 983
 984	if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
 985		return -EIO;
 986
 987	pt = task_pt_regs(child);
 988	sw = (struct switch_stack *) (child->thread.ksp + 16);
 989	unw_init_from_blocked_task(&info, child);
 990	if (unw_unwind_to_user(&info) < 0) {
 991		return -EIO;
 992	}
 993
 994	if (((unsigned long) ppr & 0x7) != 0) {
 995		dprintk("ptrace: unaligned register address %p\n", ppr);
 996		return -EIO;
 997	}
 998
 999	/* control regs */
1000
1001	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
1002	retval |= __get_user(psr, &ppr->cr_ipsr);
1003
1004	/* app regs */
1005
1006	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
1007	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
1008	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1009	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1010	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1011	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1012
1013	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
1014	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
1015	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
1016	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
1017	retval |= __get_user(cfm, &ppr->cfm);
1018
1019	/* gr1-gr3 */
1020
1021	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
1022	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
1023
1024	/* gr4-gr7 */
1025
1026	for (i = 4; i < 8; i++) {
1027		retval |= __get_user(val, &ppr->gr[i]);
1028		/* NaT bit will be set via PT_NAT_BITS: */
1029		if (unw_set_gr(&info, i, val, 0) < 0)
1030			return -EIO;
1031	}
1032
1033	/* gr8-gr11 */
1034
1035	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
1036
1037	/* gr12-gr15 */
1038
1039	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
1040	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
1041	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
1042
1043	/* gr16-gr31 */
1044
1045	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
1046
1047	/* b0 */
1048
1049	retval |= __get_user(pt->b0, &ppr->br[0]);
1050
1051	/* b1-b5 */
1052
1053	for (i = 1; i < 6; i++) {
1054		retval |= __get_user(val, &ppr->br[i]);
1055		unw_set_br(&info, i, val);
1056	}
1057
1058	/* b6-b7 */
1059
1060	retval |= __get_user(pt->b6, &ppr->br[6]);
1061	retval |= __get_user(pt->b7, &ppr->br[7]);
1062
1063	/* fr2-fr5 */
1064
1065	for (i = 2; i < 6; i++) {
1066		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
1067		if (unw_set_fr(&info, i, fpval) < 0)
1068			return -EIO;
1069	}
1070
1071	/* fr6-fr11 */
1072
1073	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
1074				   sizeof(ppr->fr[6]) * 6);
1075
1076	/* fp scratch regs (12-15) */
1077
1078	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
1079				   sizeof(ppr->fr[12]) * 4);
1080
1081	/* fr16-fr31 */
1082
1083	for (i = 16; i < 32; i++) {
1084		retval |= __copy_from_user(&fpval, &ppr->fr[i],
1085					   sizeof(fpval));
1086		if (unw_set_fr(&info, i, fpval) < 0)
1087			return -EIO;
1088	}
1089
1090	/* fph */
1091
1092	ia64_sync_fph(child);
1093	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
1094				   sizeof(ppr->fr[32]) * 96);
1095
1096	/* preds */
1097
1098	retval |= __get_user(pt->pr, &ppr->pr);
1099
1100	/* nat bits */
1101
1102	retval |= __get_user(nat_bits, &ppr->nat);
1103
1104	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
1105	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
1106	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
1107	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
1108	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
1109	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
1110	retval |= access_uarea(child, PT_CFM, &cfm, 1);
1111	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
1112
1113	ret = retval ? -EIO : 0;
1114	return ret;
1115}
1116
1117void
1118user_enable_single_step (struct task_struct *child)
1119{
1120	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1121
1122	set_tsk_thread_flag(child, TIF_SINGLESTEP);
1123	child_psr->ss = 1;
1124}
1125
1126void
1127user_enable_block_step (struct task_struct *child)
1128{
1129	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1130
1131	set_tsk_thread_flag(child, TIF_SINGLESTEP);
1132	child_psr->tb = 1;
1133}
1134
1135void
1136user_disable_single_step (struct task_struct *child)
1137{
1138	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1139
1140	/* make sure the single step/taken-branch trap bits are not set: */
1141	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1142	child_psr->ss = 0;
1143	child_psr->tb = 0;
1144}
1145
1146/*
1147 * Called by kernel/ptrace.c when detaching..
1148 *
1149 * Make sure the single step bit is not set.
1150 */
1151void
1152ptrace_disable (struct task_struct *child)
1153{
1154	user_disable_single_step(child);
1155}
1156
1157long
1158arch_ptrace (struct task_struct *child, long request,
1159	     unsigned long addr, unsigned long data)
1160{
1161	switch (request) {
1162	case PTRACE_PEEKTEXT:
1163	case PTRACE_PEEKDATA:
1164		/* read word at location addr */
1165		if (ptrace_access_vm(child, addr, &data, sizeof(data),
1166				FOLL_FORCE)
1167		    != sizeof(data))
1168			return -EIO;
1169		/* ensure return value is not mistaken for error code */
1170		force_successful_syscall_return();
1171		return data;
1172
1173	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
1174	 * by the generic ptrace_request().
1175	 */
1176
1177	case PTRACE_PEEKUSR:
1178		/* read the word at addr in the USER area */
1179		if (access_uarea(child, addr, &data, 0) < 0)
1180			return -EIO;
1181		/* ensure return value is not mistaken for error code */
1182		force_successful_syscall_return();
1183		return data;
1184
1185	case PTRACE_POKEUSR:
1186		/* write the word at addr in the USER area */
1187		if (access_uarea(child, addr, &data, 1) < 0)
1188			return -EIO;
1189		return 0;
1190
1191	case PTRACE_OLD_GETSIGINFO:
1192		/* for backwards-compatibility */
1193		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
1194
1195	case PTRACE_OLD_SETSIGINFO:
1196		/* for backwards-compatibility */
1197		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
1198
1199	case PTRACE_GETREGS:
1200		return ptrace_getregs(child,
1201				      (struct pt_all_user_regs __user *) data);
1202
1203	case PTRACE_SETREGS:
1204		return ptrace_setregs(child,
1205				      (struct pt_all_user_regs __user *) data);
1206
1207	default:
1208		return ptrace_request(child, request, addr, data);
1209	}
1210}
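
/*
 * For reference, a minimal user-space sketch (hypothetical; not part of
 * this file) of how a tracer reaches the PTRACE_PEEKUSR case above to
 * read a stopped child's instruction pointer:
 *
 *	#include <errno.h>
 *	#include <sys/ptrace.h>
 *	#include <asm/ptrace_offsets.h>		// PT_CR_IIP
 *
 *	errno = 0;
 *	long ip = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);
 *	if (ip == -1 && errno != 0)
 *		perror("ptrace");	// e.g. EIO from access_uarea()
 *
 * force_successful_syscall_return() is what lets a legitimate value of
 * -1 be told apart from an error return here.
 */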
1211
1212
1213/* "asmlinkage" so the input arguments are preserved... */
1214
1215asmlinkage long
1216syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1217		     long arg4, long arg5, long arg6, long arg7,
1218		     struct pt_regs regs)
1219{
1220	if (test_thread_flag(TIF_SYSCALL_TRACE))
1221		if (tracehook_report_syscall_entry(&regs))
1222			return -ENOSYS;
1223
1224	/* copy user rbs to kernel rbs */
1225	if (test_thread_flag(TIF_RESTORE_RSE))
1226		ia64_sync_krbs();
1227
1228
1229	audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3);
1230
1231	return 0;
1232}
1233
1234/* "asmlinkage" so the input arguments are preserved... */
1235
1236asmlinkage void
1237syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1238		     long arg4, long arg5, long arg6, long arg7,
1239		     struct pt_regs regs)
1240{
1241	int step;
1242
1243	audit_syscall_exit(&regs);
1244
1245	step = test_thread_flag(TIF_SINGLESTEP);
1246	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1247		tracehook_report_syscall_exit(&regs, step);
1248
1249	/* copy user rbs to kernel rbs */
1250	if (test_thread_flag(TIF_RESTORE_RSE))
1251		ia64_sync_krbs();
1252}
1253
1254/* Utrace implementation starts here */
1255struct regset_get {
1256	void *kbuf;
1257	void __user *ubuf;
1258};
1259
1260struct regset_set {
1261	const void *kbuf;
1262	const void __user *ubuf;
1263};
1264
1265struct regset_getset {
1266	struct task_struct *target;
1267	const struct user_regset *regset;
1268	union {
1269		struct regset_get get;
1270		struct regset_set set;
1271	} u;
1272	unsigned int pos;
1273	unsigned int count;
1274	int ret;
1275};
1276
1277static int
1278access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
1279		unsigned long addr, unsigned long *data, int write_access)
1280{
1281	struct pt_regs *pt;
1282	unsigned long *ptr = NULL;
1283	int ret;
1284	char nat = 0;
1285
1286	pt = task_pt_regs(target);
1287	switch (addr) {
1288	case ELF_GR_OFFSET(1):
1289		ptr = &pt->r1;
1290		break;
1291	case ELF_GR_OFFSET(2):
1292	case ELF_GR_OFFSET(3):
1293		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
1294		break;
1295	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
1296		if (write_access) {
1297			/* read NaT bit first: */
1298			unsigned long dummy;
1299
1300			ret = unw_get_gr(info, addr/8, &dummy, &nat);
1301			if (ret < 0)
1302				return ret;
1303		}
1304		return unw_access_gr(info, addr/8, data, &nat, write_access);
1305	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
1306		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
1307		break;
1308	case ELF_GR_OFFSET(12):
1309	case ELF_GR_OFFSET(13):
1310		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
1311		break;
1312	case ELF_GR_OFFSET(14):
1313		ptr = &pt->r14;
1314		break;
1315	case ELF_GR_OFFSET(15):
1316		ptr = &pt->r15;
1317	}
1318	if (write_access)
1319		*ptr = *data;
1320	else
1321		*data = *ptr;
1322	return 0;
1323}
1324
1325static int
1326access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
1327		unsigned long addr, unsigned long *data, int write_access)
1328{
1329	struct pt_regs *pt;
1330	unsigned long *ptr = NULL;
1331
1332	pt = task_pt_regs(target);
1333	switch (addr) {
1334	case ELF_BR_OFFSET(0):
1335		ptr = &pt->b0;
1336		break;
1337	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
1338		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
1339				     data, write_access);
1340	case ELF_BR_OFFSET(6):
1341		ptr = &pt->b6;
1342		break;
1343	case ELF_BR_OFFSET(7):
1344		ptr = &pt->b7;
1345	}
1346	if (write_access)
1347		*ptr = *data;
1348	else
1349		*data = *ptr;
1350	return 0;
1351}
1352
1353static int
1354access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
1355		unsigned long addr, unsigned long *data, int write_access)
1356{
1357	struct pt_regs *pt;
1358	unsigned long cfm, urbs_end;
1359	unsigned long *ptr = NULL;
1360
1361	pt = task_pt_regs(target);
1362	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
1363		switch (addr) {
1364		case ELF_AR_RSC_OFFSET:
1365			/* force PL3 */
1366			if (write_access)
1367				pt->ar_rsc = *data | (3 << 2);
1368			else
1369				*data = pt->ar_rsc;
1370			return 0;
1371		case ELF_AR_BSP_OFFSET:
1372			/*
1373			 * By convention, we use PT_AR_BSP to refer to
1374			 * the end of the user-level backing store.
1375			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
1376			 * to get the real value of ar.bsp at the time
1377			 * the kernel was entered.
1378			 *
1379			 * Furthermore, when changing the contents of
1380			 * PT_AR_BSP (or PT_CFM) while the task is
1381			 * blocked in a system call, convert the state
1382			 * so that the non-system-call exit
1383			 * path is used.  This ensures that the proper
1384			 * state will be picked up when resuming
1385			 * execution.  However, it *also* means that
1386			 * once we write PT_AR_BSP/PT_CFM, it won't be
1387			 * possible to modify the syscall arguments of
1388			 * the pending system call any longer.  This
1389			 * shouldn't be an issue because modifying
1390			 * PT_AR_BSP/PT_CFM generally implies that
1391			 * we're either abandoning the pending system
1392			 * call or that we defer its re-execution
1393			 * (e.g., due to GDB doing an inferior
1394			 * function call).
1395			 */
1396			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1397			if (write_access) {
1398				if (*data != urbs_end) {
1399					if (in_syscall(pt))
1400						convert_to_non_syscall(target,
1401								       pt,
1402								       cfm);
1403					/*
1404					 * Simulate user-level write
1405					 * of ar.bsp:
1406					 */
1407					pt->loadrs = 0;
1408					pt->ar_bspstore = *data;
1409				}
1410			} else
1411				*data = urbs_end;
1412			return 0;
1413		case ELF_AR_BSPSTORE_OFFSET:
1414			ptr = &pt->ar_bspstore;
1415			break;
1416		case ELF_AR_RNAT_OFFSET:
1417			ptr = &pt->ar_rnat;
1418			break;
1419		case ELF_AR_CCV_OFFSET:
1420			ptr = &pt->ar_ccv;
1421			break;
1422		case ELF_AR_UNAT_OFFSET:
1423			ptr = &pt->ar_unat;
1424			break;
1425		case ELF_AR_FPSR_OFFSET:
1426			ptr = &pt->ar_fpsr;
1427			break;
1428		case ELF_AR_PFS_OFFSET:
1429			ptr = &pt->ar_pfs;
1430			break;
1431		case ELF_AR_LC_OFFSET:
1432			return unw_access_ar(info, UNW_AR_LC, data,
1433					     write_access);
1434		case ELF_AR_EC_OFFSET:
1435			return unw_access_ar(info, UNW_AR_EC, data,
1436					     write_access);
1437		case ELF_AR_CSD_OFFSET:
1438			ptr = &pt->ar_csd;
1439			break;
1440		case ELF_AR_SSD_OFFSET:
1441			ptr = &pt->ar_ssd;
1442		}
1443	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
1444		switch (addr) {
1445		case ELF_CR_IIP_OFFSET:
1446			ptr = &pt->cr_iip;
1447			break;
1448		case ELF_CFM_OFFSET:
1449			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1450			if (write_access) {
1451				if (((cfm ^ *data) & PFM_MASK) != 0) {
1452					if (in_syscall(pt))
1453						convert_to_non_syscall(target,
1454								       pt,
1455								       cfm);
1456					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
1457						      | (*data & PFM_MASK));
1458				}
1459			} else
1460				*data = cfm;
1461			return 0;
1462		case ELF_CR_IPSR_OFFSET:
1463			if (write_access) {
1464				unsigned long tmp = *data;
1465				/* psr.ri==3 is a reserved value: SDM 2:25 */
1466				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
1467					tmp &= ~IA64_PSR_RI;
1468				pt->cr_ipsr = ((tmp & IPSR_MASK)
1469					       | (pt->cr_ipsr & ~IPSR_MASK));
1470			} else
1471				*data = (pt->cr_ipsr & IPSR_MASK);
1472			return 0;
1473		}
1474	} else if (addr == ELF_NAT_OFFSET)
1475		return access_nat_bits(target, pt, info,
1476				       data, write_access);
1477	else if (addr == ELF_PR_OFFSET)
1478		ptr = &pt->pr;
1479	else
1480		return -1;
1481
1482	if (write_access)
1483		*ptr = *data;
1484	else
1485		*data = *ptr;
1486
1487	return 0;
1488}
1489
1490static int
1491access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
1492		unsigned long addr, unsigned long *data, int write_access)
1493{
1494	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
1495		return access_elf_gpreg(target, info, addr, data, write_access);
1496	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
1497		return access_elf_breg(target, info, addr, data, write_access);
1498	else
1499		return access_elf_areg(target, info, addr, data, write_access);
1500}
1501
1502void do_gpregs_get(struct unw_frame_info *info, void *arg)
1503{
1504	struct pt_regs *pt;
1505	struct regset_getset *dst = arg;
1506	elf_greg_t tmp[16];
1507	unsigned int i, index, min_copy;
1508
1509	if (unw_unwind_to_user(info) < 0)
1510		return;
1511
1512	/*
1513	 * coredump format:
1514	 *      r0-r31
1515	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
1516	 *      predicate registers (p0-p63)
1517	 *      b0-b7
1518	 *      ip cfm user-mask
1519	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
1520	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
1521	 */
1522
1523
1524	/* Skip r0 */
1525	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1526		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1527						      &dst->u.get.kbuf,
1528						      &dst->u.get.ubuf,
1529						      0, ELF_GR_OFFSET(1));
1530		if (dst->ret || dst->count == 0)
1531			return;
1532	}
1533
1534	/* gr1 - gr15 */
1535	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1536		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1537		min_copy = ELF_GR_OFFSET(16) > (dst->pos + dst->count) ?
1538			 (dst->pos + dst->count) : ELF_GR_OFFSET(16);
1539		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1540				index++)
1541			if (access_elf_reg(dst->target, info, i,
1542						&tmp[index], 0) < 0) {
1543				dst->ret = -EIO;
1544				return;
1545			}
1546		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1547				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1548				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1549		if (dst->ret || dst->count == 0)
1550			return;
1551	}
1552
1553	/* r16-r31 */
1554	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1555		pt = task_pt_regs(dst->target);
1556		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1557				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
1558				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1559		if (dst->ret || dst->count == 0)
1560			return;
1561	}
1562
1563	/* nat, pr, b0 - b7 */
1564	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1565		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1566		min_copy = ELF_CR_IIP_OFFSET > (dst->pos + dst->count) ?
1567			 (dst->pos + dst->count) : ELF_CR_IIP_OFFSET;
1568		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1569				index++)
1570			if (access_elf_reg(dst->target, info, i,
1571						&tmp[index], 0) < 0) {
1572				dst->ret = -EIO;
1573				return;
1574			}
1575		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1576				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1577				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1578		if (dst->ret || dst->count == 0)
1579			return;
1580	}
1581
1582	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1583	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1584	 */
1585	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1586		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1587		min_copy = ELF_AR_END_OFFSET > (dst->pos + dst->count) ?
1588			 (dst->pos + dst->count) : ELF_AR_END_OFFSET;
1589		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1590				index++)
1591			if (access_elf_reg(dst->target, info, i,
1592						&tmp[index], 0) < 0) {
1593				dst->ret = -EIO;
1594				return;
1595			}
1596		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1597				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1598				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1599	}
1600}
1601
1602void do_gpregs_set(struct unw_frame_info *info, void *arg)
1603{
1604	struct pt_regs *pt;
1605	struct regset_getset *dst = arg;
1606	elf_greg_t tmp[16];
1607	unsigned int i, index;
1608
1609	if (unw_unwind_to_user(info) < 0)
1610		return;
1611
1612	/* Skip r0 */
1613	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1614		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1615						       &dst->u.set.kbuf,
1616						       &dst->u.set.ubuf,
1617						       0, ELF_GR_OFFSET(1));
1618		if (dst->ret || dst->count == 0)
1619			return;
1620	}
1621
1622	/* gr1-gr15 */
1623	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1624		i = dst->pos;
1625		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1626		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1627				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1628				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1629		if (dst->ret)
1630			return;
1631		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1632			if (access_elf_reg(dst->target, info, i,
1633						&tmp[index], 1) < 0) {
1634				dst->ret = -EIO;
1635				return;
1636			}
1637		if (dst->count == 0)
1638			return;
1639	}
1640
1641	/* gr16-gr31 */
1642	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1643		pt = task_pt_regs(dst->target);
1644		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1645				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
1646				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1647		if (dst->ret || dst->count == 0)
1648			return;
1649	}
1650
1651	/* nat, pr, b0 - b7 */
1652	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1653		i = dst->pos;
1654		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1655		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1656				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1657				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1658		if (dst->ret)
1659			return;
1660		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
1661			if (access_elf_reg(dst->target, info, i,
1662						&tmp[index], 1) < 0) {
1663				dst->ret = -EIO;
1664				return;
1665			}
1666		if (dst->count == 0)
1667			return;
1668	}
1669
1670	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1671	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1672	 */
1673	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1674		i = dst->pos;
1675		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1676		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1677				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1678				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1679		if (dst->ret)
1680			return;
1681		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1682			if (access_elf_reg(dst->target, info, i,
1683						&tmp[index], 1) < 0) {
1684				dst->ret = -EIO;
1685				return;
1686			}
1687	}
1688}
1689
1690#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))
1691
1692void do_fpregs_get(struct unw_frame_info *info, void *arg)
1693{
1694	struct regset_getset *dst = arg;
1695	struct task_struct *task = dst->target;
1696	elf_fpreg_t tmp[30];
1697	int index, min_copy, i;
1698
1699	if (unw_unwind_to_user(info) < 0)
1700		return;
1701
1702	/* Skip pos 0 and 1 */
1703	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1704		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1705						      &dst->u.get.kbuf,
1706						      &dst->u.get.ubuf,
1707						      0, ELF_FP_OFFSET(2));
1708		if (dst->count == 0 || dst->ret)
1709			return;
1710	}
1711
1712	/* fr2-fr31 */
1713	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1714		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
1715
1716		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
1717				dst->pos + dst->count);
1718		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
1719				index++)
1720			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
1721					 &tmp[index])) {
1722				dst->ret = -EIO;
1723				return;
1724			}
1725		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1726				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1727				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1728		if (dst->count == 0 || dst->ret)
1729			return;
1730	}
1731
1732	/* fph */
1733	if (dst->count > 0) {
1734		ia64_flush_fph(dst->target);
1735		if (task->thread.flags & IA64_THREAD_FPH_VALID)
1736			dst->ret = user_regset_copyout(
1737				&dst->pos, &dst->count,
1738				&dst->u.get.kbuf, &dst->u.get.ubuf,
1739				&dst->target->thread.fph,
1740				ELF_FP_OFFSET(32), -1);
1741		else
1742			/* Zero fill instead.  */
1743			dst->ret = user_regset_copyout_zero(
1744				&dst->pos, &dst->count,
1745				&dst->u.get.kbuf, &dst->u.get.ubuf,
1746				ELF_FP_OFFSET(32), -1);
1747	}
1748}
1749
1750void do_fpregs_set(struct unw_frame_info *info, void *arg)
1751{
1752	struct regset_getset *dst = arg;
1753	elf_fpreg_t fpreg, tmp[30];
1754	int index, start, end;
1755
1756	if (unw_unwind_to_user(info) < 0)
1757		return;
1758
1759	/* Skip pos 0 and 1 */
1760	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1761		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1762						       &dst->u.set.kbuf,
1763						       &dst->u.set.ubuf,
1764						       0, ELF_FP_OFFSET(2));
1765		if (dst->count == 0 || dst->ret)
1766			return;
1767	}
1768
1769	/* fr2-fr31 */
1770	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1771		start = dst->pos;
1772		end = min(((unsigned int)ELF_FP_OFFSET(32)),
1773			 dst->pos + dst->count);
1774		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1775				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1776				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1777		if (dst->ret)
1778			return;
1779
1780		if (start & 0xF) { /* only write high part */
1781			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
1782					 &fpreg)) {
1783				dst->ret = -EIO;
1784				return;
1785			}
1786			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
1787				= fpreg.u.bits[0];
1788			start &= ~0xFUL;
1789		}
1790		if (end & 0xF) { /* only write low part */
1791			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
1792					&fpreg)) {
1793				dst->ret = -EIO;
1794				return;
1795			}
1796			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
1797				= fpreg.u.bits[1];
1798			end = (end + 0xF) & ~0xFUL;
1799		}
1800
1801		for ( ;	start < end ; start += sizeof(elf_fpreg_t)) {
1802			index = start / sizeof(elf_fpreg_t);
1803			if (unw_set_fr(info, index, tmp[index - 2])) {
1804				dst->ret = -EIO;
1805				return;
1806			}
1807		}
1808		if (dst->ret || dst->count == 0)
1809			return;
1810	}
1811
1812	/* fph */
1813	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
1814		ia64_sync_fph(dst->target);
1815		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1816						&dst->u.set.kbuf,
1817						&dst->u.set.ubuf,
1818						&dst->target->thread.fph,
1819						ELF_FP_OFFSET(32), -1);
1820	}
1821}
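
/*
 * Note on the masking above (illustrative): an elf_fpreg_t is 16 bytes,
 * but user_regset_copyin() works at byte granularity, so a write may
 * start or end in the middle of a register.  The (start & 0xF) and
 * (end & 0xF) cases fetch that register's current value with
 * unw_get_fr() and splice in the half the user did not supply, so that
 * unw_set_fr() always stores a complete 16-byte value.
 */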
1822
1823static int
1824do_regset_call(void (*call)(struct unw_frame_info *, void *),
1825	       struct task_struct *target,
1826	       const struct user_regset *regset,
1827	       unsigned int pos, unsigned int count,
1828	       const void *kbuf, const void __user *ubuf)
1829{
1830	struct regset_getset info = { .target = target, .regset = regset,
1831				 .pos = pos, .count = count,
1832				 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
1833				 .ret = 0 };
1834
1835	if (target == current)
1836		unw_init_running(call, &info);
1837	else {
1838		struct unw_frame_info ufi;
1839		memset(&ufi, 0, sizeof(ufi));
1840		unw_init_from_blocked_task(&ufi, target);
1841		(*call)(&ufi, &info);
1842	}
1843
1844	return info.ret;
1845}
1846
1847static int
1848gpregs_get(struct task_struct *target,
1849	   const struct user_regset *regset,
1850	   unsigned int pos, unsigned int count,
1851	   void *kbuf, void __user *ubuf)
1852{
1853	return do_regset_call(do_gpregs_get, target, regset, pos, count,
1854		kbuf, ubuf);
1855}
1856
1857static int gpregs_set(struct task_struct *target,
1858		const struct user_regset *regset,
1859		unsigned int pos, unsigned int count,
1860		const void *kbuf, const void __user *ubuf)
1861{
1862	return do_regset_call(do_gpregs_set, target, regset, pos, count,
1863		kbuf, ubuf);
1864}
1865
1866static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
1867{
1868	do_sync_rbs(info, ia64_sync_user_rbs);
1869}
1870
1871/*
1872 * This is called to write back the register backing store.
1873	 * ptrace does this before the tracee stops, so that a tracer reading
1874	 * the user memory after the thread stops will see the current register data.
1875 */
1876static int
1877gpregs_writeback(struct task_struct *target,
1878		 const struct user_regset *regset,
1879		 int now)
1880{
1881	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
1882		return 0;
1883	set_notify_resume(target);
1884	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
1885		NULL, NULL);
1886}
1887
1888static int
1889fpregs_active(struct task_struct *target, const struct user_regset *regset)
1890{
1891	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
1892}
1893
1894static int fpregs_get(struct task_struct *target,
1895		const struct user_regset *regset,
1896		unsigned int pos, unsigned int count,
1897		void *kbuf, void __user *ubuf)
1898{
1899	return do_regset_call(do_fpregs_get, target, regset, pos, count,
1900		kbuf, ubuf);
1901}
1902
1903static int fpregs_set(struct task_struct *target,
1904		const struct user_regset *regset,
1905		unsigned int pos, unsigned int count,
1906		const void *kbuf, const void __user *ubuf)
1907{
1908	return do_regset_call(do_fpregs_set, target, regset, pos, count,
1909		kbuf, ubuf);
1910}
1911
1912static int
1913access_uarea(struct task_struct *child, unsigned long addr,
1914	      unsigned long *data, int write_access)
1915{
1916	unsigned int pos = -1; /* an invalid value */
1917	int ret;
1918	unsigned long *ptr, regnum;
1919
1920	if ((addr & 0x7) != 0) {
1921		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
1922		return -1;
1923	}
1924	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
1925		(addr >= PT_R7 + 8 && addr < PT_B1) ||
1926		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
1927		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
1928		dprintk("ptrace: rejecting access to register "
1929					"address 0x%lx\n", addr);
1930		return -1;
1931	}
1932
1933	switch (addr) {
1934	case PT_F32 ... (PT_F127 + 15):
1935		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
1936		break;
1937	case PT_F2 ... (PT_F5 + 15):
1938		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
1939		break;
1940	case PT_F10 ... (PT_F31 + 15):
1941		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
1942		break;
1943	case PT_F6 ... (PT_F9 + 15):
1944		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
1945		break;
1946	}
1947
1948	if (pos != -1) {
1949		if (write_access)
1950			ret = fpregs_set(child, NULL, pos,
1951				sizeof(unsigned long), data, NULL);
1952		else
1953			ret = fpregs_get(child, NULL, pos,
1954				sizeof(unsigned long), data, NULL);
1955		if (ret != 0)
1956			return -1;
1957		return 0;
1958	}
1959
1960	switch (addr) {
1961	case PT_NAT_BITS:
1962		pos = ELF_NAT_OFFSET;
1963		break;
1964	case PT_R4 ... PT_R7:
1965		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
1966		break;
1967	case PT_B1 ... PT_B5:
1968		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
1969		break;
1970	case PT_AR_EC:
1971		pos = ELF_AR_EC_OFFSET;
1972		break;
1973	case PT_AR_LC:
1974		pos = ELF_AR_LC_OFFSET;
1975		break;
1976	case PT_CR_IPSR:
1977		pos = ELF_CR_IPSR_OFFSET;
1978		break;
1979	case PT_CR_IIP:
1980		pos = ELF_CR_IIP_OFFSET;
1981		break;
1982	case PT_CFM:
1983		pos = ELF_CFM_OFFSET;
1984		break;
1985	case PT_AR_UNAT:
1986		pos = ELF_AR_UNAT_OFFSET;
1987		break;
1988	case PT_AR_PFS:
1989		pos = ELF_AR_PFS_OFFSET;
1990		break;
1991	case PT_AR_RSC:
1992		pos = ELF_AR_RSC_OFFSET;
1993		break;
1994	case PT_AR_RNAT:
1995		pos = ELF_AR_RNAT_OFFSET;
1996		break;
1997	case PT_AR_BSPSTORE:
1998		pos = ELF_AR_BSPSTORE_OFFSET;
1999		break;
2000	case PT_PR:
2001		pos = ELF_PR_OFFSET;
2002		break;
2003	case PT_B6:
2004		pos = ELF_BR_OFFSET(6);
2005		break;
2006	case PT_AR_BSP:
2007		pos = ELF_AR_BSP_OFFSET;
2008		break;
2009	case PT_R1 ... PT_R3:
2010		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
2011		break;
2012	case PT_R12 ... PT_R15:
2013		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
2014		break;
2015	case PT_R8 ... PT_R11:
2016		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
2017		break;
2018	case PT_R16 ... PT_R31:
2019		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
2020		break;
2021	case PT_AR_CCV:
2022		pos = ELF_AR_CCV_OFFSET;
2023		break;
2024	case PT_AR_FPSR:
2025		pos = ELF_AR_FPSR_OFFSET;
2026		break;
2027	case PT_B0:
2028		pos = ELF_BR_OFFSET(0);
2029		break;
2030	case PT_B7:
2031		pos = ELF_BR_OFFSET(7);
2032		break;
2033	case PT_AR_CSD:
2034		pos = ELF_AR_CSD_OFFSET;
2035		break;
2036	case PT_AR_SSD:
2037		pos = ELF_AR_SSD_OFFSET;
2038		break;
2039	}
2040
2041	if (pos != -1) {
2042		if (write_access)
2043			ret = gpregs_set(child, NULL, pos,
2044				sizeof(unsigned long), data, NULL);
2045		else
2046			ret = gpregs_get(child, NULL, pos,
2047				sizeof(unsigned long), data, NULL);
2048		if (ret != 0)
2049			return -1;
2050		return 0;
2051	}
2052
2053	/* access debug registers */
2054	if (addr >= PT_IBR) {
2055		regnum = (addr - PT_IBR) >> 3;
2056		ptr = &child->thread.ibr[0];
2057	} else {
2058		regnum = (addr - PT_DBR) >> 3;
2059		ptr = &child->thread.dbr[0];
2060	}
2061
2062	if (regnum >= 8) {
2063		dprintk("ptrace: rejecting access to register "
2064				"address 0x%lx\n", addr);
2065		return -1;
2066	}
2067#ifdef CONFIG_PERFMON
2068	/*
2069	 * Check if debug registers are used by perfmon. This
2070	 * test must be done once we know that we can do the
2071	 * operation, i.e. the arguments are all valid, but
2072	 * before we start modifying the state.
2073	 *
2074	 * Perfmon needs to keep a count of how many processes
2075	 * are trying to modify the debug registers for system
2076	 * wide monitoring sessions.
2077	 *
2078	 * We also include read accesses here, because they may
2079	 * cause the PMU-installed debug register state
2080	 * (dbr[], ibr[]) to be reset. The two arrays are also
2081	 * used by perfmon, but we do not use
2082	 * IA64_THREAD_DBG_VALID. The registers are restored
2083	 * by the PMU context switch code.
2084	 */
2085	if (pfm_use_debug_registers(child))
2086		return -1;
2087#endif
2088
2089	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
2090		child->thread.flags |= IA64_THREAD_DBG_VALID;
2091		memset(child->thread.dbr, 0,
2092				sizeof(child->thread.dbr));
2093		memset(child->thread.ibr, 0,
2094				sizeof(child->thread.ibr));
2095	}
2096
2097	ptr += regnum;
2098
2099	if ((regnum & 1) && write_access) {
2100		/* don't let the user set kernel-level breakpoints: */
2101		*ptr = *data & ~(7UL << 56);
2102		return 0;
2103	}
2104	if (write_access)
2105		*ptr = *data;
2106	else
2107		*data = *ptr;
2108	return 0;
2109}
2110
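access_uarea() is the backend for the PTRACE_PEEKUSR/PTRACE_POKEUSR requests (see arch_ptrace() earlier in this file): it maps a PT_* byte offset from <asm/ptrace_offsets.h> onto the matching regset access. A minimal tracer-side sketch (not part of this file; assumes an ia64 host with kernel headers installed; glibc spells the request PTRACE_PEEKUSER):

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace_offsets.h>	/* PT_CR_IIP and friends */

/* Read the stopped tracee's instruction pointer via the user area. */
static long peek_iip(pid_t pid)
{
	long iip;

	errno = 0;
	iip = ptrace(PTRACE_PEEKUSER, pid, (void *)PT_CR_IIP, NULL);
	if (errno)
		perror("PTRACE_PEEKUSER");
	return iip;
}
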
2111static const struct user_regset native_regsets[] = {
2112	{
2113		.core_note_type = NT_PRSTATUS,
2114		.n = ELF_NGREG,
2115		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
2116		.get = gpregs_get, .set = gpregs_set,
2117		.writeback = gpregs_writeback
2118	},
2119	{
2120		.core_note_type = NT_PRFPREG,
2121		.n = ELF_NFPREG,
2122		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
2123		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
2124	},
2125};
2126
2127static const struct user_regset_view user_ia64_view = {
2128	.name = "ia64",
2129	.e_machine = EM_IA_64,
2130	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2131};
2132
2133const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
2134{
2135	return &user_ia64_view;
2136}
2137
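This view is what the generic PTRACE_GETREGSET/PTRACE_SETREGSET requests and the core-dump writer consult, so reading the whole general-register set looks the same as on any other architecture. A sketch (not part of this file; on ia64, ELF_NGREG is 128 elf_greg_t slots):

#include <elf.h>		/* NT_PRSTATUS */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static unsigned long gregs[128];	/* ELF_NGREG worth of slots */

static int fetch_gregs(pid_t pid)
{
	struct iovec iov = { .iov_base = gregs, .iov_len = sizeof(gregs) };

	/* Dispatched to gpregs_get() through user_ia64_view. */
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
}
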
2138struct syscall_get_set_args {
2139	unsigned int i;
2140	unsigned int n;
2141	unsigned long *args;
2142	struct pt_regs *regs;
2143	int rw;
2144};
2145
2146static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
2147{
2148	struct syscall_get_set_args *args = data;
2149	struct pt_regs *pt = args->regs;
2150	unsigned long *krbs, cfm, ndirty;
2151	int i, count;
2152
2153	if (unw_unwind_to_user(info) < 0)
2154		return;
2155
2156	cfm = pt->cr_ifs;
2157	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
2158	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
2159
2160	count = 0;
2161	if (in_syscall(pt))
2162		count = min_t(int, args->n, cfm & 0x7f);
2163
2164	for (i = 0; i < count; i++) {
2165		if (args->rw)
2166			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
2167				args->args[i];
2168		else
2169			args->args[i] = *ia64_rse_skip_regs(krbs,
2170				ndirty + i + args->i);
2171	}
2172
2173	if (!args->rw) {
2174		while (i < args->n) {
2175			args->args[i] = 0;
2176			i++;
2177		}
2178	}
2179}
2180
2181void ia64_syscall_get_set_arguments(struct task_struct *task,
2182	struct pt_regs *regs, unsigned long *args, int rw)
2183{
2184	struct syscall_get_set_args data = {
2185		.i = 0,
2186		.n = 6,
2187		.args = args,
2188		.regs = regs,
2189		.rw = rw,
2190	};
2191
2192	if (task == current)
2193		unw_init_running(syscall_get_set_args_cb, &data);
2194	else {
2195		struct unw_frame_info ufi;
2196		memset(&ufi, 0, sizeof(ufi));
2197		unw_init_from_blocked_task(&ufi, task);
2198		syscall_get_set_args_cb(&ufi, &data);
2199	}
2200}
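The callback above finds the syscall arguments on the kernel register backing store: ndirty counts the dirty stacked registers below the current frame, and ia64_rse_skip_regs() must step over the RNaT collection word that occupies every 64th slot. A standalone model of those <asm/rse.h> helpers, with addresses as plain integers (a sketch mirroring the kernel inlines, for illustration):

#include <stdio.h>

/* Position (0..63) of an 8-byte slot within its RNaT group. */
static unsigned long rse_slot_num(unsigned long addr)
{
	return (addr >> 3) & 0x3f;
}

/* Advance num_regs stacked registers, skipping RNaT slots. */
static unsigned long rse_skip_regs(unsigned long addr, long num_regs)
{
	long delta = (long)rse_slot_num(addr) + num_regs;

	if (num_regs < 0)
		delta -= 62;
	return addr + 8 * (num_regs + delta / 63);
}

int main(void)
{
	unsigned long base = 0x8000;	/* an address at slot 0 */

	/* Skipping 70 registers crosses one RNaT slot: 71 raw slots. */
	printf("%lu\n", (rse_skip_regs(base, 70) - base) / 8);
	return 0;
}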
v3.1
 
   1/*
   2 * Kernel support for the ptrace() and syscall tracing interfaces.
   3 *
   4 * Copyright (C) 1999-2005 Hewlett-Packard Co
   5 *	David Mosberger-Tang <davidm@hpl.hp.com>
   6 * Copyright (C) 2006 Intel Co
   7 *  2006-08-12	- IA64 Native Utrace implementation support added by
   8 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
   9 *
  10 * Derived from the x86 and Alpha versions.
  11 */
  12#include <linux/kernel.h>
  13#include <linux/sched.h>
 
 
  14#include <linux/mm.h>
  15#include <linux/errno.h>
  16#include <linux/ptrace.h>
  17#include <linux/user.h>
  18#include <linux/security.h>
  19#include <linux/audit.h>
  20#include <linux/signal.h>
  21#include <linux/regset.h>
  22#include <linux/elf.h>
  23#include <linux/tracehook.h>
  24
  25#include <asm/pgtable.h>
  26#include <asm/processor.h>
  27#include <asm/ptrace_offsets.h>
  28#include <asm/rse.h>
  29#include <asm/system.h>
  30#include <asm/uaccess.h>
  31#include <asm/unwind.h>
  32#ifdef CONFIG_PERFMON
  33#include <asm/perfmon.h>
  34#endif
  35
  36#include "entry.h"
  37
  38/*
  39 * Bits in the PSR that we allow ptrace() to change:
  40 *	be, up, ac, mfl, mfh (the user mask; five bits total)
  41 *	db (debug breakpoint fault; one bit)
  42 *	id (instruction debug fault disable; one bit)
  43 *	dd (data debug fault disable; one bit)
  44 *	ri (restart instruction; two bits)
  45 *	is (instruction set; one bit)
  46 */
  47#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS	\
  48		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
  49
  50#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
  51#define PFM_MASK	MASK(38)
  52
  53#define PTRACE_DEBUG	0
  54
  55#if PTRACE_DEBUG
  56# define dprintk(format...)	printk(format)
  57# define inline
  58#else
  59# define dprintk(format...)
  60#endif
  61
  62/* Return TRUE if PT was created due to kernel-entry via a system-call.  */
  63
  64static inline int
  65in_syscall (struct pt_regs *pt)
  66{
  67	return (long) pt->cr_ifs >= 0;
  68}
  69
  70/*
  71 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
  72 * bitset where bit i is set iff the NaT bit of register i is set.
  73 */
  74unsigned long
  75ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
  76{
  77#	define GET_BITS(first, last, unat)				\
  78	({								\
  79		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
  80		unsigned long nbits = (last - first + 1);		\
  81		unsigned long mask = MASK(nbits) << first;		\
  82		unsigned long dist;					\
  83		if (bit < first)					\
  84			dist = 64 + bit - first;			\
  85		else							\
  86			dist = bit - first;				\
  87		ia64_rotr(unat, dist) & mask;				\
  88	})
  89	unsigned long val;
  90
  91	/*
  92	 * Registers that are stored consecutively in struct pt_regs
  93	 * can be handled in parallel.  If the register order in
  94	 * struct_pt_regs changes, this code MUST be updated.
  95	 */
  96	val  = GET_BITS( 1,  1, scratch_unat);
  97	val |= GET_BITS( 2,  3, scratch_unat);
  98	val |= GET_BITS(12, 13, scratch_unat);
  99	val |= GET_BITS(14, 14, scratch_unat);
 100	val |= GET_BITS(15, 15, scratch_unat);
 101	val |= GET_BITS( 8, 11, scratch_unat);
 102	val |= GET_BITS(16, 31, scratch_unat);
 103	return val;
 104
 105#	undef GET_BITS
 106}
 107
 108/*
 109 * Set the NaT bits for the scratch registers according to NAT and
 110 * return the resulting unat (assuming the scratch registers are
 111 * stored in PT).
 112 */
 113unsigned long
 114ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
 115{
 116#	define PUT_BITS(first, last, nat)				\
 117	({								\
 118		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
 119		unsigned long nbits = (last - first + 1);		\
 120		unsigned long mask = MASK(nbits) << first;		\
 121		long dist;						\
 122		if (bit < first)					\
 123			dist = 64 + bit - first;			\
 124		else							\
 125			dist = bit - first;				\
 126		ia64_rotl(nat & mask, dist);				\
 127	})
 128	unsigned long scratch_unat;
 129
 130	/*
 131	 * Registers that are stored consecutively in struct pt_regs
 132	 * can be handled in parallel.  If the register order in
 133	 * struct_pt_regs changes, this code MUST be updated.
 134	 */
 135	scratch_unat  = PUT_BITS( 1,  1, nat);
 136	scratch_unat |= PUT_BITS( 2,  3, nat);
 137	scratch_unat |= PUT_BITS(12, 13, nat);
 138	scratch_unat |= PUT_BITS(14, 14, nat);
 139	scratch_unat |= PUT_BITS(15, 15, nat);
 140	scratch_unat |= PUT_BITS( 8, 11, nat);
 141	scratch_unat |= PUT_BITS(16, 31, nat);
 142
 143	return scratch_unat;
 144
 145#	undef PUT_BITS
 146}
 147
 148#define IA64_MLX_TEMPLATE	0x2
 149#define IA64_MOVL_OPCODE	6
 150
 151void
 152ia64_increment_ip (struct pt_regs *regs)
 153{
 154	unsigned long w0, ri = ia64_psr(regs)->ri + 1;
 155
 156	if (ri > 2) {
 157		ri = 0;
 158		regs->cr_iip += 16;
 159	} else if (ri == 2) {
 160		get_user(w0, (char __user *) regs->cr_iip + 0);
 161		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
 162			/*
 163			 * rfi'ing to slot 2 of an MLX bundle causes
 164			 * an illegal operation fault.  We don't want
 165			 * that to happen...
 166			 */
 167			ri = 0;
 168			regs->cr_iip += 16;
 169		}
 170	}
 171	ia64_psr(regs)->ri = ri;
 172}
 173
 174void
 175ia64_decrement_ip (struct pt_regs *regs)
 176{
 177	unsigned long w0, ri = ia64_psr(regs)->ri - 1;
 178
 179	if (ia64_psr(regs)->ri == 0) {
 180		regs->cr_iip -= 16;
 181		ri = 2;
 182		get_user(w0, (char __user *) regs->cr_iip + 0);
 183		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
 184			/*
 185			 * rfi'ing to slot 2 of an MLX bundle causes
 186			 * an illegal operation fault.  We don't want
 187			 * that to happen...
 188			 */
 189			ri = 1;
 190		}
 191	}
 192	ia64_psr(regs)->ri = ri;
 193}
 194
 195/*
 196 * This routine is used to read an rnat bits that are stored on the
 197 * kernel backing store.  Since, in general, the alignment of the user
 198 * and kernel are different, this is not completely trivial.  In
 199 * essence, we need to construct the user RNAT based on up to two
 200 * kernel RNAT values and/or the RNAT value saved in the child's
 201 * pt_regs.
 202 *
 203 * user rbs
 204 *
 205 * +--------+ <-- lowest address
 206 * | slot62 |
 207 * +--------+
 208 * |  rnat  | 0x....1f8
 209 * +--------+
 210 * | slot00 | \
 211 * +--------+ |
 212 * | slot01 | > child_regs->ar_rnat
 213 * +--------+ |
 214 * | slot02 | /				kernel rbs
 215 * +--------+				+--------+
 216 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 217 * +- - - - +				+--------+
 218 *					| slot62 |
 219 * +- - - - +				+--------+
 220 *					|  rnat	 |
 221 * +- - - - +				+--------+
 222 *   vrnat				| slot00 |
 223 * +- - - - +				+--------+
 224 *					=	 =
 225 *					+--------+
 226 *					| slot00 | \
 227 *					+--------+ |
 228 *					| slot01 | > child_stack->ar_rnat
 229 *					+--------+ |
 230 *					| slot02 | /
 231 *					+--------+
 232 *						  <--- child_stack->ar_bspstore
 233 *
 234 * The way to think of this code is as follows: bit 0 in the user rnat
 235 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 236 * value.  The kernel rnat value holding this bit is stored in
 237 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 238 * form the upper bits of the user rnat value.
 239 *
 240 * Boundary cases:
 241 *
 242 * o when reading the rnat "below" the first rnat slot on the kernel
 243 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 244 *   merged in from pt->ar_rnat.
 245 *
 246 * o when reading the rnat "above" the last rnat slot on the kernel
 247 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 248 */
 249static unsigned long
 250get_rnat (struct task_struct *task, struct switch_stack *sw,
 251	  unsigned long *krbs, unsigned long *urnat_addr,
 252	  unsigned long *urbs_end)
 253{
 254	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
 255	unsigned long umask = 0, mask, m;
 256	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
 257	long num_regs, nbits;
 258	struct pt_regs *pt;
 259
 260	pt = task_pt_regs(task);
 261	kbsp = (unsigned long *) sw->ar_bspstore;
 262	ubspstore = (unsigned long *) pt->ar_bspstore;
 263
 264	if (urbs_end < urnat_addr)
 265		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
 266	else
 267		nbits = 63;
 268	mask = MASK(nbits);
 269	/*
 270	 * First, figure out which bit number slot 0 in user-land maps
 271	 * to in the kernel rnat.  Do this by figuring out how many
 272	 * register slots we're beyond the user's backingstore and
 273	 * then computing the equivalent address in kernel space.
 274	 */
 275	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
 276	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
 277	shift = ia64_rse_slot_num(slot0_kaddr);
 278	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
 279	rnat0_kaddr = rnat1_kaddr - 64;
 280
 281	if (ubspstore + 63 > urnat_addr) {
 282		/* some bits need to be merged in from pt->ar_rnat */
 283		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
 284		urnat = (pt->ar_rnat & umask);
 285		mask &= ~umask;
 286		if (!mask)
 287			return urnat;
 288	}
 289
 290	m = mask << shift;
 291	if (rnat0_kaddr >= kbsp)
 292		rnat0 = sw->ar_rnat;
 293	else if (rnat0_kaddr > krbs)
 294		rnat0 = *rnat0_kaddr;
 295	urnat |= (rnat0 & m) >> shift;
 296
 297	m = mask >> (63 - shift);
 298	if (rnat1_kaddr >= kbsp)
 299		rnat1 = sw->ar_rnat;
 300	else if (rnat1_kaddr > krbs)
 301		rnat1 = *rnat1_kaddr;
 302	urnat |= (rnat1 & m) << (63 - shift);
 303	return urnat;
 304}
 305
 306/*
 307 * The reverse of get_rnat.
 308 */
 309static void
 310put_rnat (struct task_struct *task, struct switch_stack *sw,
 311	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
 312	  unsigned long *urbs_end)
 313{
 314	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
 315	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
 316	long num_regs, nbits;
 317	struct pt_regs *pt;
 318	unsigned long cfm, *urbs_kargs;
 319
 320	pt = task_pt_regs(task);
 321	kbsp = (unsigned long *) sw->ar_bspstore;
 322	ubspstore = (unsigned long *) pt->ar_bspstore;
 323
 324	urbs_kargs = urbs_end;
 325	if (in_syscall(pt)) {
 326		/*
 327		 * If entered via syscall, don't allow user to set rnat bits
 328		 * for syscall args.
 329		 */
 330		cfm = pt->cr_ifs;
 331		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
 332	}
 333
 334	if (urbs_kargs >= urnat_addr)
 335		nbits = 63;
 336	else {
 337		if ((urnat_addr - 63) >= urbs_kargs)
 338			return;
 339		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
 340	}
 341	mask = MASK(nbits);
 342
 343	/*
 344	 * First, figure out which bit number slot 0 in user-land maps
 345	 * to in the kernel rnat.  Do this by figuring out how many
 346	 * register slots we're beyond the user's backingstore and
 347	 * then computing the equivalent address in kernel space.
 348	 */
 349	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
 350	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
 351	shift = ia64_rse_slot_num(slot0_kaddr);
 352	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
 353	rnat0_kaddr = rnat1_kaddr - 64;
 354
 355	if (ubspstore + 63 > urnat_addr) {
 356		/* some bits need to be place in pt->ar_rnat: */
 357		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
 358		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
 359		mask &= ~umask;
 360		if (!mask)
 361			return;
 362	}
 363	/*
 364	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
 365	 * rnat slot is ignored. so we don't have to clear it here.
 366	 */
 367	rnat0 = (urnat << shift);
 368	m = mask << shift;
 369	if (rnat0_kaddr >= kbsp)
 370		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
 371	else if (rnat0_kaddr > krbs)
 372		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
 373
 374	rnat1 = (urnat >> (63 - shift));
 375	m = mask >> (63 - shift);
 376	if (rnat1_kaddr >= kbsp)
 377		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
 378	else if (rnat1_kaddr > krbs)
 379		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
 380}
 381
 382static inline int
 383on_kernel_rbs (unsigned long addr, unsigned long bspstore,
 384	       unsigned long urbs_end)
 385{
 386	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
 387						      urbs_end);
 388	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
 389}
 390
 391/*
 392 * Read a word from the user-level backing store of task CHILD.  ADDR
 393 * is the user-level address to read the word from, VAL a pointer to
 394 * the return value, and USER_BSP gives the end of the user-level
 395 * backing store (i.e., it's the address that would be in ar.bsp after
 396 * the user executed a "cover" instruction).
 397 *
 398 * This routine takes care of accessing the kernel register backing
 399 * store for those registers that got spilled there.  It also takes
 400 * care of calculating the appropriate RNaT collection words.
 401 */
 402long
 403ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
 404	   unsigned long user_rbs_end, unsigned long addr, long *val)
 405{
 406	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
 407	struct pt_regs *child_regs;
 408	size_t copied;
 409	long ret;
 410
 411	urbs_end = (long *) user_rbs_end;
 412	laddr = (unsigned long *) addr;
 413	child_regs = task_pt_regs(child);
 414	bspstore = (unsigned long *) child_regs->ar_bspstore;
 415	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 416	if (on_kernel_rbs(addr, (unsigned long) bspstore,
 417			  (unsigned long) urbs_end))
 418	{
 419		/*
 420		 * Attempt to read the RBS in an area that's actually
 421		 * on the kernel RBS => read the corresponding bits in
 422		 * the kernel RBS.
 423		 */
 424		rnat_addr = ia64_rse_rnat_addr(laddr);
 425		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
 426
 427		if (laddr == rnat_addr) {
 428			/* return NaT collection word itself */
 429			*val = ret;
 430			return 0;
 431		}
 432
 433		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
 434			/*
 435			 * It is implementation dependent whether the
 436			 * data portion of a NaT value gets saved on a
 437			 * st8.spill or RSE spill (e.g., see EAS 2.6,
 438			 * 4.4.4.6 Register Spill and Fill).  To get
 439			 * consistent behavior across all possible
 440			 * IA-64 implementations, we return zero in
 441			 * this case.
 442			 */
 443			*val = 0;
 444			return 0;
 445		}
 446
 447		if (laddr < urbs_end) {
 448			/*
 449			 * The desired word is on the kernel RBS and
 450			 * is not a NaT.
 451			 */
 452			regnum = ia64_rse_num_regs(bspstore, laddr);
 453			*val = *ia64_rse_skip_regs(krbs, regnum);
 454			return 0;
 455		}
 456	}
 457	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
 458	if (copied != sizeof(ret))
 459		return -EIO;
 460	*val = ret;
 461	return 0;
 462}
 463
 464long
 465ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
 466	   unsigned long user_rbs_end, unsigned long addr, long val)
 467{
 468	unsigned long *bspstore, *krbs, regnum, *laddr;
 469	unsigned long *urbs_end = (long *) user_rbs_end;
 470	struct pt_regs *child_regs;
 471
 472	laddr = (unsigned long *) addr;
 473	child_regs = task_pt_regs(child);
 474	bspstore = (unsigned long *) child_regs->ar_bspstore;
 475	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 476	if (on_kernel_rbs(addr, (unsigned long) bspstore,
 477			  (unsigned long) urbs_end))
 478	{
 479		/*
 480		 * Attempt to write the RBS in an area that's actually
 481		 * on the kernel RBS => write the corresponding bits
 482		 * in the kernel RBS.
 483		 */
 484		if (ia64_rse_is_rnat_slot(laddr))
 485			put_rnat(child, child_stack, krbs, laddr, val,
 486				 urbs_end);
 487		else {
 488			if (laddr < urbs_end) {
 489				regnum = ia64_rse_num_regs(bspstore, laddr);
 490				*ia64_rse_skip_regs(krbs, regnum) = val;
 491			}
 492		}
 493	} else if (access_process_vm(child, addr, &val, sizeof(val), 1)
 
 494		   != sizeof(val))
 495		return -EIO;
 496	return 0;
 497}
 498
 499/*
 500 * Calculate the address of the end of the user-level register backing
 501 * store.  This is the address that would have been stored in ar.bsp
 502 * if the user had executed a "cover" instruction right before
 503 * entering the kernel.  If CFMP is not NULL, it is used to return the
 504 * "current frame mask" that was active at the time the kernel was
 505 * entered.
 506 */
 507unsigned long
 508ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
 509		       unsigned long *cfmp)
 510{
 511	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
 512	long ndirty;
 513
 514	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
 515	bspstore = (unsigned long *) pt->ar_bspstore;
 516	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
 517
 518	if (in_syscall(pt))
 519		ndirty += (cfm & 0x7f);
 520	else
 521		cfm &= ~(1UL << 63);	/* clear valid bit */
 522
 523	if (cfmp)
 524		*cfmp = cfm;
 525	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
 526}
 527
 528/*
 529 * Synchronize (i.e, write) the RSE backing store living in kernel
 530 * space to the VM of the CHILD task.  SW and PT are the pointers to
 531 * the switch_stack and pt_regs structures, respectively.
 532 * USER_RBS_END is the user-level address at which the backing store
 533 * ends.
 534 */
 535long
 536ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
 537		    unsigned long user_rbs_start, unsigned long user_rbs_end)
 538{
 539	unsigned long addr, val;
 540	long ret;
 541
 542	/* now copy word for word from kernel rbs to user rbs: */
 543	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
 544		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
 545		if (ret < 0)
 546			return ret;
 547		if (access_process_vm(child, addr, &val, sizeof(val), 1)
 
 548		    != sizeof(val))
 549			return -EIO;
 550	}
 551	return 0;
 552}
 553
 554static long
 555ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
 556		unsigned long user_rbs_start, unsigned long user_rbs_end)
 557{
 558	unsigned long addr, val;
 559	long ret;
 560
 561	/* now copy word for word from user rbs to kernel rbs: */
 562	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
 563		if (access_process_vm(child, addr, &val, sizeof(val), 0)
 
 564				!= sizeof(val))
 565			return -EIO;
 566
 567		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
 568		if (ret < 0)
 569			return ret;
 570	}
 571	return 0;
 572}
 573
 574typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
 575			    unsigned long, unsigned long);
 576
 577static void do_sync_rbs(struct unw_frame_info *info, void *arg)
 578{
 579	struct pt_regs *pt;
 580	unsigned long urbs_end;
 581	syncfunc_t fn = arg;
 582
 583	if (unw_unwind_to_user(info) < 0)
 584		return;
 585	pt = task_pt_regs(info->task);
 586	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);
 587
 588	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
 589}
 590
 591/*
 592 * when a thread is stopped (ptraced), debugger might change thread's user
 593 * stack (change memory directly), and we must avoid the RSE stored in kernel
 594 * to override user stack (user space's RSE is newer than kernel's in the
 595 * case). To workaround the issue, we copy kernel RSE to user RSE before the
 596 * task is stopped, so user RSE has updated data.  we then copy user RSE to
 597 * kernel after the task is resummed from traced stop and kernel will use the
 598 * newer RSE to return to user. TIF_RESTORE_RSE is the flag to indicate we need
 599 * synchronize user RSE to kernel.
 600 */
 601void ia64_ptrace_stop(void)
 602{
 603	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
 604		return;
 605	set_notify_resume(current);
 606	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
 607}
 608
 609/*
 610 * This is called to read back the register backing store.
 611 */
 612void ia64_sync_krbs(void)
 613{
 614	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
 615
 616	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
 617}
 618
 619/*
 620 * After PTRACE_ATTACH, a thread's register backing store area in user
 621 * space is assumed to contain correct data whenever the thread is
 622 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 623 * But if the child was already stopped for job control when we attach
 624 * to it, then it might not ever get into ptrace_stop by the time we
 625 * want to examine the user memory containing the RBS.
 626 */
 627void
 628ptrace_attach_sync_user_rbs (struct task_struct *child)
 629{
 630	int stopped = 0;
 631	struct unw_frame_info info;
 632
 633	/*
 634	 * If the child is in TASK_STOPPED, we need to change that to
 635	 * TASK_TRACED momentarily while we operate on it.  This ensures
 636	 * that the child won't be woken up and return to user mode while
 637	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
 638	 */
 639
 640	read_lock(&tasklist_lock);
 641	if (child->sighand) {
 642		spin_lock_irq(&child->sighand->siglock);
 643		if (child->state == TASK_STOPPED &&
 644		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
 645			set_notify_resume(child);
 646
 647			child->state = TASK_TRACED;
 648			stopped = 1;
 649		}
 650		spin_unlock_irq(&child->sighand->siglock);
 651	}
 652	read_unlock(&tasklist_lock);
 653
 654	if (!stopped)
 655		return;
 656
 657	unw_init_from_blocked_task(&info, child);
 658	do_sync_rbs(&info, ia64_sync_user_rbs);
 659
 660	/*
 661	 * Now move the child back into TASK_STOPPED if it should be in a
 662	 * job control stop, so that SIGCONT can be used to wake it up.
 663	 */
 664	read_lock(&tasklist_lock);
 665	if (child->sighand) {
 666		spin_lock_irq(&child->sighand->siglock);
 667		if (child->state == TASK_TRACED &&
 668		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
 669			child->state = TASK_STOPPED;
 670		}
 671		spin_unlock_irq(&child->sighand->siglock);
 672	}
 673	read_unlock(&tasklist_lock);
 674}
 675
 676static inline int
 677thread_matches (struct task_struct *thread, unsigned long addr)
 678{
 679	unsigned long thread_rbs_end;
 680	struct pt_regs *thread_regs;
 681
 682	if (ptrace_check_attach(thread, 0) < 0)
 683		/*
 684		 * If the thread is not in an attachable state, we'll
 685		 * ignore it.  The net effect is that if ADDR happens
 686		 * to overlap with the portion of the thread's
 687		 * register backing store that is currently residing
 688		 * on the thread's kernel stack, then ptrace() may end
 689		 * up accessing a stale value.  But if the thread
 690		 * isn't stopped, that's a problem anyhow, so we're
 691		 * doing as well as we can...
 692		 */
 693		return 0;
 694
 695	thread_regs = task_pt_regs(thread);
 696	thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
 697	if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
 698		return 0;
 699
 700	return 1;	/* looks like we've got a winner */
 701}
 702
 703/*
 704 * Write f32-f127 back to task->thread.fph if it has been modified.
 705 */
 706inline void
 707ia64_flush_fph (struct task_struct *task)
 708{
 709	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 710
 711	/*
 712	 * Prevent migrating this task while
 713	 * we're fiddling with the FPU state
 714	 */
 715	preempt_disable();
 716	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
 717		psr->mfh = 0;
 718		task->thread.flags |= IA64_THREAD_FPH_VALID;
 719		ia64_save_fpu(&task->thread.fph[0]);
 720	}
 721	preempt_enable();
 722}
 723
 724/*
 725 * Sync the fph state of the task so that it can be manipulated
 726 * through thread.fph.  If necessary, f32-f127 are written back to
 727 * thread.fph or, if the fph state hasn't been used before, thread.fph
 728 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 729 * ensure that the task picks up the state from thread.fph when it
 730 * executes again.
 731 */
 732void
 733ia64_sync_fph (struct task_struct *task)
 734{
 735	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 736
 737	ia64_flush_fph(task);
 738	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
 739		task->thread.flags |= IA64_THREAD_FPH_VALID;
 740		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
 741	}
 742	ia64_drop_fpu(task);
 743	psr->dfh = 1;
 744}
 745
 746/*
 747 * Change the machine-state of CHILD such that it will return via the normal
 748 * kernel exit-path, rather than the syscall-exit path.
 749 */
 750static void
 751convert_to_non_syscall (struct task_struct *child, struct pt_regs  *pt,
 752			unsigned long cfm)
 753{
 754	struct unw_frame_info info, prev_info;
 755	unsigned long ip, sp, pr;
 756
 757	unw_init_from_blocked_task(&info, child);
 758	while (1) {
 759		prev_info = info;
 760		if (unw_unwind(&info) < 0)
 761			return;
 762
 763		unw_get_sp(&info, &sp);
 764		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
 765		    < IA64_PT_REGS_SIZE) {
 766			dprintk("ptrace.%s: ran off the top of the kernel "
 767				"stack\n", __func__);
 768			return;
 769		}
 770		if (unw_get_pr (&prev_info, &pr) < 0) {
 771			unw_get_rp(&prev_info, &ip);
 772			dprintk("ptrace.%s: failed to read "
 773				"predicate register (ip=0x%lx)\n",
 774				__func__, ip);
 775			return;
 776		}
 777		if (unw_is_intr_frame(&info)
 778		    && (pr & (1UL << PRED_USER_STACK)))
 779			break;
 780	}
 781
 782	/*
 783	 * Note: at the time of this call, the target task is blocked
 784	 * in notify_resume_user() and by clearling PRED_LEAVE_SYSCALL
 785	 * (aka, "pLvSys") we redirect execution from
 786	 * .work_pending_syscall_end to .work_processed_kernel.
 787	 */
 788	unw_get_pr(&prev_info, &pr);
 789	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
 790	pr |=  (1UL << PRED_NON_SYSCALL);
 791	unw_set_pr(&prev_info, pr);
 792
 793	pt->cr_ifs = (1UL << 63) | cfm;
 794	/*
 795	 * Clear the memory that is NOT written on syscall-entry to
 796	 * ensure we do not leak kernel-state to user when execution
 797	 * resumes.
 798	 */
 799	pt->r2 = 0;
 800	pt->r3 = 0;
 801	pt->r14 = 0;
 802	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
 803	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
 804	pt->b7 = 0;
 805	pt->ar_ccv = 0;
 806	pt->ar_csd = 0;
 807	pt->ar_ssd = 0;
 808}
 809
 810static int
 811access_nat_bits (struct task_struct *child, struct pt_regs *pt,
 812		 struct unw_frame_info *info,
 813		 unsigned long *data, int write_access)
 814{
 815	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
 816	char nat = 0;
 817
 818	if (write_access) {
 819		nat_bits = *data;
 820		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
 821		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
 822			dprintk("ptrace: failed to set ar.unat\n");
 823			return -1;
 824		}
 825		for (regnum = 4; regnum <= 7; ++regnum) {
 826			unw_get_gr(info, regnum, &dummy, &nat);
 827			unw_set_gr(info, regnum, dummy,
 828				   (nat_bits >> regnum) & 1);
 829		}
 830	} else {
 831		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
 832			dprintk("ptrace: failed to read ar.unat\n");
 833			return -1;
 834		}
 835		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
 836		for (regnum = 4; regnum <= 7; ++regnum) {
 837			unw_get_gr(info, regnum, &dummy, &nat);
 838			nat_bits |= (nat != 0) << regnum;
 839		}
 840		*data = nat_bits;
 841	}
 842	return 0;
 843}
 844
 845static int
 846access_uarea (struct task_struct *child, unsigned long addr,
 847	      unsigned long *data, int write_access);
 848
 849static long
 850ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 851{
 852	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
 853	struct unw_frame_info info;
 854	struct ia64_fpreg fpval;
 855	struct switch_stack *sw;
 856	struct pt_regs *pt;
 857	long ret, retval = 0;
 858	char nat = 0;
 859	int i;
 860
 861	if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
 862		return -EIO;
 863
 864	pt = task_pt_regs(child);
 865	sw = (struct switch_stack *) (child->thread.ksp + 16);
 866	unw_init_from_blocked_task(&info, child);
 867	if (unw_unwind_to_user(&info) < 0) {
 868		return -EIO;
 869	}
 870
 871	if (((unsigned long) ppr & 0x7) != 0) {
 872		dprintk("ptrace:unaligned register address %p\n", ppr);
 873		return -EIO;
 874	}
 875
 876	if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
 877	    || access_uarea(child, PT_AR_EC, &ec, 0) < 0
 878	    || access_uarea(child, PT_AR_LC, &lc, 0) < 0
 879	    || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
 880	    || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
 881	    || access_uarea(child, PT_CFM, &cfm, 0)
 882	    || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
 883		return -EIO;
 884
 885	/* control regs */
 886
 887	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
 888	retval |= __put_user(psr, &ppr->cr_ipsr);
 889
 890	/* app regs */
 891
 892	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
 893	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
 894	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
 895	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
 896	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
 897	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
 898
 899	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
 900	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
 901	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
 902	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
 903	retval |= __put_user(cfm, &ppr->cfm);
 904
 905	/* gr1-gr3 */
 906
 907	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
 908	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
 909
 910	/* gr4-gr7 */
 911
 912	for (i = 4; i < 8; i++) {
 913		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
 914			return -EIO;
 915		retval |= __put_user(val, &ppr->gr[i]);
 916	}
 917
 918	/* gr8-gr11 */
 919
 920	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
 921
 922	/* gr12-gr15 */
 923
 924	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
 925	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
 926	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
 927
 928	/* gr16-gr31 */
 929
 930	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
 931
 932	/* b0 */
 933
 934	retval |= __put_user(pt->b0, &ppr->br[0]);
 935
 936	/* b1-b5 */
 937
 938	for (i = 1; i < 6; i++) {
 939		if (unw_access_br(&info, i, &val, 0) < 0)
 940			return -EIO;
 941		__put_user(val, &ppr->br[i]);
 942	}
 943
 944	/* b6-b7 */
 945
 946	retval |= __put_user(pt->b6, &ppr->br[6]);
 947	retval |= __put_user(pt->b7, &ppr->br[7]);
 948
 949	/* fr2-fr5 */
 950
 951	for (i = 2; i < 6; i++) {
 952		if (unw_get_fr(&info, i, &fpval) < 0)
 953			return -EIO;
 954		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
 955	}
 956
 957	/* fr6-fr11 */
 958
 959	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
 960				 sizeof(struct ia64_fpreg) * 6);
 961
 962	/* fp scratch regs(12-15) */
 963
 964	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
 965				 sizeof(struct ia64_fpreg) * 4);
 966
 967	/* fr16-fr31 */
 968
 969	for (i = 16; i < 32; i++) {
 970		if (unw_get_fr(&info, i, &fpval) < 0)
 971			return -EIO;
 972		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
 973	}
 974
 975	/* fph */
 976
 977	ia64_flush_fph(child);
 978	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
 979				 sizeof(ppr->fr[32]) * 96);
 980
 981	/*  preds */
 982
 983	retval |= __put_user(pt->pr, &ppr->pr);
 984
 985	/* nat bits */
 986
 987	retval |= __put_user(nat_bits, &ppr->nat);
 988
 989	ret = retval ? -EIO : 0;
 990	return ret;
 991}
 992
 993static long
 994ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 995{
 996	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
 997	struct unw_frame_info info;
 998	struct switch_stack *sw;
 999	struct ia64_fpreg fpval;
1000	struct pt_regs *pt;
1001	long ret, retval = 0;
1002	int i;
1003
1004	memset(&fpval, 0, sizeof(fpval));
1005
1006	if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
1007		return -EIO;
1008
1009	pt = task_pt_regs(child);
1010	sw = (struct switch_stack *) (child->thread.ksp + 16);
1011	unw_init_from_blocked_task(&info, child);
1012	if (unw_unwind_to_user(&info) < 0) {
1013		return -EIO;
1014	}
1015
1016	if (((unsigned long) ppr & 0x7) != 0) {
1017		dprintk("ptrace:unaligned register address %p\n", ppr);
1018		return -EIO;
1019	}
1020
1021	/* control regs */
1022
1023	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
1024	retval |= __get_user(psr, &ppr->cr_ipsr);
1025
1026	/* app regs */
1027
1028	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
1029	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
1030	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1031	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1032	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1033	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1034
1035	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
1036	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
1037	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
1038	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
1039	retval |= __get_user(cfm, &ppr->cfm);
1040
1041	/* gr1-gr3 */
1042
1043	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
1044	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
1045
1046	/* gr4-gr7 */
1047
1048	for (i = 4; i < 8; i++) {
1049		retval |= __get_user(val, &ppr->gr[i]);
1050		/* NaT bit will be set via PT_NAT_BITS: */
1051		if (unw_set_gr(&info, i, val, 0) < 0)
1052			return -EIO;
1053	}
1054
1055	/* gr8-gr11 */
1056
1057	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
1058
1059	/* gr12-gr15 */
1060
1061	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
1062	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
1063	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
1064
1065	/* gr16-gr31 */
1066
1067	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
1068
1069	/* b0 */
1070
1071	retval |= __get_user(pt->b0, &ppr->br[0]);
1072
1073	/* b1-b5 */
1074
1075	for (i = 1; i < 6; i++) {
1076		retval |= __get_user(val, &ppr->br[i]);
1077		unw_set_br(&info, i, val);
1078	}
1079
1080	/* b6-b7 */
1081
1082	retval |= __get_user(pt->b6, &ppr->br[6]);
1083	retval |= __get_user(pt->b7, &ppr->br[7]);
1084
1085	/* fr2-fr5 */
1086
1087	for (i = 2; i < 6; i++) {
1088		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
1089		if (unw_set_fr(&info, i, fpval) < 0)
1090			return -EIO;
1091	}
1092
1093	/* fr6-fr11 */
1094
1095	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
1096				   sizeof(ppr->fr[6]) * 6);
1097
1098	/* fp scratch regs(12-15) */
1099
1100	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
1101				   sizeof(ppr->fr[12]) * 4);
1102
1103	/* fr16-fr31 */
1104
1105	for (i = 16; i < 32; i++) {
1106		retval |= __copy_from_user(&fpval, &ppr->fr[i],
1107					   sizeof(fpval));
1108		if (unw_set_fr(&info, i, fpval) < 0)
1109			return -EIO;
1110	}
1111
1112	/* fph */
1113
1114	ia64_sync_fph(child);
1115	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
1116				   sizeof(ppr->fr[32]) * 96);
1117
1118	/* preds */
1119
1120	retval |= __get_user(pt->pr, &ppr->pr);
1121
1122	/* nat bits */
1123
1124	retval |= __get_user(nat_bits, &ppr->nat);
1125
1126	retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
1127	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
1128	retval |= access_uarea(child, PT_AR_EC, &ec, 1);
1129	retval |= access_uarea(child, PT_AR_LC, &lc, 1);
1130	retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
1131	retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
1132	retval |= access_uarea(child, PT_CFM, &cfm, 1);
1133	retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
1134
1135	ret = retval ? -EIO : 0;
1136	return ret;
1137}
1138
1139void
1140user_enable_single_step (struct task_struct *child)
1141{
1142	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1143
1144	set_tsk_thread_flag(child, TIF_SINGLESTEP);
1145	child_psr->ss = 1;
1146}
1147
1148void
1149user_enable_block_step (struct task_struct *child)
1150{
1151	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1152
1153	set_tsk_thread_flag(child, TIF_SINGLESTEP);
1154	child_psr->tb = 1;
1155}
1156
1157void
1158user_disable_single_step (struct task_struct *child)
1159{
1160	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1161
1162	/* make sure the single step/taken-branch trap bits are not set: */
1163	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1164	child_psr->ss = 0;
1165	child_psr->tb = 0;
1166}
1167
1168/*
1169 * Called by kernel/ptrace.c when detaching..
1170 *
1171 * Make sure the single step bit is not set.
1172 */
1173void
1174ptrace_disable (struct task_struct *child)
1175{
1176	user_disable_single_step(child);
1177}
1178
1179long
1180arch_ptrace (struct task_struct *child, long request,
1181	     unsigned long addr, unsigned long data)
1182{
1183	switch (request) {
1184	case PTRACE_PEEKTEXT:
1185	case PTRACE_PEEKDATA:
1186		/* read word at location addr */
1187		if (access_process_vm(child, addr, &data, sizeof(data), 0)
 
1188		    != sizeof(data))
1189			return -EIO;
1190		/* ensure return value is not mistaken for error code */
1191		force_successful_syscall_return();
1192		return data;
1193
1194	/* PTRACE_POKETEXT and PTRACE_POKEDATA is handled
1195	 * by the generic ptrace_request().
1196	 */
1197
1198	case PTRACE_PEEKUSR:
1199		/* read the word at addr in the USER area */
1200		if (access_uarea(child, addr, &data, 0) < 0)
1201			return -EIO;
1202		/* ensure return value is not mistaken for error code */
1203		force_successful_syscall_return();
1204		return data;
1205
1206	case PTRACE_POKEUSR:
1207		/* write the word at addr in the USER area */
1208		if (access_uarea(child, addr, &data, 1) < 0)
1209			return -EIO;
1210		return 0;
1211
1212	case PTRACE_OLD_GETSIGINFO:
1213		/* for backwards-compatibility */
1214		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
1215
1216	case PTRACE_OLD_SETSIGINFO:
1217		/* for backwards-compatibility */
1218		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
1219
1220	case PTRACE_GETREGS:
1221		return ptrace_getregs(child,
1222				      (struct pt_all_user_regs __user *) data);
1223
1224	case PTRACE_SETREGS:
1225		return ptrace_setregs(child,
1226				      (struct pt_all_user_regs __user *) data);
1227
1228	default:
1229		return ptrace_request(child, request, addr, data);
1230	}
1231}
1232
1233
1234/* "asmlinkage" so the input arguments are preserved... */
1235
1236asmlinkage long
1237syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1238		     long arg4, long arg5, long arg6, long arg7,
1239		     struct pt_regs regs)
1240{
1241	if (test_thread_flag(TIF_SYSCALL_TRACE))
1242		if (tracehook_report_syscall_entry(&regs))
1243			return -ENOSYS;
1244
1245	/* copy user rbs to kernel rbs */
1246	if (test_thread_flag(TIF_RESTORE_RSE))
1247		ia64_sync_krbs();
1248
1249	if (unlikely(current->audit_context)) {
1250		long syscall;
1251		int arch;
1252
1253		syscall = regs.r15;
1254		arch = AUDIT_ARCH_IA64;
1255
1256		audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
1257	}
1258
1259	return 0;
1260}
1261
1262/* "asmlinkage" so the input arguments are preserved... */
1263
1264asmlinkage void
1265syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1266		     long arg4, long arg5, long arg6, long arg7,
1267		     struct pt_regs regs)
1268{
1269	int step;
1270
1271	if (unlikely(current->audit_context)) {
1272		int success = AUDITSC_RESULT(regs.r10);
1273		long result = regs.r8;
1274
1275		if (success != AUDITSC_SUCCESS)
1276			result = -result;
1277		audit_syscall_exit(success, result);
1278	}
1279
1280	step = test_thread_flag(TIF_SINGLESTEP);
1281	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1282		tracehook_report_syscall_exit(&regs, step);
1283
1284	/* copy user rbs to kernel rbs */
1285	if (test_thread_flag(TIF_RESTORE_RSE))
1286		ia64_sync_krbs();
1287}
1288
1289/* Utrace implementation starts here */
1290struct regset_get {
1291	void *kbuf;
1292	void __user *ubuf;
1293};
1294
1295struct regset_set {
1296	const void *kbuf;
1297	const void __user *ubuf;
1298};
1299
1300struct regset_getset {
1301	struct task_struct *target;
1302	const struct user_regset *regset;
1303	union {
1304		struct regset_get get;
1305		struct regset_set set;
1306	} u;
1307	unsigned int pos;
1308	unsigned int count;
1309	int ret;
1310};
1311
1312static int
1313access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
1314		unsigned long addr, unsigned long *data, int write_access)
1315{
1316	struct pt_regs *pt;
1317	unsigned long *ptr = NULL;
1318	int ret;
1319	char nat = 0;
1320
1321	pt = task_pt_regs(target);
1322	switch (addr) {
1323	case ELF_GR_OFFSET(1):
1324		ptr = &pt->r1;
1325		break;
1326	case ELF_GR_OFFSET(2):
1327	case ELF_GR_OFFSET(3):
1328		ptr = (void *)&pt->r2 + (addr - ELF_GR_OFFSET(2));
1329		break;
1330	case ELF_GR_OFFSET(4) ... ELF_GR_OFFSET(7):
1331		if (write_access) {
1332			/* read NaT bit first: */
1333			unsigned long dummy;
1334
1335			ret = unw_get_gr(info, addr/8, &dummy, &nat);
1336			if (ret < 0)
1337				return ret;
1338		}
1339		return unw_access_gr(info, addr/8, data, &nat, write_access);
1340	case ELF_GR_OFFSET(8) ... ELF_GR_OFFSET(11):
1341		ptr = (void *)&pt->r8 + addr - ELF_GR_OFFSET(8);
1342		break;
1343	case ELF_GR_OFFSET(12):
1344	case ELF_GR_OFFSET(13):
1345		ptr = (void *)&pt->r12 + addr - ELF_GR_OFFSET(12);
1346		break;
1347	case ELF_GR_OFFSET(14):
1348		ptr = &pt->r14;
1349		break;
1350	case ELF_GR_OFFSET(15):
1351		ptr = &pt->r15;
1352	}
1353	if (write_access)
1354		*ptr = *data;
1355	else
1356		*data = *ptr;
1357	return 0;
1358}
1359
1360static int
1361access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
1362		unsigned long addr, unsigned long *data, int write_access)
1363{
1364	struct pt_regs *pt;
1365	unsigned long *ptr = NULL;
1366
1367	pt = task_pt_regs(target);
1368	switch (addr) {
1369	case ELF_BR_OFFSET(0):
1370		ptr = &pt->b0;
1371		break;
1372	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
1373		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
1374				     data, write_access);
1375	case ELF_BR_OFFSET(6):
1376		ptr = &pt->b6;
1377		break;
1378	case ELF_BR_OFFSET(7):
1379		ptr = &pt->b7;
1380	}
1381	if (write_access)
1382		*ptr = *data;
1383	else
1384		*data = *ptr;
1385	return 0;
1386}
1387
1388static int
1389access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
1390		unsigned long addr, unsigned long *data, int write_access)
1391{
1392	struct pt_regs *pt;
1393	unsigned long cfm, urbs_end;
1394	unsigned long *ptr = NULL;
1395
1396	pt = task_pt_regs(target);
1397	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
1398		switch (addr) {
1399		case ELF_AR_RSC_OFFSET:
1400			/* force PL3 */
1401			if (write_access)
1402				pt->ar_rsc = *data | (3 << 2);
1403			else
1404				*data = pt->ar_rsc;
1405			return 0;
1406		case ELF_AR_BSP_OFFSET:
1407			/*
1408			 * By convention, we use PT_AR_BSP to refer to
1409			 * the end of the user-level backing store.
1410			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
1411			 * to get the real value of ar.bsp at the time
1412			 * the kernel was entered.
1413			 *
1414			 * Furthermore, when changing the contents of
1415			 * PT_AR_BSP (or PT_CFM) while the task is
1416			 * blocked in a system call, convert the state
1417			 * so that the non-system-call exit
1418			 * path is used.  This ensures that the proper
1419			 * state will be picked up when resuming
1420			 * execution.  However, it *also* means that
1421			 * once we write PT_AR_BSP/PT_CFM, it won't be
1422			 * possible to modify the syscall arguments of
1423			 * the pending system call any longer.  This
1424			 * shouldn't be an issue because modifying
1425			 * PT_AR_BSP/PT_CFM generally implies that
1426			 * we're either abandoning the pending system
1427			 * call or that we defer it's re-execution
1428			 * (e.g., due to GDB doing an inferior
1429			 * function call).
1430			 */
1431			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1432			if (write_access) {
1433				if (*data != urbs_end) {
1434					if (in_syscall(pt))
1435						convert_to_non_syscall(target,
1436								       pt,
1437								       cfm);
1438					/*
1439					 * Simulate user-level write
1440					 * of ar.bsp:
1441					 */
1442					pt->loadrs = 0;
1443					pt->ar_bspstore = *data;
1444				}
1445			} else
1446				*data = urbs_end;
1447			return 0;
1448		case ELF_AR_BSPSTORE_OFFSET:
1449			ptr = &pt->ar_bspstore;
1450			break;
1451		case ELF_AR_RNAT_OFFSET:
1452			ptr = &pt->ar_rnat;
1453			break;
1454		case ELF_AR_CCV_OFFSET:
1455			ptr = &pt->ar_ccv;
1456			break;
1457		case ELF_AR_UNAT_OFFSET:
1458			ptr = &pt->ar_unat;
1459			break;
1460		case ELF_AR_FPSR_OFFSET:
1461			ptr = &pt->ar_fpsr;
1462			break;
1463		case ELF_AR_PFS_OFFSET:
1464			ptr = &pt->ar_pfs;
1465			break;
1466		case ELF_AR_LC_OFFSET:
1467			return unw_access_ar(info, UNW_AR_LC, data,
1468					     write_access);
1469		case ELF_AR_EC_OFFSET:
1470			return unw_access_ar(info, UNW_AR_EC, data,
1471					     write_access);
1472		case ELF_AR_CSD_OFFSET:
1473			ptr = &pt->ar_csd;
1474			break;
1475		case ELF_AR_SSD_OFFSET:
1476			ptr = &pt->ar_ssd;
1477		}
1478	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
1479		switch (addr) {
1480		case ELF_CR_IIP_OFFSET:
1481			ptr = &pt->cr_iip;
1482			break;
1483		case ELF_CFM_OFFSET:
1484			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1485			if (write_access) {
1486				if (((cfm ^ *data) & PFM_MASK) != 0) {
1487					if (in_syscall(pt))
1488						convert_to_non_syscall(target,
1489								       pt,
1490								       cfm);
1491					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
1492						      | (*data & PFM_MASK));
1493				}
1494			} else
1495				*data = cfm;
1496			return 0;
1497		case ELF_CR_IPSR_OFFSET:
1498			if (write_access) {
1499				unsigned long tmp = *data;
1500				/* psr.ri==3 is a reserved value: SDM 2:25 */
1501				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
1502					tmp &= ~IA64_PSR_RI;
1503				pt->cr_ipsr = ((tmp & IPSR_MASK)
1504					       | (pt->cr_ipsr & ~IPSR_MASK));
1505			} else
1506				*data = (pt->cr_ipsr & IPSR_MASK);
1507			return 0;
1508		}
1509	} else if (addr == ELF_NAT_OFFSET)
1510		return access_nat_bits(target, pt, info,
1511				       data, write_access);
1512	else if (addr == ELF_PR_OFFSET)
1513		ptr = &pt->pr;
1514	else
1515		return -1;
1516
1517	if (write_access)
1518		*ptr = *data;
1519	else
1520		*data = *ptr;
1521
1522	return 0;
1523}
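
/*
 * Illustrative sketch, not part of the original file: the PT_AR_BSP
 * convention documented in access_elf_areg() above means a debugger that
 * has read back PT_AR_BSP and PT_CFM can recover the value ar.bsp had at
 * kernel entry by backing up over CFM.sof register slots.
 * example_entry_time_bsp() is a hypothetical helper; ia64_rse_skip_regs()
 * is the real routine from <asm/rse.h>.
 */
static inline unsigned long *
example_entry_time_bsp (unsigned long *pt_ar_bsp, unsigned long pt_cfm)
{
	/* CFM.sof (size of frame) occupies bits 0-6 of the CFM.  */
	return ia64_rse_skip_regs(pt_ar_bsp, -(long) (pt_cfm & 0x7f));
}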
1524
1525static int
1526access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
1527		unsigned long addr, unsigned long *data, int write_access)
1528{
1529	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(15))
1530		return access_elf_gpreg(target, info, addr, data, write_access);
1531	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
1532		return access_elf_breg(target, info, addr, data, write_access);
1533	else
1534		return access_elf_areg(target, info, addr, data, write_access);
1535}
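
/*
 * Hypothetical usage sketch (not in the original file): reading the
 * return branch register b0 of a stopped tracee goes through the same
 * dispatcher; write_access=0 selects a read.
 */
static inline int
example_read_b0 (struct task_struct *target, struct unw_frame_info *info,
		 unsigned long *val)
{
	return access_elf_reg(target, info, ELF_BR_OFFSET(0), val, 0);
}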
1536
1537void do_gpregs_get(struct unw_frame_info *info, void *arg)
1538{
1539	struct pt_regs *pt;
1540	struct regset_getset *dst = arg;
1541	elf_greg_t tmp[16];
1542	unsigned int i, index, min_copy;
1543
1544	if (unw_unwind_to_user(info) < 0)
1545		return;
1546
1547	/*
1548	 * coredump format:
1549	 *      r0-r31
1550	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
1551	 *      predicate registers (p0-p63)
1552	 *      b0-b7
1553	 *      ip cfm user-mask
1554	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
1555	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
1556	 * (sketched as a struct after this function)  */
1557
1558
1559	/* Skip r0 */
1560	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1561		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1562						      &dst->u.get.kbuf,
1563						      &dst->u.get.ubuf,
1564						      0, ELF_GR_OFFSET(1));
1565		if (dst->ret || dst->count == 0)
1566			return;
1567	}
1568
1569	/* gr1-gr15 */
1570	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1571		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1572		min_copy = min(((unsigned int)ELF_GR_OFFSET(16)),
1573			       dst->pos + dst->count);
1574		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1575				index++)
1576			if (access_elf_reg(dst->target, info, i,
1577						&tmp[index], 0) < 0) {
1578				dst->ret = -EIO;
1579				return;
1580			}
1581		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1582				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1583				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1584		if (dst->ret || dst->count == 0)
1585			return;
1586	}
1587
1588	/* gr16-gr31 */
1589	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1590		pt = task_pt_regs(dst->target);
1591		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1592				&dst->u.get.kbuf, &dst->u.get.ubuf, &pt->r16,
1593				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1594		if (dst->ret || dst->count == 0)
1595			return;
1596	}
1597
1598	/* nat, pr, b0 - b7 */
1599	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1600		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1601		min_copy = min(((unsigned int)ELF_CR_IIP_OFFSET),
1602			       dst->pos + dst->count);
1603		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1604				index++)
1605			if (access_elf_reg(dst->target, info, i,
1606						&tmp[index], 0) < 0) {
1607				dst->ret = -EIO;
1608				return;
1609			}
1610		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1611				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1612				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1613		if (dst->ret || dst->count == 0)
1614			return;
1615	}
1616
1617	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1618	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1619	 */
1620	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1621		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1622		min_copy = min(((unsigned int)ELF_AR_END_OFFSET),
1623			       dst->pos + dst->count);
1624		for (i = dst->pos; i < min_copy; i += sizeof(elf_greg_t),
1625				index++)
1626			if (access_elf_reg(dst->target, info, i,
1627						&tmp[index], 0) < 0) {
1628				dst->ret = -EIO;
1629				return;
1630			}
1631		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1632				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1633				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1634	}
1635}
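
/*
 * Hypothetical sketch (not in the original file) of the dump layout
 * documented at the top of do_gpregs_get(), assuming the ELF_*_OFFSET
 * definitions earlier in this file place one elf_greg_t per slot, in
 * this order:
 */
struct example_ia64_gregset {
	unsigned long gr[32];			/* r0-r31; r0 reads as 0 */
	unsigned long nat_bits;			/* bit N set iff rN is NaT */
	unsigned long pr;			/* predicates p0-p63 */
	unsigned long br[8];			/* b0-b7 */
	unsigned long ip, cfm, psr;		/* psr: user-mask bits only */
	unsigned long ar_rsc, ar_bsp, ar_bspstore, ar_rnat;
	unsigned long ar_ccv, ar_unat, ar_fpsr, ar_pfs, ar_lc, ar_ec;
	unsigned long ar_csd, ar_ssd;
};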
1636
1637void do_gpregs_set(struct unw_frame_info *info, void *arg)
1638{
1639	struct pt_regs *pt;
1640	struct regset_getset *dst = arg;
1641	elf_greg_t tmp[16];
1642	unsigned int i, index;
1643
1644	if (unw_unwind_to_user(info) < 0)
1645		return;
1646
1647	/* Skip r0 */
1648	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1649		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1650						       &dst->u.set.kbuf,
1651						       &dst->u.set.ubuf,
1652						       0, ELF_GR_OFFSET(1));
1653		if (dst->ret || dst->count == 0)
1654			return;
1655	}
1656
1657	/* gr1-gr15 */
1658	if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(16)) {
1659		i = dst->pos;
1660		index = (dst->pos - ELF_GR_OFFSET(1)) / sizeof(elf_greg_t);
1661		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1662				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1663				ELF_GR_OFFSET(1), ELF_GR_OFFSET(16));
1664		if (dst->ret)
1665			return;
1666		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1667			if (access_elf_reg(dst->target, info, i,
1668						&tmp[index], 1) < 0) {
1669				dst->ret = -EIO;
1670				return;
1671			}
1672		if (dst->count == 0)
1673			return;
1674	}
1675
1676	/* gr16-gr31 */
1677	if (dst->count > 0 && dst->pos < ELF_NAT_OFFSET) {
1678		pt = task_pt_regs(dst->target);
1679		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1680				&dst->u.set.kbuf, &dst->u.set.ubuf, &pt->r16,
1681				ELF_GR_OFFSET(16), ELF_NAT_OFFSET);
1682		if (dst->ret || dst->count == 0)
1683			return;
1684	}
1685
1686	/* nat, pr, b0 - b7 */
1687	if (dst->count > 0 && dst->pos < ELF_CR_IIP_OFFSET) {
1688		i = dst->pos;
1689		index = (dst->pos - ELF_NAT_OFFSET) / sizeof(elf_greg_t);
1690		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1691				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1692				ELF_NAT_OFFSET, ELF_CR_IIP_OFFSET);
1693		if (dst->ret)
1694			return;
1695		for (; i < dst->pos; i += sizeof(elf_greg_t), index++)
1696			if (access_elf_reg(dst->target, info, i,
1697						&tmp[index], 1) < 0) {
1698				dst->ret = -EIO;
1699				return;
1700			}
1701		if (dst->count == 0)
1702			return;
1703	}
1704
1705	/* ip cfm psr ar.rsc ar.bsp ar.bspstore ar.rnat
1706	 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec ar.csd ar.ssd
1707	 */
1708	if (dst->count > 0 && dst->pos < (ELF_AR_END_OFFSET)) {
1709		i = dst->pos;
1710		index = (dst->pos - ELF_CR_IIP_OFFSET) / sizeof(elf_greg_t);
1711		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1712				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1713				ELF_CR_IIP_OFFSET, ELF_AR_END_OFFSET);
1714		if (dst->ret)
1715			return;
1716		for ( ; i < dst->pos; i += sizeof(elf_greg_t), index++)
1717			if (access_elf_reg(dst->target, info, i,
1718						&tmp[index], 1) < 0) {
1719				dst->ret = -EIO;
1720				return;
1721			}
1722	}
1723}
1724
1725#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))
1726
1727void do_fpregs_get(struct unw_frame_info *info, void *arg)
1728{
1729	struct regset_getset *dst = arg;
1730	struct task_struct *task = dst->target;
1731	elf_fpreg_t tmp[30];
1732	int index, min_copy, i;
1733
1734	if (unw_unwind_to_user(info) < 0)
1735		return;
1736
1737	/* Skip pos 0 and 1 */
1738	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1739		dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1740						      &dst->u.get.kbuf,
1741						      &dst->u.get.ubuf,
1742						      0, ELF_FP_OFFSET(2));
1743		if (dst->count == 0 || dst->ret)
1744			return;
1745	}
1746
1747	/* fr2-fr31 */
1748	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1749		index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
1750
1751		min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
1752				dst->pos + dst->count);
1753		for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
1754				index++)
1755			if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
1756					 &tmp[index])) {
1757				dst->ret = -EIO;
1758				return;
1759			}
1760		dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1761				&dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1762				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1763		if (dst->count == 0 || dst->ret)
1764			return;
1765	}
1766
1767	/* fph */
1768	if (dst->count > 0) {
1769		ia64_flush_fph(dst->target);
1770		if (task->thread.flags & IA64_THREAD_FPH_VALID)
1771			dst->ret = user_regset_copyout(
1772				&dst->pos, &dst->count,
1773				&dst->u.get.kbuf, &dst->u.get.ubuf,
1774				&dst->target->thread.fph,
1775				ELF_FP_OFFSET(32), -1);
1776		else
1777			/* Zero fill instead.  */
1778			dst->ret = user_regset_copyout_zero(
1779				&dst->pos, &dst->count,
1780				&dst->u.get.kbuf, &dst->u.get.ubuf,
1781				ELF_FP_OFFSET(32), -1);
1782	}
1783}
1784
1785void do_fpregs_set(struct unw_frame_info *info, void *arg)
1786{
1787	struct regset_getset *dst = arg;
1788	elf_fpreg_t fpreg, tmp[30];
1789	int index, start, end;
1790
1791	if (unw_unwind_to_user(info) < 0)
1792		return;
1793
1794	/* Skip pos 0 and 1 */
1795	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1796		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1797						       &dst->u.set.kbuf,
1798						       &dst->u.set.ubuf,
1799						       0, ELF_FP_OFFSET(2));
1800		if (dst->count == 0 || dst->ret)
1801			return;
1802	}
1803
1804	/* fr2-fr31 */
1805	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1806		start = dst->pos;
1807		end = min(((unsigned int)ELF_FP_OFFSET(32)),
1808			 dst->pos + dst->count);
1809		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1810				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1811				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1812		if (dst->ret)
1813			return;
1814
1815		if (start & 0xF) { /* user supplied only the high half */
1816			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
1817					 &fpreg)) {
1818				dst->ret = -EIO;
1819				return;
1820			}
1821			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
1822				= fpreg.u.bits[0];
1823			start &= ~0xFUL;
1824		}
1825		if (end & 0xF) { /* user supplied only the low half */
1826			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
1827					&fpreg)) {
1828				dst->ret = -EIO;
1829				return;
1830			}
1831			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
1832				= fpreg.u.bits[1];
1833			end = (end + 0xF) & ~0xFUL;
1834		}
1835
1836		for ( ;	start < end ; start += sizeof(elf_fpreg_t)) {
1837			index = start / sizeof(elf_fpreg_t);
1838			if (unw_set_fr(info, index, tmp[index - 2])) {
1839				dst->ret = -EIO;
1840				return;
1841			}
1842		}
1843		if (dst->ret || dst->count == 0)
1844			return;
1845	}
1846
1847	/* fph */
1848	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
1849		ia64_sync_fph(dst->target);
1850		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1851						&dst->u.set.kbuf,
1852						&dst->u.set.ubuf,
1853						&dst->target->thread.fph,
1854						ELF_FP_OFFSET(32), -1);
1855	}
1856}
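
/*
 * Hypothetical sketch (not in the original file) of the partial-copy
 * merge done in do_fpregs_set() above: an elf_fpreg_t is 16 bytes, so a
 * user buffer that starts or ends mid-register supplies only one 8-byte
 * half, and the other half must be refreshed from the live register.
 */
static inline void
example_merge_fpreg_halves (elf_fpreg_t *dst, const elf_fpreg_t *live,
			    int user_wrote_high_half)
{
	if (user_wrote_high_half)
		dst->u.bits[0] = live->u.bits[0];	/* keep low half */
	else
		dst->u.bits[1] = live->u.bits[1];	/* keep high half */
}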
1857
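/*
 * Run CALL under an unwind frame for TARGET: unwind the live kernel
 * stack when the target is the caller itself, otherwise start from the
 * blocked task's saved state.
 */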
1858static int
1859do_regset_call(void (*call)(struct unw_frame_info *, void *),
1860	       struct task_struct *target,
1861	       const struct user_regset *regset,
1862	       unsigned int pos, unsigned int count,
1863	       const void *kbuf, const void __user *ubuf)
1864{
1865	struct regset_getset info = { .target = target, .regset = regset,
1866				 .pos = pos, .count = count,
1867				 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
1868				 .ret = 0 };
1869
1870	if (target == current)
1871		unw_init_running(call, &info);
1872	else {
1873		struct unw_frame_info ufi;
1874		memset(&ufi, 0, sizeof(ufi));
1875		unw_init_from_blocked_task(&ufi, target);
1876		(*call)(&ufi, &info);
1877	}
1878
1879	return info.ret;
1880}
1881
1882static int
1883gpregs_get(struct task_struct *target,
1884	   const struct user_regset *regset,
1885	   unsigned int pos, unsigned int count,
1886	   void *kbuf, void __user *ubuf)
1887{
1888	return do_regset_call(do_gpregs_get, target, regset, pos, count,
1889		kbuf, ubuf);
1890}
1891
1892static int gpregs_set(struct task_struct *target,
1893		const struct user_regset *regset,
1894		unsigned int pos, unsigned int count,
1895		const void *kbuf, const void __user *ubuf)
1896{
1897	return do_regset_call(do_gpregs_set, target, regset, pos, count,
1898		kbuf, ubuf);
1899}
1900
1901static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
1902{
1903	do_sync_rbs(info, ia64_sync_user_rbs);
1904}
1905
1906/*
1907 * This is called to write back the register backing store.
1908 * ptrace does this before the tracee stops, so that a tracer reading
1909 * the user memory after the thread stops will get current register data.
1910 */
1911static int
1912gpregs_writeback(struct task_struct *target,
1913		 const struct user_regset *regset,
1914		 int now)
1915{
1916	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
1917		return 0;
1918	set_notify_resume(target);
1919	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
1920		NULL, NULL);
1921}
1922
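/*
 * Report how many f-register slots are worth dumping: all 128 once the
 * high partition (f32-f127) is valid for this task, otherwise just
 * f0-f31.
 */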
1923static int
1924fpregs_active(struct task_struct *target, const struct user_regset *regset)
1925{
1926	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
1927}
1928
1929static int fpregs_get(struct task_struct *target,
1930		const struct user_regset *regset,
1931		unsigned int pos, unsigned int count,
1932		void *kbuf, void __user *ubuf)
1933{
1934	return do_regset_call(do_fpregs_get, target, regset, pos, count,
1935		kbuf, ubuf);
1936}
1937
1938static int fpregs_set(struct task_struct *target,
1939		const struct user_regset *regset,
1940		unsigned int pos, unsigned int count,
1941		const void *kbuf, const void __user *ubuf)
1942{
1943	return do_regset_call(do_fpregs_set, target, regset, pos, count,
1944		kbuf, ubuf);
1945}
1946
1947static int
1948access_uarea(struct task_struct *child, unsigned long addr,
1949	      unsigned long *data, int write_access)
1950{
1951	unsigned int pos = -1; /* an invalid value */
1952	int ret;
1953	unsigned long *ptr, regnum;
1954
1955	if ((addr & 0x7) != 0) {
1956		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
1957		return -1;
1958	}
1959	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
1960		(addr >= PT_R7 + 8 && addr < PT_B1) ||
1961		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
1962		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
1963		dprintk("ptrace: rejecting access to register "
1964					"address 0x%lx\n", addr);
1965		return -1;
1966	}
1967
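	/*
	 * Floating-point registers occupy 16 bytes each in the user
	 * area, hence the "+ 15" in the case ranges below.
	 */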
1968	switch (addr) {
1969	case PT_F32 ... (PT_F127 + 15):
1970		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
1971		break;
1972	case PT_F2 ... (PT_F5 + 15):
1973		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
1974		break;
1975	case PT_F10 ... (PT_F31 + 15):
1976		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
1977		break;
1978	case PT_F6 ... (PT_F9 + 15):
1979		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
1980		break;
1981	}
1982
1983	if (pos != -1) {
1984		if (write_access)
1985			ret = fpregs_set(child, NULL, pos,
1986				sizeof(unsigned long), data, NULL);
1987		else
1988			ret = fpregs_get(child, NULL, pos,
1989				sizeof(unsigned long), data, NULL);
1990		if (ret != 0)
1991			return -1;
1992		return 0;
1993	}
1994
1995	switch (addr) {
1996	case PT_NAT_BITS:
1997		pos = ELF_NAT_OFFSET;
1998		break;
1999	case PT_R4 ... PT_R7:
2000		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
2001		break;
2002	case PT_B1 ... PT_B5:
2003		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
2004		break;
2005	case PT_AR_EC:
2006		pos = ELF_AR_EC_OFFSET;
2007		break;
2008	case PT_AR_LC:
2009		pos = ELF_AR_LC_OFFSET;
2010		break;
2011	case PT_CR_IPSR:
2012		pos = ELF_CR_IPSR_OFFSET;
2013		break;
2014	case PT_CR_IIP:
2015		pos = ELF_CR_IIP_OFFSET;
2016		break;
2017	case PT_CFM:
2018		pos = ELF_CFM_OFFSET;
2019		break;
2020	case PT_AR_UNAT:
2021		pos = ELF_AR_UNAT_OFFSET;
2022		break;
2023	case PT_AR_PFS:
2024		pos = ELF_AR_PFS_OFFSET;
2025		break;
2026	case PT_AR_RSC:
2027		pos = ELF_AR_RSC_OFFSET;
2028		break;
2029	case PT_AR_RNAT:
2030		pos = ELF_AR_RNAT_OFFSET;
2031		break;
2032	case PT_AR_BSPSTORE:
2033		pos = ELF_AR_BSPSTORE_OFFSET;
2034		break;
2035	case PT_PR:
2036		pos = ELF_PR_OFFSET;
2037		break;
2038	case PT_B6:
2039		pos = ELF_BR_OFFSET(6);
2040		break;
2041	case PT_AR_BSP:
2042		pos = ELF_AR_BSP_OFFSET;
2043		break;
2044	case PT_R1 ... PT_R3:
2045		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
2046		break;
2047	case PT_R12 ... PT_R15:
2048		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
2049		break;
2050	case PT_R8 ... PT_R11:
2051		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
2052		break;
2053	case PT_R16 ... PT_R31:
2054		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
2055		break;
2056	case PT_AR_CCV:
2057		pos = ELF_AR_CCV_OFFSET;
2058		break;
2059	case PT_AR_FPSR:
2060		pos = ELF_AR_FPSR_OFFSET;
2061		break;
2062	case PT_B0:
2063		pos = ELF_BR_OFFSET(0);
2064		break;
2065	case PT_B7:
2066		pos = ELF_BR_OFFSET(7);
2067		break;
2068	case PT_AR_CSD:
2069		pos = ELF_AR_CSD_OFFSET;
2070		break;
2071	case PT_AR_SSD:
2072		pos = ELF_AR_SSD_OFFSET;
2073		break;
2074	}
2075
2076	if (pos != -1) {
2077		if (write_access)
2078			ret = gpregs_set(child, NULL, pos,
2079				sizeof(unsigned long), data, NULL);
2080		else
2081			ret = gpregs_get(child, NULL, pos,
2082				sizeof(unsigned long), data, NULL);
2083		if (ret != 0)
2084			return -1;
2085		return 0;
2086	}
2087
2088	/* access debug registers */
2089	if (addr >= PT_IBR) {
2090		regnum = (addr - PT_IBR) >> 3;
2091		ptr = &child->thread.ibr[0];
2092	} else {
2093		regnum = (addr - PT_DBR) >> 3;
2094		ptr = &child->thread.dbr[0];
2095	}
2096
2097	if (regnum >= 8) {
2098		dprintk("ptrace: rejecting access to register "
2099				"address 0x%lx\n", addr);
2100		return -1;
2101	}
2102#ifdef CONFIG_PERFMON
2103	/*
2104	 * Check if debug registers are used by perfmon. This
2105	 * test must be done once we know that we can do the
2106	 * operation, i.e. the arguments are all valid, but
2107	 * before we start modifying the state.
2108	 *
2109	 * Perfmon needs to keep a count of how many processes
2110	 * are trying to modify the debug registers for system
2111	 * wide monitoring sessions.
2112	 *
2113	 * We also include read accesses here, because they may
2114	 * cause the PMU-installed debug register state
2115	 * (dbr[], ibr[]) to be reset. The two arrays are also
2116	 * used by perfmon, but we do not use
2117	 * IA64_THREAD_DBG_VALID. The registers are restored
2118	 * by the PMU context switch code.
2119	 */
2120	if (pfm_use_debug_registers(child))
2121		return -1;
2122#endif
2123
2124	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
2125		child->thread.flags |= IA64_THREAD_DBG_VALID;
2126		memset(child->thread.dbr, 0,
2127				sizeof(child->thread.dbr));
2128		memset(child->thread.ibr, 0,
2129				sizeof(child->thread.ibr));
2130	}
2131
2132	ptr += regnum;
2133
2134	if ((regnum & 1) && write_access) {
2135		/* don't let the user set kernel-level breakpoints (clear plm0-2): */
2136		*ptr = *data & ~(7UL << 56);
2137		return 0;
2138	}
2139	if (write_access)
2140		*ptr = *data;
2141	else
2142		*data = *ptr;
2143	return 0;
2144}
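
/*
 * Hypothetical user-space sketch (not part of this file): access_uarea()
 * is what ultimately services PTRACE_PEEKUSER/PTRACE_POKEUSER on ia64,
 * so a tracer reads a single register using the PT_* offsets from
 * <asm/ptrace_offsets.h>, e.g.:
 *
 *	errno = 0;
 *	long ip = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);
 *	if (ip == -1 && errno != 0)
 *		perror("PTRACE_PEEKUSER");
 */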
2145
2146static const struct user_regset native_regsets[] = {
2147	{
2148		.core_note_type = NT_PRSTATUS,
2149		.n = ELF_NGREG,
2150		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
2151		.get = gpregs_get, .set = gpregs_set,
2152		.writeback = gpregs_writeback
2153	},
2154	{
2155		.core_note_type = NT_PRFPREG,
2156		.n = ELF_NFPREG,
2157		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
2158		.get = fpregs_get, .set = fpregs_set, .active = fpregs_active
2159	},
2160};
2161
2162static const struct user_regset_view user_ia64_view = {
2163	.name = "ia64",
2164	.e_machine = EM_IA_64,
2165	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2166};
2167
2168const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
2169{
2170	return &user_ia64_view;
2171}
2172
2173struct syscall_get_set_args {
2174	unsigned int i;
2175	unsigned int n;
2176	unsigned long *args;
2177	struct pt_regs *regs;
2178	int rw;
2179};
2180
2181static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
2182{
2183	struct syscall_get_set_args *args = data;
2184	struct pt_regs *pt = args->regs;
2185	unsigned long *krbs, cfm, ndirty;
2186	int i, count;
2187
2188	if (unw_unwind_to_user(info) < 0)
2189		return;
2190
2191	cfm = pt->cr_ifs;
2192	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
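	/*
	 * pt->loadrs keeps the dirty-partition byte count in bits 16 and
	 * up (the ar.rsc.loadrs position), so ">> 19" yields a number of
	 * 8-byte RBS slots; ia64_rse_num_regs() then discounts NaT
	 * collection slots to get the dirty register count.
	 */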
2193	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
2194
2195	count = 0;
2196	if (in_syscall(pt))
2197		count = min_t(int, args->n, cfm & 0x7f);
2198
2199	for (i = 0; i < count; i++) {
2200		if (args->rw)
2201			*ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
2202				args->args[i];
2203		else
2204			args->args[i] = *ia64_rse_skip_regs(krbs,
2205				ndirty + i + args->i);
2206	}
2207
2208	if (!args->rw) {
2209		while (i < args->n) {
2210			args->args[i] = 0;
2211			i++;
2212		}
2213	}
2214}
2215
2216void ia64_syscall_get_set_arguments(struct task_struct *task,
2217	struct pt_regs *regs, unsigned int i, unsigned int n,
2218	unsigned long *args, int rw)
2219{
2220	struct syscall_get_set_args data = {
2221		.i = i,
2222		.n = n,
2223		.args = args,
2224		.regs = regs,
2225		.rw = rw,
2226	};
2227
2228	if (task == current)
2229		unw_init_running(syscall_get_set_args_cb, &data);
2230	else {
2231		struct unw_frame_info ufi;
2232		memset(&ufi, 0, sizeof(ufi));
2233		unw_init_from_blocked_task(&ufi, task);
2234		syscall_get_set_args_cb(&ufi, &data);
2235	}
2236}
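
/*
 * Hypothetical sketch (the real wrappers live in asm/syscall.h): reading
 * the first six syscall arguments of a stopped tracee through the helper
 * above; rw=0 selects a read, rw=1 a write.
 */
static inline void
example_syscall_get_arguments (struct task_struct *task,
			       struct pt_regs *regs, unsigned long *args)
{
	ia64_syscall_get_set_arguments(task, regs, 0, 6, args, 0);
}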