   1/*
   2 * Copyright (C) 1999-2004 Hewlett-Packard Co
   3 *	David Mosberger-Tang <davidm@hpl.hp.com>
   4 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
   5 * 	- Change pt_regs_off() to make it less dependent on pt_regs structure.
   6 */
   7/*
   8 * This file implements call frame unwind support for the Linux
   9 * kernel.  Parsing and processing the unwind information is
  10 * time-consuming, so this implementation translates the unwind
  11 * descriptors into unwind scripts.  These scripts are very simple
  12 * (basically a sequence of assignments) and efficient to execute.
  13 * They are cached for later re-use.  Each script is specific for a
  14 * given instruction pointer address and the set of predicate values
  15 * that the script depends on (most unwind descriptors are
  16 * unconditional and scripts often do not depend on predicates at
  17 * all).  This code is based on the unwind conventions described in
  18 * the "IA-64 Software Conventions and Runtime Architecture" manual.
  19 *
  20 * SMP conventions:
  21 *	o updates to the global unwind data (in structure "unw") are serialized
  22 *	  by the unw.lock spinlock
  23 *	o each unwind script has its own read-write lock; a thread must acquire
  24 *	  a read lock before executing a script and must acquire a write lock
  25 *	  before modifying a script
  26 *	o if both the unw.lock spinlock and a script's read-write lock must be
  27 *	  acquired, then the read-write lock must be acquired first.
  28 */
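/*
 * Illustrative sketch (assumes only the declarations in <asm/unwind.h>):
 * a typical in-kernel backtrace repeatedly runs the cached unwind script
 * for the current frame until unwinding fails or user space is reached:
 *
 *	static void do_backtrace (struct unw_frame_info *info, void *arg)
 *	{
 *		do {
 *			printk("ip=0x%lx\n", info->ip);
 *		} while (unw_unwind(info) >= 0);
 *	}
 *
 *	unw_init_running(do_backtrace, NULL);
 *
 * Each unw_unwind() call below looks up (or builds) the script for
 * info->ip and executes it to recover the caller's saved state.
 */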
  29#include <linux/module.h>
  30#include <linux/bootmem.h>
  31#include <linux/elf.h>
  32#include <linux/kernel.h>
  33#include <linux/sched.h>
  34#include <linux/slab.h>
  35
  36#include <asm/unwind.h>
  37
  38#include <asm/delay.h>
  39#include <asm/page.h>
  40#include <asm/ptrace.h>
  41#include <asm/ptrace_offsets.h>
  42#include <asm/rse.h>
  43#include <asm/sections.h>
  44#include <asm/system.h>
  45#include <asm/uaccess.h>
  46
  47#include "entry.h"
  48#include "unwind_i.h"
  49
  50#define UNW_LOG_CACHE_SIZE	7	/* each unw_script is ~256 bytes in size */
  51#define UNW_CACHE_SIZE		(1 << UNW_LOG_CACHE_SIZE)
  52
  53#define UNW_LOG_HASH_SIZE	(UNW_LOG_CACHE_SIZE + 1)
  54#define UNW_HASH_SIZE		(1 << UNW_LOG_HASH_SIZE)
  55
   56#define UNW_STATS	0	/* WARNING: this disables interrupts for long time-spans!! */
  57
  58#ifdef UNW_DEBUG
  59  static unsigned int unw_debug_level = UNW_DEBUG;
  60#  define UNW_DEBUG_ON(n)	unw_debug_level >= n
   61   /* Do not code a printk level here; not all debug lines end in a newline */
  62#  define UNW_DPRINT(n, ...)  if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
  63#  undef inline
  64#  define inline
  65#else /* !UNW_DEBUG */
  66#  define UNW_DEBUG_ON(n)  0
  67#  define UNW_DPRINT(n, ...)
  68#endif /* UNW_DEBUG */
  69
  70#if UNW_STATS
  71# define STAT(x...)	x
  72#else
  73# define STAT(x...)
  74#endif
  75
  76#define alloc_reg_state()	kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
  77#define free_reg_state(usr)	kfree(usr)
  78#define alloc_labeled_state()	kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
  79#define free_labeled_state(usr)	kfree(usr)
  80
  81typedef unsigned long unw_word;
  82typedef unsigned char unw_hash_index_t;
  83
  84static struct {
  85	spinlock_t lock;			/* spinlock for unwind data */
  86
  87	/* list of unwind tables (one per load-module) */
  88	struct unw_table *tables;
  89
  90	unsigned long r0;			/* constant 0 for r0 */
  91
  92	/* table of registers that prologues can save (and order in which they're saved): */
  93	const unsigned char save_order[8];
  94
  95	/* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
  96	unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
  97
   98	unsigned short lru_head;		/* index of least-recently used script */
  99	unsigned short lru_tail;		/* index of most-recently used script */
 100
 101	/* index into unw_frame_info for preserved register i */
 102	unsigned short preg_index[UNW_NUM_REGS];
 103
 104	short pt_regs_offsets[32];
 105
 106	/* unwind table for the kernel: */
 107	struct unw_table kernel_table;
 108
 109	/* unwind table describing the gate page (kernel code that is mapped into user space): */
 110	size_t gate_table_size;
 111	unsigned long *gate_table;
 112
 113	/* hash table that maps instruction pointer to script index: */
 114	unsigned short hash[UNW_HASH_SIZE];
 115
 116	/* script cache: */
 117	struct unw_script cache[UNW_CACHE_SIZE];
 118
 119# ifdef UNW_DEBUG
 120	const char *preg_name[UNW_NUM_REGS];
 121# endif
 122# if UNW_STATS
 123	struct {
 124		struct {
 125			int lookups;
 126			int hinted_hits;
 127			int normal_hits;
 128			int collision_chain_traversals;
 129		} cache;
 130		struct {
 131			unsigned long build_time;
 132			unsigned long run_time;
 133			unsigned long parse_time;
 134			int builds;
 135			int news;
 136			int collisions;
 137			int runs;
 138		} script;
 139		struct {
 140			unsigned long init_time;
 141			unsigned long unwind_time;
 142			int inits;
 143			int unwinds;
 144		} api;
 145	} stat;
 146# endif
 147} unw = {
 148	.tables = &unw.kernel_table,
 149	.lock = __SPIN_LOCK_UNLOCKED(unw.lock),
 150	.save_order = {
 151		UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
 152		UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
 153	},
 154	.preg_index = {
 155		offsetof(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_GR */
 156		offsetof(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_MEM */
 157		offsetof(struct unw_frame_info, bsp_loc)/8,
 158		offsetof(struct unw_frame_info, bspstore_loc)/8,
 159		offsetof(struct unw_frame_info, pfs_loc)/8,
 160		offsetof(struct unw_frame_info, rnat_loc)/8,
 161		offsetof(struct unw_frame_info, psp)/8,
 162		offsetof(struct unw_frame_info, rp_loc)/8,
 163		offsetof(struct unw_frame_info, r4)/8,
 164		offsetof(struct unw_frame_info, r5)/8,
 165		offsetof(struct unw_frame_info, r6)/8,
 166		offsetof(struct unw_frame_info, r7)/8,
 167		offsetof(struct unw_frame_info, unat_loc)/8,
 168		offsetof(struct unw_frame_info, pr_loc)/8,
 169		offsetof(struct unw_frame_info, lc_loc)/8,
 170		offsetof(struct unw_frame_info, fpsr_loc)/8,
 171		offsetof(struct unw_frame_info, b1_loc)/8,
 172		offsetof(struct unw_frame_info, b2_loc)/8,
 173		offsetof(struct unw_frame_info, b3_loc)/8,
 174		offsetof(struct unw_frame_info, b4_loc)/8,
 175		offsetof(struct unw_frame_info, b5_loc)/8,
 176		offsetof(struct unw_frame_info, f2_loc)/8,
 177		offsetof(struct unw_frame_info, f3_loc)/8,
 178		offsetof(struct unw_frame_info, f4_loc)/8,
 179		offsetof(struct unw_frame_info, f5_loc)/8,
 180		offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
 181		offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
 182		offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
 183		offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
 184		offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
 185		offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
 186		offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
 187		offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
 188		offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
 189		offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
 190		offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
 191		offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
 192		offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
 193		offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
 194		offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
 195		offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
 196	},
 197	.pt_regs_offsets = {
 198		[0] = -1,
 199		offsetof(struct pt_regs,  r1),
 200		offsetof(struct pt_regs,  r2),
 201		offsetof(struct pt_regs,  r3),
 202		[4] = -1, [5] = -1, [6] = -1, [7] = -1,
 203		offsetof(struct pt_regs,  r8),
 204		offsetof(struct pt_regs,  r9),
 205		offsetof(struct pt_regs, r10),
 206		offsetof(struct pt_regs, r11),
 207		offsetof(struct pt_regs, r12),
 208		offsetof(struct pt_regs, r13),
 209		offsetof(struct pt_regs, r14),
 210		offsetof(struct pt_regs, r15),
 211		offsetof(struct pt_regs, r16),
 212		offsetof(struct pt_regs, r17),
 213		offsetof(struct pt_regs, r18),
 214		offsetof(struct pt_regs, r19),
 215		offsetof(struct pt_regs, r20),
 216		offsetof(struct pt_regs, r21),
 217		offsetof(struct pt_regs, r22),
 218		offsetof(struct pt_regs, r23),
 219		offsetof(struct pt_regs, r24),
 220		offsetof(struct pt_regs, r25),
 221		offsetof(struct pt_regs, r26),
 222		offsetof(struct pt_regs, r27),
 223		offsetof(struct pt_regs, r28),
 224		offsetof(struct pt_regs, r29),
 225		offsetof(struct pt_regs, r30),
 226		offsetof(struct pt_regs, r31),
 227	},
 228	.hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
 229#ifdef UNW_DEBUG
 230	.preg_name = {
 231		"pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
 232		"r4", "r5", "r6", "r7",
 233		"ar.unat", "pr", "ar.lc", "ar.fpsr",
 234		"b1", "b2", "b3", "b4", "b5",
 235		"f2", "f3", "f4", "f5",
 236		"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
 237		"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
 238	}
 239#endif
 240};
 241
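/*
 * An address is "read-only" exactly when it points at unw.r0, the constant
 * zero word that stands in for register r0; attempts to write through such
 * a location are silently ignored by the accessors below.
 */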
 242static inline int
 243read_only (void *addr)
 244{
 245	return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
 246}
 247
 248/*
 249 * Returns offset of rREG in struct pt_regs.
 250 */
 251static inline unsigned long
 252pt_regs_off (unsigned long reg)
 253{
 254	short off = -1;
 255
 256	if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
 257		off = unw.pt_regs_offsets[reg];
 258
 259	if (off < 0) {
 260		UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __func__, reg);
 261		off = 0;
 262	}
 263	return (unsigned long) off;
 264}
 265
 266static inline struct pt_regs *
 267get_scratch_regs (struct unw_frame_info *info)
 268{
 269	if (!info->pt) {
 270		/* This should not happen with valid unwind info.  */
 271		UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __func__);
 272		if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
 273			info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
 274		else
 275			info->pt = info->sp - 16;
 276	}
 277	UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __func__, info->sp, info->pt);
 278	return (struct pt_regs *) info->pt;
 279}
 280
 281/* Unwind accessors.  */
 282
 283int
 284unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
 285{
 286	unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
 287	struct unw_ireg *ireg;
 288	struct pt_regs *pt;
 289
 290	if ((unsigned) regnum - 1 >= 127) {
 291		if (regnum == 0 && !write) {
 292			*val = 0;	/* read r0 always returns 0 */
 293			*nat = 0;
 294			return 0;
 295		}
 296		UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
 297			   __func__, regnum);
 298		return -1;
 299	}
 300
 301	if (regnum < 32) {
 302		if (regnum >= 4 && regnum <= 7) {
 303			/* access a preserved register */
 304			ireg = &info->r4 + (regnum - 4);
 305			addr = ireg->loc;
 306			if (addr) {
 307				nat_addr = addr + ireg->nat.off;
 308				switch (ireg->nat.type) {
 309				      case UNW_NAT_VAL:
 310					/* simulate getf.sig/setf.sig */
 311					if (write) {
 312						if (*nat) {
 313							/* write NaTVal and be done with it */
 314							addr[0] = 0;
 315							addr[1] = 0x1fffe;
 316							return 0;
 317						}
 318						addr[1] = 0x1003e;
 319					} else {
  320						if (addr[0] == 0 && addr[1] == 0x1fffe) {
 321							/* return NaT and be done with it */
 322							*val = 0;
 323							*nat = 1;
 324							return 0;
 325						}
 326					}
 327					/* fall through */
 328				      case UNW_NAT_NONE:
 329					dummy_nat = 0;
 330					nat_addr = &dummy_nat;
 331					break;
 332
 333				      case UNW_NAT_MEMSTK:
 334					nat_mask = (1UL << ((long) addr & 0x1f8)/8);
 335					break;
 336
 337				      case UNW_NAT_REGSTK:
 338					nat_addr = ia64_rse_rnat_addr(addr);
 339					if ((unsigned long) addr < info->regstk.limit
 340					    || (unsigned long) addr >= info->regstk.top)
 341					{
 342						UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
 343							"[0x%lx-0x%lx)\n",
 344							__func__, (void *) addr,
 345							info->regstk.limit,
 346							info->regstk.top);
 347						return -1;
 348					}
 349					if ((unsigned long) nat_addr >= info->regstk.top)
 350						nat_addr = &info->sw->ar_rnat;
 351					nat_mask = (1UL << ia64_rse_slot_num(addr));
 352					break;
 353				}
 354			} else {
 355				addr = &info->sw->r4 + (regnum - 4);
 356				nat_addr = &info->sw->ar_unat;
 357				nat_mask = (1UL << ((long) addr & 0x1f8)/8);
 358			}
 359		} else {
 360			/* access a scratch register */
 361			pt = get_scratch_regs(info);
 362			addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
 363			if (info->pri_unat_loc)
 364				nat_addr = info->pri_unat_loc;
 365			else
 366				nat_addr = &info->sw->caller_unat;
 367			nat_mask = (1UL << ((long) addr & 0x1f8)/8);
 368		}
 369	} else {
 370		/* access a stacked register */
 371		addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
 372		nat_addr = ia64_rse_rnat_addr(addr);
 373		if ((unsigned long) addr < info->regstk.limit
 374		    || (unsigned long) addr >= info->regstk.top)
 375		{
 376			UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
 377				   "of rbs\n",  __func__);
 378			return -1;
 379		}
 380		if ((unsigned long) nat_addr >= info->regstk.top)
 381			nat_addr = &info->sw->ar_rnat;
 382		nat_mask = (1UL << ia64_rse_slot_num(addr));
 383	}
 384
 385	if (write) {
 386		if (read_only(addr)) {
 387			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
 388				__func__);
 389		} else {
 390			*addr = *val;
 391			if (*nat)
 392				*nat_addr |= nat_mask;
 393			else
 394				*nat_addr &= ~nat_mask;
 395		}
 396	} else {
 397		if ((*nat_addr & nat_mask) == 0) {
 398			*val = *addr;
 399			*nat = 0;
 400		} else {
 401			*val = 0;	/* if register is a NaT, *addr may contain kernel data! */
 402			*nat = 1;
 403		}
 404	}
 405	return 0;
 406}
 407EXPORT_SYMBOL(unw_access_gr);
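
/*
 * Illustrative sketch: reading a preserved general register through the
 * accessor above, given an already-initialized unw_frame_info.  The last
 * argument selects read (0) or write (1); the NaT bit is reported
 * separately from the value:
 *
 *	unsigned long val;
 *	char nat;
 *
 *	if (unw_access_gr(info, 4, &val, &nat, 0) == 0 && !nat)
 *		printk("r4=0x%lx\n", val);
 */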
 408
 409int
 410unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
 411{
 412	unsigned long *addr;
 413	struct pt_regs *pt;
 414
 415	switch (regnum) {
 416		/* scratch: */
 417	      case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
 418	      case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
 419	      case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
 420
 421		/* preserved: */
 422	      case 1: case 2: case 3: case 4: case 5:
 423		addr = *(&info->b1_loc + (regnum - 1));
 424		if (!addr)
 425			addr = &info->sw->b1 + (regnum - 1);
 426		break;
 427
 428	      default:
 429		UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
 430			   __func__, regnum);
 431		return -1;
 432	}
 433	if (write)
 434		if (read_only(addr)) {
 435			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
 436				__func__);
 437		} else
 438			*addr = *val;
 439	else
 440		*val = *addr;
 441	return 0;
 442}
 443EXPORT_SYMBOL(unw_access_br);
 444
 445int
 446unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
 447{
 448	struct ia64_fpreg *addr = NULL;
 449	struct pt_regs *pt;
 450
 451	if ((unsigned) (regnum - 2) >= 126) {
 452		UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
 453			   __func__, regnum);
 454		return -1;
 455	}
 456
 457	if (regnum <= 5) {
 458		addr = *(&info->f2_loc + (regnum - 2));
 459		if (!addr)
 460			addr = &info->sw->f2 + (regnum - 2);
 461	} else if (regnum <= 15) {
 462		if (regnum <= 11) {
 463			pt = get_scratch_regs(info);
 464			addr = &pt->f6  + (regnum - 6);
 465		}
 466		else
 467			addr = &info->sw->f12 + (regnum - 12);
 468	} else if (regnum <= 31) {
 469		addr = info->fr_loc[regnum - 16];
 470		if (!addr)
 471			addr = &info->sw->f16 + (regnum - 16);
 472	} else {
 473		struct task_struct *t = info->task;
 474
 475		if (write)
 476			ia64_sync_fph(t);
 477		else
 478			ia64_flush_fph(t);
 479		addr = t->thread.fph + (regnum - 32);
 480	}
 481
 482	if (write)
 483		if (read_only(addr)) {
 484			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
 485				__func__);
 486		} else
 487			*addr = *val;
 488	else
 489		*val = *addr;
 490	return 0;
 491}
 492EXPORT_SYMBOL(unw_access_fr);
 493
 494int
 495unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
 496{
 497	unsigned long *addr;
 498	struct pt_regs *pt;
 499
 500	switch (regnum) {
 501	      case UNW_AR_BSP:
 502		addr = info->bsp_loc;
 503		if (!addr)
 504			addr = &info->sw->ar_bspstore;
 505		break;
 506
 507	      case UNW_AR_BSPSTORE:
 508		addr = info->bspstore_loc;
 509		if (!addr)
 510			addr = &info->sw->ar_bspstore;
 511		break;
 512
 513	      case UNW_AR_PFS:
 514		addr = info->pfs_loc;
 515		if (!addr)
 516			addr = &info->sw->ar_pfs;
 517		break;
 518
 519	      case UNW_AR_RNAT:
 520		addr = info->rnat_loc;
 521		if (!addr)
 522			addr = &info->sw->ar_rnat;
 523		break;
 524
 525	      case UNW_AR_UNAT:
 526		addr = info->unat_loc;
 527		if (!addr)
 528			addr = &info->sw->caller_unat;
 529		break;
 530
 531	      case UNW_AR_LC:
 532		addr = info->lc_loc;
 533		if (!addr)
 534			addr = &info->sw->ar_lc;
 535		break;
 536
 537	      case UNW_AR_EC:
 538		if (!info->cfm_loc)
 539			return -1;
 540		if (write)
 541			*info->cfm_loc =
 542				(*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
 543		else
 544			*val = (*info->cfm_loc >> 52) & 0x3f;
 545		return 0;
 546
 547	      case UNW_AR_FPSR:
 548		addr = info->fpsr_loc;
 549		if (!addr)
 550			addr = &info->sw->ar_fpsr;
 551		break;
 552
 553	      case UNW_AR_RSC:
 554		pt = get_scratch_regs(info);
 555		addr = &pt->ar_rsc;
 556		break;
 557
 558	      case UNW_AR_CCV:
 559		pt = get_scratch_regs(info);
 560		addr = &pt->ar_ccv;
 561		break;
 562
 563	      case UNW_AR_CSD:
 564		pt = get_scratch_regs(info);
 565		addr = &pt->ar_csd;
 566		break;
 567
 568	      case UNW_AR_SSD:
 569		pt = get_scratch_regs(info);
 570		addr = &pt->ar_ssd;
 571		break;
 572
 573	      default:
 574		UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
 575			   __func__, regnum);
 576		return -1;
 577	}
 578
 579	if (write) {
 580		if (read_only(addr)) {
 581			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
 582				__func__);
 583		} else
 584			*addr = *val;
 585	} else
 586		*val = *addr;
 587	return 0;
 588}
 589EXPORT_SYMBOL(unw_access_ar);
 590
 591int
 592unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
 593{
 594	unsigned long *addr;
 595
 596	addr = info->pr_loc;
 597	if (!addr)
 598		addr = &info->sw->pr;
 599
 600	if (write) {
 601		if (read_only(addr)) {
 602			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
 603				__func__);
 604		} else
 605			*addr = *val;
 606	} else
 607		*val = *addr;
 608	return 0;
 609}
 610EXPORT_SYMBOL(unw_access_pr);
 611
 612
 613/* Routines to manipulate the state stack.  */
 614
 615static inline void
 616push (struct unw_state_record *sr)
 617{
 618	struct unw_reg_state *rs;
 619
 620	rs = alloc_reg_state();
 621	if (!rs) {
 622		printk(KERN_ERR "unwind: cannot stack reg state!\n");
 623		return;
 624	}
 625	memcpy(rs, &sr->curr, sizeof(*rs));
 626	sr->curr.next = rs;
 627}
 628
 629static void
 630pop (struct unw_state_record *sr)
 631{
 632	struct unw_reg_state *rs = sr->curr.next;
 633
 634	if (!rs) {
 635		printk(KERN_ERR "unwind: stack underflow!\n");
 636		return;
 637	}
 638	memcpy(&sr->curr, rs, sizeof(*rs));
 639	free_reg_state(rs);
 640}
 641
 642/* Make a copy of the state stack.  Non-recursive to avoid stack overflows.  */
 643static struct unw_reg_state *
 644dup_state_stack (struct unw_reg_state *rs)
 645{
 646	struct unw_reg_state *copy, *prev = NULL, *first = NULL;
 647
 648	while (rs) {
 649		copy = alloc_reg_state();
 650		if (!copy) {
 651			printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
 652			return NULL;
 653		}
 654		memcpy(copy, rs, sizeof(*copy));
 655		if (first)
 656			prev->next = copy;
 657		else
 658			first = copy;
 659		rs = rs->next;
 660		prev = copy;
 661	}
 662	return first;
 663}
 664
 665/* Free all stacked register states (but not RS itself).  */
 666static void
 667free_state_stack (struct unw_reg_state *rs)
 668{
 669	struct unw_reg_state *p, *next;
 670
 671	for (p = rs->next; p != NULL; p = next) {
 672		next = p->next;
 673		free_reg_state(p);
 674	}
 675	rs->next = NULL;
 676}
 677
 678/* Unwind decoder routines */
 679
 680static enum unw_register_index __attribute_const__
 681decode_abreg (unsigned char abreg, int memory)
 682{
 683	switch (abreg) {
 684	      case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
 685	      case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
 686	      case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
 687	      case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
 688	      case 0x60: return UNW_REG_PR;
 689	      case 0x61: return UNW_REG_PSP;
 690	      case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
 691	      case 0x63: return UNW_REG_RP;
 692	      case 0x64: return UNW_REG_BSP;
 693	      case 0x65: return UNW_REG_BSPSTORE;
 694	      case 0x66: return UNW_REG_RNAT;
 695	      case 0x67: return UNW_REG_UNAT;
 696	      case 0x68: return UNW_REG_FPSR;
 697	      case 0x69: return UNW_REG_PFS;
 698	      case 0x6a: return UNW_REG_LC;
 699	      default:
 700		break;
 701	}
 702	UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __func__, abreg);
 703	return UNW_REG_LC;
 704}
 705
 706static void
 707set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
 708{
 709	reg->val = val;
 710	reg->where = where;
 711	if (reg->when == UNW_WHEN_NEVER)
 712		reg->when = when;
 713}
 714
 715static void
 716alloc_spill_area (unsigned long *offp, unsigned long regsize,
 717		  struct unw_reg_info *lo, struct unw_reg_info *hi)
 718{
 719	struct unw_reg_info *reg;
 720
 721	for (reg = hi; reg >= lo; --reg) {
 722		if (reg->where == UNW_WHERE_SPILL_HOME) {
 723			reg->where = UNW_WHERE_PSPREL;
 724			*offp -= regsize;
 725			reg->val = *offp;
 726		}
 727	}
 728}
 729
 730static inline void
 731spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
 732{
 733	struct unw_reg_info *reg;
 734
 735	for (reg = *regp; reg <= lim; ++reg) {
 736		if (reg->where == UNW_WHERE_SPILL_HOME) {
 737			reg->when = t;
 738			*regp = reg + 1;
 739			return;
 740		}
 741	}
 742	UNW_DPRINT(0, "unwind.%s: excess spill!\n",  __func__);
 743}
 744
 745static inline void
 746finish_prologue (struct unw_state_record *sr)
 747{
 748	struct unw_reg_info *reg;
 749	unsigned long off;
 750	int i;
 751
 752	/*
 753	 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
 754	 * for Using Unwind Descriptors", rule 3):
 755	 */
 756	for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
 757		reg = sr->curr.reg + unw.save_order[i];
 758		if (reg->where == UNW_WHERE_GR_SAVE) {
 759			reg->where = UNW_WHERE_GR;
 760			reg->val = sr->gr_save_loc++;
 761		}
 762	}
 763
 764	/*
 765	 * Next, compute when the fp, general, and branch registers get
 766	 * saved.  This must come before alloc_spill_area() because
 767	 * we need to know which registers are spilled to their home
 768	 * locations.
 769	 */
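	/*
	 * If present, the imask holds two bits per instruction slot of the
	 * region, packed four slots per byte with the most significant pair
	 * first.  A non-zero pair identifies the register file (FR, GR or
	 * BR, matching regs[0..2] below) of the next spill occurring in
	 * that slot, per the runtime architecture's unwind descriptor
	 * encoding.
	 */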
 770	if (sr->imask) {
 771		unsigned char kind, mask = 0, *cp = sr->imask;
 772		int t;
 773		static const unsigned char limit[3] = {
 774			UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
 775		};
 776		struct unw_reg_info *(regs[3]);
 777
 778		regs[0] = sr->curr.reg + UNW_REG_F2;
 779		regs[1] = sr->curr.reg + UNW_REG_R4;
 780		regs[2] = sr->curr.reg + UNW_REG_B1;
 781
 782		for (t = 0; t < sr->region_len; ++t) {
 783			if ((t & 3) == 0)
 784				mask = *cp++;
 785			kind = (mask >> 2*(3-(t & 3))) & 3;
 786			if (kind > 0)
 787				spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
 788						sr->region_start + t);
 789		}
 790	}
 791	/*
 792	 * Next, lay out the memory stack spill area:
 793	 */
 794	if (sr->any_spills) {
 795		off = sr->spill_offset;
 796		alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
 797		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
 798		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
 799	}
 800}
 801
 802/*
 803 * Region header descriptors.
 804 */
 805
 806static void
 807desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
 808	       struct unw_state_record *sr)
 809{
 810	int i, region_start;
 811
 812	if (!(sr->in_body || sr->first_region))
 813		finish_prologue(sr);
 814	sr->first_region = 0;
 815
 816	/* check if we're done: */
 817	if (sr->when_target < sr->region_start + sr->region_len) {
 818		sr->done = 1;
 819		return;
 820	}
 821
 822	region_start = sr->region_start + sr->region_len;
 823
 824	for (i = 0; i < sr->epilogue_count; ++i)
 825		pop(sr);
 826	sr->epilogue_count = 0;
 827	sr->epilogue_start = UNW_WHEN_NEVER;
 828
 829	sr->region_start = region_start;
 830	sr->region_len = rlen;
 831	sr->in_body = body;
 832
 833	if (!body) {
 834		push(sr);
 835
 836		for (i = 0; i < 4; ++i) {
 837			if (mask & 0x8)
 838				set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
 839					sr->region_start + sr->region_len - 1, grsave++);
 840			mask <<= 1;
 841		}
 842		sr->gr_save_loc = grsave;
 843		sr->any_spills = 0;
 844		sr->imask = NULL;
 845		sr->spill_offset = 0x10;	/* default to psp+16 */
 846	}
 847}
 848
 849/*
 850 * Prologue descriptors.
 851 */
 852
 853static inline void
 854desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
 855{
 856	if (abi == 3 && context == 'i') {
 857		sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
 858		UNW_DPRINT(3, "unwind.%s: interrupt frame\n",  __func__);
 859	}
 860	else
  861		UNW_DPRINT(0, "unwind.%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
 862				__func__, abi, context);
 863}
 864
 865static inline void
 866desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
 867{
 868	int i;
 869
 870	for (i = 0; i < 5; ++i) {
 871		if (brmask & 1)
 872			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
 873				sr->region_start + sr->region_len - 1, gr++);
 874		brmask >>= 1;
 875	}
 876}
 877
 878static inline void
 879desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
 880{
 881	int i;
 882
 883	for (i = 0; i < 5; ++i) {
 884		if (brmask & 1) {
 885			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
 886				sr->region_start + sr->region_len - 1, 0);
 887			sr->any_spills = 1;
 888		}
 889		brmask >>= 1;
 890	}
 891}
 892
 893static inline void
 894desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
 895{
 896	int i;
 897
 898	for (i = 0; i < 4; ++i) {
 899		if ((grmask & 1) != 0) {
 900			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
 901				sr->region_start + sr->region_len - 1, 0);
 902			sr->any_spills = 1;
 903		}
 904		grmask >>= 1;
 905	}
 906	for (i = 0; i < 20; ++i) {
 907		if ((frmask & 1) != 0) {
 908			int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
 909			set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
 910				sr->region_start + sr->region_len - 1, 0);
 911			sr->any_spills = 1;
 912		}
 913		frmask >>= 1;
 914	}
 915}
 916
 917static inline void
 918desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
 919{
 920	int i;
 921
 922	for (i = 0; i < 4; ++i) {
 923		if ((frmask & 1) != 0) {
 924			set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
 925				sr->region_start + sr->region_len - 1, 0);
 926			sr->any_spills = 1;
 927		}
 928		frmask >>= 1;
 929	}
 930}
 931
 932static inline void
 933desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
 934{
 935	int i;
 936
 937	for (i = 0; i < 4; ++i) {
 938		if ((grmask & 1) != 0)
 939			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
 940				sr->region_start + sr->region_len - 1, gr++);
 941		grmask >>= 1;
 942	}
 943}
 944
 945static inline void
 946desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
 947{
 948	int i;
 949
 950	for (i = 0; i < 4; ++i) {
 951		if ((grmask & 1) != 0) {
 952			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
 953				sr->region_start + sr->region_len - 1, 0);
 954			sr->any_spills = 1;
 955		}
 956		grmask >>= 1;
 957	}
 958}
 959
 960static inline void
 961desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
 962{
 963	set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
 964		sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
 965}
 966
 967static inline void
 968desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
 969{
 970	sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
 971}
 972
 973static inline void
 974desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
 975{
 976	set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
 977}
 978
 979static inline void
 980desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
 981{
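	/* pspoff counts 4-byte words below psp+16, hence the 0x10 - 4*pspoff encoding */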
 982	set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
 983		0x10 - 4*pspoff);
 984}
 985
 986static inline void
 987desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
 988{
 989	set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
 990		4*spoff);
 991}
 992
 993static inline void
 994desc_rp_br (unsigned char dst, struct unw_state_record *sr)
 995{
 996	sr->return_link_reg = dst;
 997}
 998
 999static inline void
1000desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
1001{
1002	struct unw_reg_info *reg = sr->curr.reg + regnum;
1003
1004	if (reg->where == UNW_WHERE_NONE)
1005		reg->where = UNW_WHERE_GR_SAVE;
1006	reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1007}
1008
1009static inline void
1010desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
1011{
1012	sr->spill_offset = 0x10 - 4*pspoff;
1013}
1014
1015static inline unsigned char *
1016desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
1017{
1018	sr->imask = imaskp;
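	/* skip past the mask: two bits per instruction slot, rounded up to whole bytes */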
1019	return imaskp + (2*sr->region_len + 7)/8;
1020}
1021
1022/*
1023 * Body descriptors.
1024 */
1025static inline void
1026desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
1027{
1028	sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
1029	sr->epilogue_count = ecount + 1;
1030}
1031
1032static inline void
1033desc_copy_state (unw_word label, struct unw_state_record *sr)
1034{
1035	struct unw_labeled_state *ls;
1036
1037	for (ls = sr->labeled_states; ls; ls = ls->next) {
1038		if (ls->label == label) {
1039			free_state_stack(&sr->curr);
1040			memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
1041			sr->curr.next = dup_state_stack(ls->saved_state.next);
1042			return;
1043		}
1044	}
1045	printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
1046}
1047
1048static inline void
1049desc_label_state (unw_word label, struct unw_state_record *sr)
1050{
1051	struct unw_labeled_state *ls;
1052
1053	ls = alloc_labeled_state();
1054	if (!ls) {
1055		printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
1056		return;
1057	}
1058	ls->label = label;
1059	memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
1060	ls->saved_state.next = dup_state_stack(sr->curr.next);
1061
1062	/* insert into list of labeled states: */
1063	ls->next = sr->labeled_states;
1064	sr->labeled_states = ls;
1065}
1066
1067/*
1068 * General descriptors.
1069 */
1070
1071static inline int
1072desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1073{
1074	if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
1075		return 0;
1076	if (qp > 0) {
1077		if ((sr->pr_val & (1UL << qp)) == 0)
1078			return 0;
1079		sr->pr_mask |= (1UL << qp);
1080	}
1081	return 1;
1082}
1083
1084static inline void
1085desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1086{
1087	struct unw_reg_info *r;
1088
1089	if (!desc_is_active(qp, t, sr))
1090		return;
1091
1092	r = sr->curr.reg + decode_abreg(abreg, 0);
1093	r->where = UNW_WHERE_NONE;
1094	r->when = UNW_WHEN_NEVER;
1095	r->val = 0;
1096}
1097
1098static inline void
1099desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1100		     unsigned char ytreg, struct unw_state_record *sr)
1101{
1102	enum unw_where where = UNW_WHERE_GR;
1103	struct unw_reg_info *r;
1104
1105	if (!desc_is_active(qp, t, sr))
1106		return;
1107
1108	if (x)
1109		where = UNW_WHERE_BR;
1110	else if (ytreg & 0x80)
1111		where = UNW_WHERE_FR;
1112
1113	r = sr->curr.reg + decode_abreg(abreg, 0);
1114	r->where = where;
1115	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1116	r->val = (ytreg & 0x7f);
1117}
1118
1119static inline void
1120desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1121		     struct unw_state_record *sr)
1122{
1123	struct unw_reg_info *r;
1124
1125	if (!desc_is_active(qp, t, sr))
1126		return;
1127
1128	r = sr->curr.reg + decode_abreg(abreg, 1);
1129	r->where = UNW_WHERE_PSPREL;
1130	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1131	r->val = 0x10 - 4*pspoff;
1132}
1133
1134static inline void
1135desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1136		       struct unw_state_record *sr)
1137{
1138	struct unw_reg_info *r;
1139
1140	if (!desc_is_active(qp, t, sr))
1141		return;
1142
1143	r = sr->curr.reg + decode_abreg(abreg, 1);
1144	r->where = UNW_WHERE_SPREL;
1145	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1146	r->val = 4*spoff;
1147}
1148
1149#define UNW_DEC_BAD_CODE(code)			printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1150						       code);
1151
1152/*
1153 * region headers:
1154 */
1155#define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg)	desc_prologue(0,r,m,gr,arg)
1156#define UNW_DEC_PROLOGUE(fmt,b,r,arg)		desc_prologue(b,r,0,32,arg)
1157/*
1158 * prologue descriptors:
1159 */
1160#define UNW_DEC_ABI(fmt,a,c,arg)		desc_abi(a,c,arg)
1161#define UNW_DEC_BR_GR(fmt,b,g,arg)		desc_br_gr(b,g,arg)
1162#define UNW_DEC_BR_MEM(fmt,b,arg)		desc_br_mem(b,arg)
1163#define UNW_DEC_FRGR_MEM(fmt,g,f,arg)		desc_frgr_mem(g,f,arg)
1164#define UNW_DEC_FR_MEM(fmt,f,arg)		desc_fr_mem(f,arg)
1165#define UNW_DEC_GR_GR(fmt,m,g,arg)		desc_gr_gr(m,g,arg)
1166#define UNW_DEC_GR_MEM(fmt,m,arg)		desc_gr_mem(m,arg)
1167#define UNW_DEC_MEM_STACK_F(fmt,t,s,arg)	desc_mem_stack_f(t,s,arg)
1168#define UNW_DEC_MEM_STACK_V(fmt,t,arg)		desc_mem_stack_v(t,arg)
1169#define UNW_DEC_REG_GR(fmt,r,d,arg)		desc_reg_gr(r,d,arg)
1170#define UNW_DEC_REG_PSPREL(fmt,r,o,arg)		desc_reg_psprel(r,o,arg)
1171#define UNW_DEC_REG_SPREL(fmt,r,o,arg)		desc_reg_sprel(r,o,arg)
1172#define UNW_DEC_REG_WHEN(fmt,r,t,arg)		desc_reg_when(r,t,arg)
1173#define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1174#define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1175#define UNW_DEC_PRIUNAT_GR(fmt,r,arg)		desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1176#define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg)	desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1177#define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg)	desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1178#define UNW_DEC_RP_BR(fmt,d,arg)		desc_rp_br(d,arg)
1179#define UNW_DEC_SPILL_BASE(fmt,o,arg)		desc_spill_base(o,arg)
1180#define UNW_DEC_SPILL_MASK(fmt,m,arg)		(m = desc_spill_mask(m,arg))
1181/*
1182 * body descriptors:
1183 */
1184#define UNW_DEC_EPILOGUE(fmt,t,c,arg)		desc_epilogue(t,c,arg)
1185#define UNW_DEC_COPY_STATE(fmt,l,arg)		desc_copy_state(l,arg)
1186#define UNW_DEC_LABEL_STATE(fmt,l,arg)		desc_label_state(l,arg)
1187/*
1188 * general unwind descriptors:
1189 */
1190#define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg)	desc_spill_reg_p(p,t,a,x,y,arg)
1191#define UNW_DEC_SPILL_REG(f,t,a,x,y,arg)	desc_spill_reg_p(0,t,a,x,y,arg)
1192#define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg)	desc_spill_psprel_p(p,t,a,o,arg)
1193#define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg)	desc_spill_psprel_p(0,t,a,o,arg)
1194#define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg)	desc_spill_sprel_p(p,t,a,o,arg)
1195#define UNW_DEC_SPILL_SPREL(f,t,a,o,arg)	desc_spill_sprel_p(0,t,a,o,arg)
1196#define UNW_DEC_RESTORE_P(f,p,t,a,arg)		desc_restore_p(p,t,a,arg)
1197#define UNW_DEC_RESTORE(f,t,a,arg)		desc_restore_p(0,t,a,arg)
1198
1199#include "unwind_decoder.c"
1200
1201
1202/* Unwind scripts. */
1203
1204static inline unw_hash_index_t
1205hash (unsigned long ip)
1206{
1207	/* magic number = ((sqrt(5)-1)/2)*2^64 */
1208	static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL;
1209
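	/* multiplicative (Fibonacci) hashing: drop ip's 4 slot bits, keep the top UNW_LOG_HASH_SIZE bits */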
1210	return (ip >> 4) * hashmagic >> (64 - UNW_LOG_HASH_SIZE);
1211}
1212
1213static inline long
1214cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1215{
1216	read_lock(&script->lock);
1217	if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1218		/* keep the read lock... */
1219		return 1;
1220	read_unlock(&script->lock);
1221	return 0;
1222}
1223
1224static inline struct unw_script *
1225script_lookup (struct unw_frame_info *info)
1226{
1227	struct unw_script *script = unw.cache + info->hint;
1228	unsigned short index;
1229	unsigned long ip, pr;
1230
1231	if (UNW_DEBUG_ON(0))
1232		return NULL;	/* Always regenerate scripts in debug mode */
1233
1234	STAT(++unw.stat.cache.lookups);
1235
1236	ip = info->ip;
1237	pr = info->pr;
1238
1239	if (cache_match(script, ip, pr)) {
1240		STAT(++unw.stat.cache.hinted_hits);
1241		return script;
1242	}
1243
1244	index = unw.hash[hash(ip)];
1245	if (index >= UNW_CACHE_SIZE)
1246		return NULL;
1247
1248	script = unw.cache + index;
1249	while (1) {
1250		if (cache_match(script, ip, pr)) {
1251			/* update hint; no locking required as single-word writes are atomic */
1252			STAT(++unw.stat.cache.normal_hits);
1253			unw.cache[info->prev_script].hint = script - unw.cache;
1254			return script;
1255		}
1256		if (script->coll_chain >= UNW_HASH_SIZE)
1257			return NULL;
1258		script = unw.cache + script->coll_chain;
1259		STAT(++unw.stat.cache.collision_chain_traversals);
1260	}
1261}
1262
1263/*
 1264 * On return, the write lock for SCRIPT is still held.
1265 */
1266static inline struct unw_script *
1267script_new (unsigned long ip)
1268{
1269	struct unw_script *script, *prev, *tmp;
1270	unw_hash_index_t index;
1271	unsigned short head;
1272
1273	STAT(++unw.stat.script.news);
1274
1275	/*
1276	 * Can't (easily) use cmpxchg() here because of ABA problem
1277	 * that is intrinsic in cmpxchg()...
1278	 */
1279	head = unw.lru_head;
1280	script = unw.cache + head;
1281	unw.lru_head = script->lru_chain;
1282
1283	/*
1284	 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1285	 * script->lock.  Thus, if the write_trylock() fails, we simply bail out.  The
1286	 * alternative would be to disable interrupts whenever we hold a read-lock, but
1287	 * that seems silly.
1288	 */
1289	if (!write_trylock(&script->lock))
1290		return NULL;
1291
1292	/* re-insert script at the tail of the LRU chain: */
1293	unw.cache[unw.lru_tail].lru_chain = head;
1294	unw.lru_tail = head;
1295
1296	/* remove the old script from the hash table (if it's there): */
1297	if (script->ip) {
1298		index = hash(script->ip);
1299		tmp = unw.cache + unw.hash[index];
1300		prev = NULL;
1301		while (1) {
1302			if (tmp == script) {
1303				if (prev)
1304					prev->coll_chain = tmp->coll_chain;
1305				else
1306					unw.hash[index] = tmp->coll_chain;
1307				break;
1308			} else
1309				prev = tmp;
 1310			if (tmp->coll_chain >= UNW_CACHE_SIZE)
 1311				/* old script wasn't in the hash-table */
 1312				break;
1313			tmp = unw.cache + tmp->coll_chain;
1314		}
1315	}
1316
1317	/* enter new script in the hash table */
1318	index = hash(ip);
1319	script->coll_chain = unw.hash[index];
1320	unw.hash[index] = script - unw.cache;
1321
1322	script->ip = ip;	/* set new IP while we're holding the locks */
1323
1324	STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1325
1326	script->flags = 0;
1327	script->hint = 0;
1328	script->count = 0;
1329	return script;
1330}
1331
1332static void
1333script_finalize (struct unw_script *script, struct unw_state_record *sr)
1334{
1335	script->pr_mask = sr->pr_mask;
1336	script->pr_val = sr->pr_val;
1337	/*
1338	 * We could down-grade our write-lock on script->lock here but
1339	 * the rwlock API doesn't offer atomic lock downgrading, so
1340	 * we'll just keep the write-lock and release it later when
1341	 * we're done using the script.
1342	 */
1343}
1344
1345static inline void
1346script_emit (struct unw_script *script, struct unw_insn insn)
1347{
1348	if (script->count >= UNW_MAX_SCRIPT_LEN) {
1349		UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1350			__func__, UNW_MAX_SCRIPT_LEN);
1351		return;
1352	}
1353	script->insn[script->count++] = insn;
1354}
1355
1356static inline void
1357emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1358{
1359	struct unw_reg_info *r = sr->curr.reg + i;
1360	enum unw_insn_opcode opc;
1361	struct unw_insn insn;
1362	unsigned long val = 0;
1363
1364	switch (r->where) {
1365	      case UNW_WHERE_GR:
1366		if (r->val >= 32) {
1367			/* register got spilled to a stacked register */
1368			opc = UNW_INSN_SETNAT_TYPE;
1369			val = UNW_NAT_REGSTK;
1370		} else
1371			/* register got spilled to a scratch register */
1372			opc = UNW_INSN_SETNAT_MEMSTK;
1373		break;
1374
1375	      case UNW_WHERE_FR:
1376		opc = UNW_INSN_SETNAT_TYPE;
1377		val = UNW_NAT_VAL;
1378		break;
1379
1380	      case UNW_WHERE_BR:
1381		opc = UNW_INSN_SETNAT_TYPE;
1382		val = UNW_NAT_NONE;
1383		break;
1384
1385	      case UNW_WHERE_PSPREL:
1386	      case UNW_WHERE_SPREL:
1387		opc = UNW_INSN_SETNAT_MEMSTK;
1388		break;
1389
1390	      default:
1391		UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1392			   __func__, r->where);
1393		return;
1394	}
1395	insn.opc = opc;
1396	insn.dst = unw.preg_index[i];
1397	insn.val = val;
1398	script_emit(script, insn);
1399}
1400
1401static void
1402compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1403{
1404	struct unw_reg_info *r = sr->curr.reg + i;
1405	enum unw_insn_opcode opc;
1406	unsigned long val, rval;
1407	struct unw_insn insn;
1408	long need_nat_info;
1409
1410	if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1411		return;
1412
1413	opc = UNW_INSN_MOVE;
1414	val = rval = r->val;
1415	need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1416
1417	switch (r->where) {
1418	      case UNW_WHERE_GR:
1419		if (rval >= 32) {
1420			opc = UNW_INSN_MOVE_STACKED;
1421			val = rval - 32;
1422		} else if (rval >= 4 && rval <= 7) {
1423			if (need_nat_info) {
1424				opc = UNW_INSN_MOVE2;
1425				need_nat_info = 0;
1426			}
1427			val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1428		} else if (rval == 0) {
1429			opc = UNW_INSN_MOVE_CONST;
1430			val = 0;
1431		} else {
1432			/* register got spilled to a scratch register */
1433			opc = UNW_INSN_MOVE_SCRATCH;
1434			val = pt_regs_off(rval);
1435		}
1436		break;
1437
1438	      case UNW_WHERE_FR:
1439		if (rval <= 5)
1440			val = unw.preg_index[UNW_REG_F2  + (rval -  2)];
1441		else if (rval >= 16 && rval <= 31)
1442			val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1443		else {
1444			opc = UNW_INSN_MOVE_SCRATCH;
1445			if (rval <= 11)
1446				val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1447			else
1448				UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1449					   __func__, rval);
1450		}
1451		break;
1452
1453	      case UNW_WHERE_BR:
1454		if (rval >= 1 && rval <= 5)
1455			val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1456		else {
1457			opc = UNW_INSN_MOVE_SCRATCH;
1458			if (rval == 0)
1459				val = offsetof(struct pt_regs, b0);
1460			else if (rval == 6)
1461				val = offsetof(struct pt_regs, b6);
1462			else
1463				val = offsetof(struct pt_regs, b7);
1464		}
1465		break;
1466
1467	      case UNW_WHERE_SPREL:
1468		opc = UNW_INSN_ADD_SP;
1469		break;
1470
1471	      case UNW_WHERE_PSPREL:
1472		opc = UNW_INSN_ADD_PSP;
1473		break;
1474
1475	      default:
 1476		UNW_DPRINT(0, "unwind.%s: register %u has unexpected `where' value of %u\n",
1477			   __func__, i, r->where);
1478		break;
1479	}
1480	insn.opc = opc;
1481	insn.dst = unw.preg_index[i];
1482	insn.val = val;
1483	script_emit(script, insn);
1484	if (need_nat_info)
1485		emit_nat_info(sr, i, script);
1486
1487	if (i == UNW_REG_PSP) {
1488		/*
1489		 * info->psp must contain the _value_ of the previous
 1490		 * sp, not its save location.  We get this by
1491		 * dereferencing the value we just stored in
1492		 * info->psp:
1493		 */
1494		insn.opc = UNW_INSN_LOAD;
1495		insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1496		script_emit(script, insn);
1497	}
1498}
1499
1500static inline const struct unw_table_entry *
1501lookup (struct unw_table *table, unsigned long rel_ip)
1502{
1503	const struct unw_table_entry *e = NULL;
1504	unsigned long lo, hi, mid;
1505
 1506	/* do a binary search for the right entry: */
1507	for (lo = 0, hi = table->length; lo < hi; ) {
1508		mid = (lo + hi) / 2;
1509		e = &table->array[mid];
1510		if (rel_ip < e->start_offset)
1511			hi = mid;
1512		else if (rel_ip >= e->end_offset)
1513			lo = mid + 1;
1514		else
1515			break;
1516	}
1517	if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
1518		return NULL;
1519	return e;
1520}
1521
1522/*
 1523 * Build an unwind script that unwinds from the state described by INFO
 1524 * to the entry point of the function that called it.
1525 */
1526static inline struct unw_script *
1527build_script (struct unw_frame_info *info)
1528{
1529	const struct unw_table_entry *e = NULL;
1530	struct unw_script *script = NULL;
1531	struct unw_labeled_state *ls, *next;
1532	unsigned long ip = info->ip;
1533	struct unw_state_record sr;
1534	struct unw_table *table, *prev;
1535	struct unw_reg_info *r;
1536	struct unw_insn insn;
1537	u8 *dp, *desc_end;
1538	u64 hdr;
1539	int i;
1540	STAT(unsigned long start, parse_start;)
1541
1542	STAT(++unw.stat.script.builds; start = ia64_get_itc());
1543
1544	/* build state record */
1545	memset(&sr, 0, sizeof(sr));
1546	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1547		r->when = UNW_WHEN_NEVER;
1548	sr.pr_val = info->pr;
1549
1550	UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __func__, ip);
1551	script = script_new(ip);
1552	if (!script) {
1553		UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n",  __func__);
1554		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1555		return NULL;
1556	}
1557	unw.cache[info->prev_script].hint = script - unw.cache;
1558
 1559	/* search the kernel's and the modules' unwind tables for IP: */
1560
1561	STAT(parse_start = ia64_get_itc());
1562
1563	prev = NULL;
1564	for (table = unw.tables; table; table = table->next) {
1565		if (ip >= table->start && ip < table->end) {
1566			/*
1567			 * Leave the kernel unwind table at the very front,
1568			 * lest moving it breaks some assumption elsewhere.
1569			 * Otherwise, move the matching table to the second
1570			 * position in the list so that traversals can benefit
1571			 * from commonality in backtrace paths.
1572			 */
1573			if (prev && prev != unw.tables) {
1574				/* unw is safe - we're already spinlocked */
1575				prev->next = table->next;
1576				table->next = unw.tables->next;
1577				unw.tables->next = table;
1578			}
1579			e = lookup(table, ip - table->segment_base);
1580			break;
1581		}
1582		prev = table;
1583	}
1584	if (!e) {
1585		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs)  */
1586		UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1587			__func__, ip, unw.cache[info->prev_script].ip);
1588		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1589		sr.curr.reg[UNW_REG_RP].when = -1;
1590		sr.curr.reg[UNW_REG_RP].val = 0;
1591		compile_reg(&sr, UNW_REG_RP, script);
1592		script_finalize(script, &sr);
1593		STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1594		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1595		return script;
1596	}
1597
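	/*
	 * Convert ip's byte offset within the procedure into a "time": three
	 * instruction slots per 16-byte bundle plus the slot number held in
	 * ip's low four bits.
	 */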
1598	sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
1599			  + (ip & 0xfUL));
1600	hdr = *(u64 *) (table->segment_base + e->info_offset);
1601	dp =   (u8 *)  (table->segment_base + e->info_offset + 8);
1602	desc_end = dp + 8*UNW_LENGTH(hdr);
1603
1604	while (!sr.done && dp < desc_end)
1605		dp = unw_decode(dp, sr.in_body, &sr);
1606
1607	if (sr.when_target > sr.epilogue_start) {
1608		/*
1609		 * sp has been restored and all values on the memory stack below
1610		 * psp also have been restored.
1611		 */
1612		sr.curr.reg[UNW_REG_PSP].val = 0;
1613		sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
1614		sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
1615		for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1616			if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
1617			    || r->where == UNW_WHERE_SPREL)
1618			{
1619				r->val = 0;
1620				r->where = UNW_WHERE_NONE;
1621				r->when = UNW_WHEN_NEVER;
1622			}
1623	}
1624
1625	script->flags = sr.flags;
1626
1627	/*
 1628	 * If RP didn't get saved, generate an entry for the return link
1629	 * register.
1630	 */
1631	if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
1632		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1633		sr.curr.reg[UNW_REG_RP].when = -1;
1634		sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1635		UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1636			   __func__, ip, sr.curr.reg[UNW_REG_RP].where,
1637			   sr.curr.reg[UNW_REG_RP].val);
1638	}
1639
1640#ifdef UNW_DEBUG
1641	UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1642		__func__, table->segment_base + e->start_offset, sr.when_target);
1643	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1644		if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1645			UNW_DPRINT(1, "  %s <- ", unw.preg_name[r - sr.curr.reg]);
1646			switch (r->where) {
1647			      case UNW_WHERE_GR:     UNW_DPRINT(1, "r%lu", r->val); break;
1648			      case UNW_WHERE_FR:     UNW_DPRINT(1, "f%lu", r->val); break;
1649			      case UNW_WHERE_BR:     UNW_DPRINT(1, "b%lu", r->val); break;
1650			      case UNW_WHERE_SPREL:  UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
1651			      case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
1652			      case UNW_WHERE_NONE:
1653				UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
1654				break;
1655
1656			      default:
1657				UNW_DPRINT(1, "BADWHERE(%d)", r->where);
1658				break;
1659			}
1660			UNW_DPRINT(1, "\t\t%d\n", r->when);
1661		}
1662	}
1663#endif
1664
1665	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1666
1667	/* translate state record into unwinder instructions: */
1668
1669	/*
1670	 * First, set psp if we're dealing with a fixed-size frame;
1671	 * subsequent instructions may depend on this value.
1672	 */
1673	if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
1674	    && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
1675	    && sr.curr.reg[UNW_REG_PSP].val != 0) {
1676		/* new psp is sp plus frame size */
1677		insn.opc = UNW_INSN_ADD;
1678		insn.dst = offsetof(struct unw_frame_info, psp)/8;
1679		insn.val = sr.curr.reg[UNW_REG_PSP].val;	/* frame size */
1680		script_emit(script, insn);
1681	}
1682
1683	/* determine where the primary UNaT is: */
1684	if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1685		i = UNW_REG_PRI_UNAT_MEM;
1686	else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
1687		i = UNW_REG_PRI_UNAT_GR;
1688	else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1689		i = UNW_REG_PRI_UNAT_MEM;
1690	else
1691		i = UNW_REG_PRI_UNAT_GR;
1692
1693	compile_reg(&sr, i, script);
1694
1695	for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
1696		compile_reg(&sr, i, script);
1697
1698	/* free labeled register states & stack: */
1699
1700	STAT(parse_start = ia64_get_itc());
1701	for (ls = sr.labeled_states; ls; ls = next) {
1702		next = ls->next;
1703		free_state_stack(&ls->saved_state);
1704		free_labeled_state(ls);
1705	}
1706	free_state_stack(&sr.curr);
1707	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1708
1709	script_finalize(script, &sr);
1710	STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1711	return script;
1712}
1713
1714/*
 1715 * Apply the unwinding actions in SCRIPT and update STATE to reflect
 1716 * the state that existed upon entry to the function that this script
 1717 * represents.
1718 */
1719static inline void
1720run_script (struct unw_script *script, struct unw_frame_info *state)
1721{
1722	struct unw_insn *ip, *limit, next_insn;
1723	unsigned long opc, dst, val, off;
1724	unsigned long *s = (unsigned long *) state;
1725	STAT(unsigned long start;)
1726
1727	STAT(++unw.stat.script.runs; start = ia64_get_itc());
1728	state->flags = script->flags;
1729	ip = script->insn;
1730	limit = script->insn + script->count;
1731	next_insn = *ip;
1732
1733	while (ip++ < limit) {
1734		opc = next_insn.opc;
1735		dst = next_insn.dst;
1736		val = next_insn.val;
1737		next_insn = *ip;
1738
1739	  redo:
1740		switch (opc) {
1741		      case UNW_INSN_ADD:
1742			s[dst] += val;
1743			break;
1744
1745		      case UNW_INSN_MOVE2:
1746			if (!s[val])
1747				goto lazy_init;
1748			s[dst+1] = s[val+1];
1749			s[dst] = s[val];
1750			break;
1751
1752		      case UNW_INSN_MOVE:
1753			if (!s[val])
1754				goto lazy_init;
1755			s[dst] = s[val];
1756			break;
1757
1758		      case UNW_INSN_MOVE_SCRATCH:
1759			if (state->pt) {
1760				s[dst] = (unsigned long) get_scratch_regs(state) + val;
1761			} else {
1762				s[dst] = 0;
1763				UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1764					   __func__, dst, val);
1765			}
1766			break;
1767
1768		      case UNW_INSN_MOVE_CONST:
1769			if (val == 0)
1770				s[dst] = (unsigned long) &unw.r0;
1771			else {
1772				s[dst] = 0;
1773				UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
1774					   __func__, val);
1775			}
1776			break;
1777
1778
1779		      case UNW_INSN_MOVE_STACKED:
1780			s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
1781								    val);
1782			break;
1783
1784		      case UNW_INSN_ADD_PSP:
1785			s[dst] = state->psp + val;
1786			break;
1787
1788		      case UNW_INSN_ADD_SP:
1789			s[dst] = state->sp + val;
1790			break;
1791
1792		      case UNW_INSN_SETNAT_MEMSTK:
1793			if (!state->pri_unat_loc)
1794				state->pri_unat_loc = &state->sw->caller_unat;
1795			/* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
1796			s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
1797			break;
1798
1799		      case UNW_INSN_SETNAT_TYPE:
1800			s[dst+1] = val;
1801			break;
1802
1803		      case UNW_INSN_LOAD:
1804#ifdef UNW_DEBUG
1805			if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
1806			    || s[val] < TASK_SIZE)
1807			{
1808				UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1809					   __func__, s[val]);
1810				break;
1811			}
1812#endif
1813			s[dst] = *(unsigned long *) s[val];
1814			break;
1815		}
1816	}
1817	STAT(unw.stat.script.run_time += ia64_get_itc() - start);
1818	return;
1819
1820  lazy_init:
1821	off = unw.sw_off[val];
1822	s[val] = (unsigned long) state->sw + off;
1823	if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
1824		/*
1825		 * We're initializing a general register: init NaT info, too.  Note that
1826		 * the offset is a multiple of 8 which gives us the 3 bits needed for
1827		 * the type field.
1828		 */
1829		s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
1830	goto redo;
1831}
1832
1833static int
1834find_save_locs (struct unw_frame_info *info)
1835{
1836	int have_write_lock = 0;
1837	struct unw_script *scr;
1838	unsigned long flags = 0;
1839
1840	if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
1841		/* don't let obviously bad addresses pollute the cache */
1842		/* FIXME: should really be level 0 but it occurs too often. KAO */
1843		UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __func__, info->ip);
1844		info->rp_loc = NULL;
1845		return -1;
1846	}
1847
1848	scr = script_lookup(info);
1849	if (!scr) {
1850		spin_lock_irqsave(&unw.lock, flags);
1851		scr = build_script(info);
1852		if (!scr) {
1853			spin_unlock_irqrestore(&unw.lock, flags);
1854			UNW_DPRINT(0,
1855				   "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1856				   __func__, info->ip);
1857			return -1;
1858		}
1859		have_write_lock = 1;
1860	}
1861	info->hint = scr->hint;
1862	info->prev_script = scr - unw.cache;
1863
1864	run_script(scr, info);
1865
1866	if (have_write_lock) {
1867		write_unlock(&scr->lock);
1868		spin_unlock_irqrestore(&unw.lock, flags);
1869	} else
1870		read_unlock(&scr->lock);
1871	return 0;
1872}
1873
1874static int
1875unw_valid(const struct unw_frame_info *info, unsigned long* p)
1876{
1877	unsigned long loc = (unsigned long)p;
1878	return (loc >= info->regstk.limit && loc < info->regstk.top) ||
1879	       (loc >= info->memstk.top && loc < info->memstk.limit);
1880}
1881
1882int
1883unw_unwind (struct unw_frame_info *info)
1884{
1885	unsigned long prev_ip, prev_sp, prev_bsp;
1886	unsigned long ip, pr, num_regs;
1887	STAT(unsigned long start, flags;)
1888	int retval;
1889
1890	STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
1891
1892	prev_ip = info->ip;
1893	prev_sp = info->sp;
1894	prev_bsp = info->bsp;
1895
1896	/* validate the return IP pointer */
1897	if (!unw_valid(info, info->rp_loc)) {
1898		/* FIXME: should really be level 0 but it occurs too often. KAO */
1899		UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1900			   __func__, info->ip);
1901		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1902		return -1;
1903	}
1904	/* restore the ip */
1905	ip = info->ip = *info->rp_loc;
1906	if (ip < GATE_ADDR) {
1907		UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __func__, ip);
1908		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1909		return -1;
1910	}
1911
1912	/* validate the previous stack frame pointer */
1913	if (!unw_valid(info, info->pfs_loc)) {
1914		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__);
1915		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1916		return -1;
1917	}
1918	/* restore the cfm: */
1919	info->cfm_loc = info->pfs_loc;
1920
1921	/* restore the bsp: */
1922	pr = info->pr;
1923	num_regs = 0;
1924	if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
1925		info->pt = info->sp + 16;
1926		if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
1927			num_regs = *info->cfm_loc & 0x7f;		/* size of frame */
1928		info->pfs_loc =
1929			(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
1930		UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __func__, info->pt);
1931	} else
1932		num_regs = (*info->cfm_loc >> 7) & 0x7f;	/* size of locals */
1933	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1934	if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1935		UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1936			__func__, info->bsp, info->regstk.limit, info->regstk.top);
1937		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1938		return -1;
1939	}
1940
1941	/* restore the sp: */
1942	info->sp = info->psp;
1943	if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1944		UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1945			__func__, info->sp, info->memstk.top, info->memstk.limit);
1946		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1947		return -1;
1948	}
1949
1950	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1951		UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1952			   __func__, ip);
1953		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1954		return -1;
1955	}
1956
1957	/* as we unwind, the saved ar.unat becomes the primary unat: */
1958	info->pri_unat_loc = info->unat_loc;
1959
1960	/* finally, restore the predicates: */
1961	unw_get_pr(info, &info->pr);
1962
1963	retval = find_save_locs(info);
1964	STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1965	return retval;
1966}
1967EXPORT_SYMBOL(unw_unwind);
1968
1969int
1970unw_unwind_to_user (struct unw_frame_info *info)
1971{
1972	unsigned long ip, sp, pr = info->pr;
1973
1974	do {
1975		unw_get_sp(info, &sp);
1976		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
1977		    < IA64_PT_REGS_SIZE) {
1978			UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
1979				   __func__);
1980			break;
1981		}
1982		if (unw_is_intr_frame(info) &&
1983		    (pr & (1UL << PRED_USER_STACK)))
1984			return 0;
1985		if (unw_get_pr (info, &pr) < 0) {
1986			unw_get_rp(info, &ip);
1987			UNW_DPRINT(0, "unwind.%s: failed to read "
1988				   "predicate register (ip=0x%lx)\n",
1989				__func__, ip);
1990			return -1;
1991		}
1992	} while (unw_unwind(info) >= 0);
1993	unw_get_ip(info, &ip);
1994	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
1995		   __func__, ip);
1996	return -1;
1997}
1998EXPORT_SYMBOL(unw_unwind_to_user);
1999
2000static void
2001init_frame_info (struct unw_frame_info *info, struct task_struct *t,
2002		 struct switch_stack *sw, unsigned long stktop)
2003{
2004	unsigned long rbslimit, rbstop, stklimit;
2005	STAT(unsigned long start, flags;)
2006
2007	STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
2008
2009	/*
2010	 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
2011	 * don't want to do that because it would be slow as each preserved register would
2012	 * have to be processed.  Instead, what we do here is zero out the frame info and
2013	 * start the unwind process at the function that created the switch_stack frame.
2014	 * When a preserved value in switch_stack needs to be accessed, run_script() will
2015	 * initialize the appropriate pointer on demand.
2016	 */
2017	memset(info, 0, sizeof(*info));
2018
2019	rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
2020	stklimit = (unsigned long) t + IA64_STK_OFFSET;
2021
2022	rbstop   = sw->ar_bspstore;
2023	if (rbstop > stklimit || rbstop < rbslimit)
2024		rbstop = rbslimit;
2025
2026	if (stktop <= rbstop)
2027		stktop = rbstop;
2028	if (stktop > stklimit)
2029		stktop = stklimit;
2030
2031	info->regstk.limit = rbslimit;
2032	info->regstk.top   = rbstop;
2033	info->memstk.limit = stklimit;
2034	info->memstk.top   = stktop;
2035	info->task = t;
2036	info->sw  = sw;
2037	info->sp = info->psp = stktop;
2038	info->pr = sw->pr;
2039	UNW_DPRINT(3, "unwind.%s:\n"
2040		   "  task   0x%lx\n"
2041		   "  rbs = [0x%lx-0x%lx)\n"
2042		   "  stk = [0x%lx-0x%lx)\n"
2043		   "  pr     0x%lx\n"
2044		   "  sw     0x%lx\n"
2045		   "  sp     0x%lx\n",
2046		   __func__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
2047		   info->pr, (unsigned long) info->sw, info->sp);
2048	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
2049}
2050
2051void
2052unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
2053{
2054	unsigned long sol;
2055
2056	init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
2057	info->cfm_loc = &sw->ar_pfs;
2058	sol = (*info->cfm_loc >> 7) & 0x7f;
2059	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
2060	info->ip = sw->b0;
2061	UNW_DPRINT(3, "unwind.%s:\n"
2062		   "  bsp    0x%lx\n"
2063		   "  sol    0x%lx\n"
2064		   "  ip     0x%lx\n",
2065		   __func__, info->bsp, sol, info->ip);
2066	find_save_locs(info);
2067}
2068
2069EXPORT_SYMBOL(unw_init_frame_info);
2070
2071void
2072unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2073{
2074	struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2075
2076	UNW_DPRINT(1, "unwind.%s\n", __func__);
2077	unw_init_frame_info(info, t, sw);
2078}
2079EXPORT_SYMBOL(unw_init_from_blocked_task);
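/*
 * Editor's usage sketch (hedged; not part of the original source): the typical
 * consumer pattern for the entry points above -- walk a blocked task's kernel
 * stack and print each return address.  The helper name is hypothetical;
 * unw_get_ip() comes from <asm/unwind.h>.
 */
static void __maybe_unused
unw_show_trace_sketch (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	do {
		unw_get_ip(&info, &ip);
		if (ip == 0)
			break;
		printk(KERN_DEBUG " [<%016lx>]\n", ip);
	} while (unw_unwind(&info) >= 0);
}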
2080
2081static void
2082init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
2083		   unsigned long gp, const void *table_start, const void *table_end)
2084{
2085	const struct unw_table_entry *start = table_start, *end = table_end;
2086
2087	table->name = name;
2088	table->segment_base = segment_base;
2089	table->gp = gp;
2090	table->start = segment_base + start[0].start_offset;
2091	table->end = segment_base + end[-1].end_offset;
2092	table->array = start;
2093	table->length = end - start;
2094}
2095
2096void *
2097unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
2098		      const void *table_start, const void *table_end)
2099{
2100	const struct unw_table_entry *start = table_start, *end = table_end;
2101	struct unw_table *table;
2102	unsigned long flags;
2103
2104	if (end - start <= 0) {
2105		UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2106			   __func__);
2107		return NULL;
2108	}
2109
2110	table = kmalloc(sizeof(*table), GFP_USER);
2111	if (!table)
2112		return NULL;
2113
2114	init_unwind_table(table, name, segment_base, gp, table_start, table_end);
2115
2116	spin_lock_irqsave(&unw.lock, flags);
2117	{
2118		/* keep kernel unwind table at the front (it's searched most commonly): */
2119		table->next = unw.tables->next;
2120		unw.tables->next = table;
2121	}
2122	spin_unlock_irqrestore(&unw.lock, flags);
2123
2124	return table;
2125}
2126
2127void
2128unw_remove_unwind_table (void *handle)
2129{
2130	struct unw_table *table, *prev;
2131	struct unw_script *tmp;
2132	unsigned long flags;
2133	long index;
2134
2135	if (!handle) {
2136		UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2137			   __func__);
2138		return;
2139	}
2140
2141	table = handle;
2142	if (table == &unw.kernel_table) {
2143		UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2144			   "no-can-do!\n", __func__);
2145		return;
2146	}
2147
2148	spin_lock_irqsave(&unw.lock, flags);
2149	{
2150		/* first, delete the table: */
2151
2152		for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2153			if (prev->next == table)
2154				break;
2155		if (!prev) {
2156			UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2157				   __func__, (void *) table);
2158			spin_unlock_irqrestore(&unw.lock, flags);
2159			return;
2160		}
2161		prev->next = table->next;
2162	}
2163	spin_unlock_irqrestore(&unw.lock, flags);
2164
2165	/* next, remove hash table entries for this table */
2166
2167	for (index = 0; index < UNW_HASH_SIZE; ++index) {
2168		tmp = unw.cache + unw.hash[index];
2169		if (unw.hash[index] >= UNW_CACHE_SIZE
2170		    || tmp->ip < table->start || tmp->ip >= table->end)
2171			continue;
2172
2173		write_lock(&tmp->lock);
2174		{
2175			if (tmp->ip >= table->start && tmp->ip < table->end) {
2176				unw.hash[index] = tmp->coll_chain;
2177				tmp->ip = 0;
2178			}
2179		}
2180		write_unlock(&tmp->lock);
2181	}
2182
2183	kfree(table);
2184}
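/*
 * Editor's sketch (hedged; not part of the original source): how a load-module
 * would pair the two calls above, roughly as the ia64 module loader does.
 * The handle variable and helper name are illustrative assumptions.
 */
static void *sketch_unw_handle;		/* hypothetical per-module handle */

static void __maybe_unused
sketch_module_unwind (const char *name, unsigned long segment_base, unsigned long gp,
		      const void *unw_start, const void *unw_end)
{
	/* at module load time: */
	sketch_unw_handle = unw_add_unwind_table(name, segment_base, gp, unw_start, unw_end);

	/* ... and at module unload time: */
	if (sketch_unw_handle)
		unw_remove_unwind_table(sketch_unw_handle);
}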
2185
2186static int __init
2187create_gate_table (void)
2188{
2189	const struct unw_table_entry *entry, *start, *end;
2190	unsigned long *lp, segbase = GATE_ADDR;
2191	size_t info_size, size;
2192	char *info;
2193	Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
2194	int i;
2195
2196	for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
2197		if (phdr->p_type == PT_IA_64_UNWIND) {
2198			punw = phdr;
2199			break;
2200		}
2201
2202	if (!punw) {
2203		printk("%s: failed to find gate DSO's unwind table!\n", __func__);
2204		return 0;
2205	}
2206
2207	start = (const struct unw_table_entry *) punw->p_vaddr;
2208	end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
2209	size  = 0;
2210
2211	unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
2212
2213	for (entry = start; entry < end; ++entry)
2214		size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2215	size += 8;	/* reserve space for "end of table" marker */
2216
2217	unw.gate_table = kmalloc(size, GFP_KERNEL);
2218	if (!unw.gate_table) {
2219		unw.gate_table_size = 0;
2220		printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __func__);
2221		return 0;
2222	}
2223	unw.gate_table_size = size;
2224
2225	lp = unw.gate_table;
2226	info = (char *) unw.gate_table + size;
2227
2228	for (entry = start; entry < end; ++entry, lp += 3) {
2229		info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2230		info -= info_size;
2231		memcpy(info, (char *) segbase + entry->info_offset, info_size);
2232
2233		lp[0] = segbase + entry->start_offset;		/* start */
2234		lp[1] = segbase + entry->end_offset;		/* end */
2235		lp[2] = info - (char *) unw.gate_table;		/* info */
2236	}
2237	*lp = 0;	/* end-of-table marker */
2238	return 0;
2239}
2240
2241__initcall(create_gate_table);
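/*
 * Editor's note (worked example, hedged): for a gate-page function whose unwind
 * info header encodes UNW_LENGTH == 2, the sizing loop above reserves
 *	3*8 (table entry) + 8 (info header) + 8*2 (descriptor words) = 48 bytes,
 * plus the single 8-byte end-of-table marker shared by all entries.
 */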
2242
2243void __init
2244unw_init (void)
2245{
2246	extern char __gp[];
2247	extern void unw_hash_index_t_is_too_narrow (void);
2248	long i, off;
2249
2250	if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2251		unw_hash_index_t_is_too_narrow();
2252
2253	unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
2254	unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
2255	unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
2256	unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
2257	unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
2258	unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2259	unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2260	unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
2261	for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2262		unw.sw_off[unw.preg_index[i]] = off;
2263	for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2264		unw.sw_off[unw.preg_index[i]] = off;
2265	for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2266		unw.sw_off[unw.preg_index[i]] = off;
2267	for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2268		unw.sw_off[unw.preg_index[i]] = off;
2269
2270	for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2271		if (i > 0)
2272			unw.cache[i].lru_chain = (i - 1);
2273		unw.cache[i].coll_chain = -1;
2274		rwlock_init(&unw.cache[i].lock);
2275	}
2276	unw.lru_head = UNW_CACHE_SIZE - 1;
2277	unw.lru_tail = 0;
2278
2279	init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
2280			  __start_unwind, __end_unwind);
2281}
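/*
 * Editor's note (hedged): the call to the undefined function
 * unw_hash_index_t_is_too_narrow() above is a link-time assertion -- if the
 * condition were ever true, the call would survive optimization and break the
 * final link.  A modern equivalent would be something along the lines of
 *
 *	BUILD_BUG_ON(8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE);
 */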
2282
2283/*
2284 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2285 *
2286 *	This system call has been deprecated.  The new and improved way to get
2287 *	at the kernel's unwind info is via the gate DSO.  The address of the
2288 *	ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
2289 *
2290 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2291 *
2292 * This system call copies the unwind data into the buffer pointed to by BUF and returns
2293 * the size of the unwind data.  If BUF_SIZE is smaller than the size of the unwind data
2294 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2295 * unwind data.
2296 *
2297 * The first portion of the unwind data contains an unwind table and the rest contains the
2298 * associated unwind info (in no particular order).  The unwind table consists of a table
2299 * of entries of the form:
2300 *
2301 *	u64 start;	(64-bit address of start of function)
2302 *	u64 end;	(64-bit address of end of function)
2303 *	u64 info;	(BUF-relative offset to unwind info)
2304 *
2305 * The end of the unwind table is indicated by an entry with a START address of zero.
2306 *
2307 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2308 * on the format of the unwind info.
2309 *
2310 * ERRORS
2311 *	EFAULT	BUF points outside your accessible address space.
2312 */
2313asmlinkage long
2314sys_getunwind (void __user *buf, size_t buf_size)
2315{
2316	if (buf && buf_size >= unw.gate_table_size)
2317		if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
2318			return -EFAULT;
2319	return unw.gate_table_size;
2320}
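/*
 * Editor's usage sketch (hedged; not part of the original source): walking the
 * blob described above.  The layout is exactly what create_gate_table() builds:
 * (start, end, info) triples terminated by a zero start address, with info
 * being a BUF-relative byte offset.  The helper name is hypothetical.
 */
static void __maybe_unused
sketch_dump_gate_table (const unsigned long *buf)
{
	const unsigned long *lp;

	for (lp = buf; lp[0] != 0; lp += 3)
		printk(KERN_DEBUG "fn [0x%lx-0x%lx), unwind info at buf+0x%lx\n",
		       lp[0], lp[1], lp[2]);
}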
v4.6
   1/*
   2 * Copyright (C) 1999-2004 Hewlett-Packard Co
   3 *	David Mosberger-Tang <davidm@hpl.hp.com>
   4 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
   5 * 	- Change pt_regs_off() to make it less dependent on pt_regs structure.
   6 */
   7/*
   8 * This file implements call frame unwind support for the Linux
   9 * kernel.  Parsing and processing the unwind information is
  10 * time-consuming, so this implementation translates the unwind
  11 * descriptors into unwind scripts.  These scripts are very simple
  12 * (basically a sequence of assignments) and efficient to execute.
  13 * They are cached for later re-use.  Each script is specific for a
  14 * given instruction pointer address and the set of predicate values
  15 * that the script depends on (most unwind descriptors are
  16 * unconditional and scripts often do not depend on predicates at
  17 * all).  This code is based on the unwind conventions described in
  18 * the "IA-64 Software Conventions and Runtime Architecture" manual.
  19 *
  20 * SMP conventions:
  21 *	o updates to the global unwind data (in structure "unw") are serialized
  22 *	  by the unw.lock spinlock
  23 *	o each unwind script has its own read-write lock; a thread must acquire
  24 *	  a read lock before executing a script and must acquire a write lock
  25 *	  before modifying a script
  26 *	o if both the unw.lock spinlock and a script's read-write lock must be
  27 *	  acquired, then the read-write lock must be acquired first.
  28 */
  29#include <linux/module.h>
  30#include <linux/bootmem.h>
  31#include <linux/elf.h>
  32#include <linux/kernel.h>
  33#include <linux/sched.h>
  34#include <linux/slab.h>
  35
  36#include <asm/unwind.h>
  37
  38#include <asm/delay.h>
  39#include <asm/page.h>
  40#include <asm/ptrace.h>
  41#include <asm/ptrace_offsets.h>
  42#include <asm/rse.h>
  43#include <asm/sections.h>
  44#include <asm/uaccess.h>
  45
  46#include "entry.h"
  47#include "unwind_i.h"
  48
  49#define UNW_LOG_CACHE_SIZE	7	/* each unw_script is ~256 bytes in size */
  50#define UNW_CACHE_SIZE		(1 << UNW_LOG_CACHE_SIZE)
  51
  52#define UNW_LOG_HASH_SIZE	(UNW_LOG_CACHE_SIZE + 1)
  53#define UNW_HASH_SIZE		(1 << UNW_LOG_HASH_SIZE)
  54
  55#define UNW_STATS	0	/* WARNING: this disables interrupts for long time-spans!! */
  56
  57#ifdef UNW_DEBUG
  58  static unsigned int unw_debug_level = UNW_DEBUG;
  59#  define UNW_DEBUG_ON(n)	unw_debug_level >= n
  60   /* Do not code a printk level, not all debug lines end in newline */
  61#  define UNW_DPRINT(n, ...)  if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
  62#  undef inline
  63#  define inline
  64#else /* !UNW_DEBUG */
  65#  define UNW_DEBUG_ON(n)  0
  66#  define UNW_DPRINT(n, ...)
  67#endif /* UNW_DEBUG */
  68
  69#if UNW_STATS
  70# define STAT(x...)	x
  71#else
  72# define STAT(x...)
  73#endif
  74
  75#define alloc_reg_state()	kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
  76#define free_reg_state(usr)	kfree(usr)
  77#define alloc_labeled_state()	kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
  78#define free_labeled_state(usr)	kfree(usr)
  79
  80typedef unsigned long unw_word;
  81typedef unsigned char unw_hash_index_t;
  82
  83static struct {
  84	spinlock_t lock;			/* spinlock for unwind data */
  85
  86	/* list of unwind tables (one per load-module) */
  87	struct unw_table *tables;
  88
  89	unsigned long r0;			/* constant 0 for r0 */
  90
  91	/* table of registers that prologues can save (and order in which they're saved): */
  92	const unsigned char save_order[8];
  93
  94	/* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
  95	unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
  96
  97	unsigned short lru_head;		/* index of least-recently used script */
  98	unsigned short lru_tail;		/* index of most-recently used script */
  99
 100	/* index into unw_frame_info for preserved register i */
 101	unsigned short preg_index[UNW_NUM_REGS];
 102
 103	short pt_regs_offsets[32];
 104
 105	/* unwind table for the kernel: */
 106	struct unw_table kernel_table;
 107
 108	/* unwind table describing the gate page (kernel code that is mapped into user space): */
 109	size_t gate_table_size;
 110	unsigned long *gate_table;
 111
 112	/* hash table that maps instruction pointer to script index: */
 113	unsigned short hash[UNW_HASH_SIZE];
 114
 115	/* script cache: */
 116	struct unw_script cache[UNW_CACHE_SIZE];
 117
 118# ifdef UNW_DEBUG
 119	const char *preg_name[UNW_NUM_REGS];
 120# endif
 121# if UNW_STATS
 122	struct {
 123		struct {
 124			int lookups;
 125			int hinted_hits;
 126			int normal_hits;
 127			int collision_chain_traversals;
 128		} cache;
 129		struct {
 130			unsigned long build_time;
 131			unsigned long run_time;
 132			unsigned long parse_time;
 133			int builds;
 134			int news;
 135			int collisions;
 136			int runs;
 137		} script;
 138		struct {
 139			unsigned long init_time;
 140			unsigned long unwind_time;
 141			int inits;
 142			int unwinds;
 143		} api;
 144	} stat;
 145# endif
 146} unw = {
 147	.tables = &unw.kernel_table,
 148	.lock = __SPIN_LOCK_UNLOCKED(unw.lock),
 149	.save_order = {
 150		UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
 151		UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
 152	},
 153	.preg_index = {
 154		offsetof(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_GR */
 155		offsetof(struct unw_frame_info, pri_unat_loc)/8,	/* PRI_UNAT_MEM */
 156		offsetof(struct unw_frame_info, bsp_loc)/8,
 157		offsetof(struct unw_frame_info, bspstore_loc)/8,
 158		offsetof(struct unw_frame_info, pfs_loc)/8,
 159		offsetof(struct unw_frame_info, rnat_loc)/8,
 160		offsetof(struct unw_frame_info, psp)/8,
 161		offsetof(struct unw_frame_info, rp_loc)/8,
 162		offsetof(struct unw_frame_info, r4)/8,
 163		offsetof(struct unw_frame_info, r5)/8,
 164		offsetof(struct unw_frame_info, r6)/8,
 165		offsetof(struct unw_frame_info, r7)/8,
 166		offsetof(struct unw_frame_info, unat_loc)/8,
 167		offsetof(struct unw_frame_info, pr_loc)/8,
 168		offsetof(struct unw_frame_info, lc_loc)/8,
 169		offsetof(struct unw_frame_info, fpsr_loc)/8,
 170		offsetof(struct unw_frame_info, b1_loc)/8,
 171		offsetof(struct unw_frame_info, b2_loc)/8,
 172		offsetof(struct unw_frame_info, b3_loc)/8,
 173		offsetof(struct unw_frame_info, b4_loc)/8,
 174		offsetof(struct unw_frame_info, b5_loc)/8,
 175		offsetof(struct unw_frame_info, f2_loc)/8,
 176		offsetof(struct unw_frame_info, f3_loc)/8,
 177		offsetof(struct unw_frame_info, f4_loc)/8,
 178		offsetof(struct unw_frame_info, f5_loc)/8,
 179		offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
 180		offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
 181		offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
 182		offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
 183		offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
 184		offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
 185		offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
 186		offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
 187		offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
 188		offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
 189		offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
 190		offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
 191		offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
 192		offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
 193		offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
 194		offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
 195	},
 196	.pt_regs_offsets = {
 197		[0] = -1,
 198		offsetof(struct pt_regs,  r1),
 199		offsetof(struct pt_regs,  r2),
 200		offsetof(struct pt_regs,  r3),
 201		[4] = -1, [5] = -1, [6] = -1, [7] = -1,
 202		offsetof(struct pt_regs,  r8),
 203		offsetof(struct pt_regs,  r9),
 204		offsetof(struct pt_regs, r10),
 205		offsetof(struct pt_regs, r11),
 206		offsetof(struct pt_regs, r12),
 207		offsetof(struct pt_regs, r13),
 208		offsetof(struct pt_regs, r14),
 209		offsetof(struct pt_regs, r15),
 210		offsetof(struct pt_regs, r16),
 211		offsetof(struct pt_regs, r17),
 212		offsetof(struct pt_regs, r18),
 213		offsetof(struct pt_regs, r19),
 214		offsetof(struct pt_regs, r20),
 215		offsetof(struct pt_regs, r21),
 216		offsetof(struct pt_regs, r22),
 217		offsetof(struct pt_regs, r23),
 218		offsetof(struct pt_regs, r24),
 219		offsetof(struct pt_regs, r25),
 220		offsetof(struct pt_regs, r26),
 221		offsetof(struct pt_regs, r27),
 222		offsetof(struct pt_regs, r28),
 223		offsetof(struct pt_regs, r29),
 224		offsetof(struct pt_regs, r30),
 225		offsetof(struct pt_regs, r31),
 226	},
 227	.hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
 228#ifdef UNW_DEBUG
 229	.preg_name = {
 230		"pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
 231		"r4", "r5", "r6", "r7",
 232		"ar.unat", "pr", "ar.lc", "ar.fpsr",
 233		"b1", "b2", "b3", "b4", "b5",
 234		"f2", "f3", "f4", "f5",
 235		"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
 236		"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
 237	}
 238#endif
 239};
 240
 241static inline int
 242read_only (void *addr)
 243{
 244	return (unsigned long) ((char *) addr - (char *) &unw.r0) < sizeof(unw.r0);
 245}
 246
 247/*
 248 * Returns offset of rREG in struct pt_regs.
 249 */
 250static inline unsigned long
 251pt_regs_off (unsigned long reg)
 252{
 253	short off = -1;
 254
 255	if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
 256		off = unw.pt_regs_offsets[reg];
 257
 258	if (off < 0) {
 259		UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __func__, reg);
 260		off = 0;
 261	}
 262	return (unsigned long) off;
 263}
 264
 265static inline struct pt_regs *
 266get_scratch_regs (struct unw_frame_info *info)
 267{
 268	if (!info->pt) {
 269		/* This should not happen with valid unwind info.  */
 270		UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __func__);
 271		if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
 272			info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
 273		else
 274			info->pt = info->sp - 16;
 275	}
 276	UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __func__, info->sp, info->pt);
 277	return (struct pt_regs *) info->pt;
 278}
 279
 280/* Unwind accessors.  */
 281
 282int
 283unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
 284{
 285	unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
 286	struct unw_ireg *ireg;
 287	struct pt_regs *pt;
 288
 289	if ((unsigned) regnum - 1 >= 127) {
 290		if (regnum == 0 && !write) {
 291			*val = 0;	/* read r0 always returns 0 */
 292			*nat = 0;
 293			return 0;
 294		}
 295		UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
 296			   __func__, regnum);
 297		return -1;
 298	}
 299
 300	if (regnum < 32) {
 301		if (regnum >= 4 && regnum <= 7) {
 302			/* access a preserved register */
 303			ireg = &info->r4 + (regnum - 4);
 304			addr = ireg->loc;
 305			if (addr) {
 306				nat_addr = addr + ireg->nat.off;
 307				switch (ireg->nat.type) {
 308				      case UNW_NAT_VAL:
 309					/* simulate getf.sig/setf.sig */
 310					if (write) {
 311						if (*nat) {
 312							/* write NaTVal and be done with it */
 313							addr[0] = 0;
 314							addr[1] = 0x1fffe;
 315							return 0;
 316						}
 317						addr[1] = 0x1003e;
 318					} else {
 319						if (addr[0] == 0 && addr[1] == 0x1fffe) {
 320							/* return NaT and be done with it */
 321							*val = 0;
 322							*nat = 1;
 323							return 0;
 324						}
 325					}
 326					/* fall through */
 327				      case UNW_NAT_NONE:
 328					dummy_nat = 0;
 329					nat_addr = &dummy_nat;
 330					break;
 331
 332				      case UNW_NAT_MEMSTK:
 333					nat_mask = (1UL << ((long) addr & 0x1f8)/8);
 334					break;
 335
 336				      case UNW_NAT_REGSTK:
 337					nat_addr = ia64_rse_rnat_addr(addr);
 338					if ((unsigned long) addr < info->regstk.limit
 339					    || (unsigned long) addr >= info->regstk.top)
 340					{
 341						UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
 342							"[0x%lx-0x%lx)\n",
 343							__func__, (void *) addr,
 344							info->regstk.limit,
 345							info->regstk.top);
 346						return -1;
 347					}
 348					if ((unsigned long) nat_addr >= info->regstk.top)
 349						nat_addr = &info->sw->ar_rnat;
 350					nat_mask = (1UL << ia64_rse_slot_num(addr));
 351					break;
 352				}
 353			} else {
 354				addr = &info->sw->r4 + (regnum - 4);
 355				nat_addr = &info->sw->ar_unat;
 356				nat_mask = (1UL << ((long) addr & 0x1f8)/8);
 357			}
 358		} else {
 359			/* access a scratch register */
 360			pt = get_scratch_regs(info);
 361			addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
 362			if (info->pri_unat_loc)
 363				nat_addr = info->pri_unat_loc;
 364			else
 365				nat_addr = &info->sw->caller_unat;
 366			nat_mask = (1UL << ((long) addr & 0x1f8)/8);
 367		}
 368	} else {
 369		/* access a stacked register */
 370		addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
 371		nat_addr = ia64_rse_rnat_addr(addr);
 372		if ((unsigned long) addr < info->regstk.limit
 373		    || (unsigned long) addr >= info->regstk.top)
 374		{
 375			UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
 376				   "of rbs\n",  __func__);
 377			return -1;
 378		}
 379		if ((unsigned long) nat_addr >= info->regstk.top)
 380			nat_addr = &info->sw->ar_rnat;
 381		nat_mask = (1UL << ia64_rse_slot_num(addr));
 382	}
 383
 384	if (write) {
 385		if (read_only(addr)) {
 386			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
 387				__func__);
 388		} else {
 389			*addr = *val;
 390			if (*nat)
 391				*nat_addr |= nat_mask;
 392			else
 393				*nat_addr &= ~nat_mask;
 394		}
 395	} else {
 396		if ((*nat_addr & nat_mask) == 0) {
 397			*val = *addr;
 398			*nat = 0;
 399		} else {
 400			*val = 0;	/* if register is a NaT, *addr may contain kernel data! */
 401			*nat = 1;
 402		}
 403	}
 404	return 0;
 405}
 406EXPORT_SYMBOL(unw_access_gr);
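/*
 * Editor's note (worked example, hedged): for UNW_NAT_MEMSTK the NaT bit of a
 * value spilled by st8.spill to address A lives in bit ((A & 0x1f8) >> 3) of
 * the collecting UNaT word, which is what the nat_mask expressions above
 * compute.  E.g. A = 0xe000000012345b28: 0xb28 & 0x1f8 = 0x128, 0x128/8 = 37,
 * so the NaT bit is bit 37 of the relevant ar.unat copy.
 */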
 407
 408int
 409unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
 410{
 411	unsigned long *addr;
 412	struct pt_regs *pt;
 413
 414	switch (regnum) {
 415		/* scratch: */
 416	      case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
 417	      case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
 418	      case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
 419
 420		/* preserved: */
 421	      case 1: case 2: case 3: case 4: case 5:
 422		addr = *(&info->b1_loc + (regnum - 1));
 423		if (!addr)
 424			addr = &info->sw->b1 + (regnum - 1);
 425		break;
 426
 427	      default:
 428		UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
 429			   __func__, regnum);
 430		return -1;
 431	}
 432	if (write)
 433		if (read_only(addr)) {
 434			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
 435				__func__);
 436		} else
 437			*addr = *val;
 438	else
 439		*val = *addr;
 440	return 0;
 441}
 442EXPORT_SYMBOL(unw_access_br);
 443
 444int
 445unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
 446{
 447	struct ia64_fpreg *addr = NULL;
 448	struct pt_regs *pt;
 449
 450	if ((unsigned) (regnum - 2) >= 126) {
 451		UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
 452			   __func__, regnum);
 453		return -1;
 454	}
 455
 456	if (regnum <= 5) {
 457		addr = *(&info->f2_loc + (regnum - 2));
 458		if (!addr)
 459			addr = &info->sw->f2 + (regnum - 2);
 460	} else if (regnum <= 15) {
 461		if (regnum <= 11) {
 462			pt = get_scratch_regs(info);
 463			addr = &pt->f6  + (regnum - 6);
 464		}
 465		else
 466			addr = &info->sw->f12 + (regnum - 12);
 467	} else if (regnum <= 31) {
 468		addr = info->fr_loc[regnum - 16];
 469		if (!addr)
 470			addr = &info->sw->f16 + (regnum - 16);
 471	} else {
 472		struct task_struct *t = info->task;
 473
 474		if (write)
 475			ia64_sync_fph(t);
 476		else
 477			ia64_flush_fph(t);
 478		addr = t->thread.fph + (regnum - 32);
 479	}
 480
 481	if (write)
 482		if (read_only(addr)) {
 483			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
 484				__func__);
 485		} else
 486			*addr = *val;
 487	else
 488		*val = *addr;
 489	return 0;
 490}
 491EXPORT_SYMBOL(unw_access_fr);
 492
 493int
 494unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
 495{
 496	unsigned long *addr;
 497	struct pt_regs *pt;
 498
 499	switch (regnum) {
 500	      case UNW_AR_BSP:
 501		addr = info->bsp_loc;
 502		if (!addr)
 503			addr = &info->sw->ar_bspstore;
 504		break;
 505
 506	      case UNW_AR_BSPSTORE:
 507		addr = info->bspstore_loc;
 508		if (!addr)
 509			addr = &info->sw->ar_bspstore;
 510		break;
 511
 512	      case UNW_AR_PFS:
 513		addr = info->pfs_loc;
 514		if (!addr)
 515			addr = &info->sw->ar_pfs;
 516		break;
 517
 518	      case UNW_AR_RNAT:
 519		addr = info->rnat_loc;
 520		if (!addr)
 521			addr = &info->sw->ar_rnat;
 522		break;
 523
 524	      case UNW_AR_UNAT:
 525		addr = info->unat_loc;
 526		if (!addr)
 527			addr = &info->sw->caller_unat;
 528		break;
 529
 530	      case UNW_AR_LC:
 531		addr = info->lc_loc;
 532		if (!addr)
 533			addr = &info->sw->ar_lc;
 534		break;
 535
 536	      case UNW_AR_EC:
 537		if (!info->cfm_loc)
 538			return -1;
 539		if (write)
 540			*info->cfm_loc =
 541				(*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
 542		else
 543			*val = (*info->cfm_loc >> 52) & 0x3f;
 544		return 0;
 545
 546	      case UNW_AR_FPSR:
 547		addr = info->fpsr_loc;
 548		if (!addr)
 549			addr = &info->sw->ar_fpsr;
 550		break;
 551
 552	      case UNW_AR_RSC:
 553		pt = get_scratch_regs(info);
 554		addr = &pt->ar_rsc;
 555		break;
 556
 557	      case UNW_AR_CCV:
 558		pt = get_scratch_regs(info);
 559		addr = &pt->ar_ccv;
 560		break;
 561
 562	      case UNW_AR_CSD:
 563		pt = get_scratch_regs(info);
 564		addr = &pt->ar_csd;
 565		break;
 566
 567	      case UNW_AR_SSD:
 568		pt = get_scratch_regs(info);
 569		addr = &pt->ar_ssd;
 570		break;
 571
 572	      default:
 573		UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
 574			   __func__, regnum);
 575		return -1;
 576	}
 577
 578	if (write) {
 579		if (read_only(addr)) {
 580			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
 581				__func__);
 582		} else
 583			*addr = *val;
 584	} else
 585		*val = *addr;
 586	return 0;
 587}
 588EXPORT_SYMBOL(unw_access_ar);
 589
 590int
 591unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
 592{
 593	unsigned long *addr;
 594
 595	addr = info->pr_loc;
 596	if (!addr)
 597		addr = &info->sw->pr;
 598
 599	if (write) {
 600		if (read_only(addr)) {
 601			UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n",
 602				__func__);
 603		} else
 604			*addr = *val;
 605	} else
 606		*val = *addr;
 607	return 0;
 608}
 609EXPORT_SYMBOL(unw_access_pr);
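/*
 * Editor's usage sketch (hedged; not part of the original source): reading a
 * few values of a frame through the accessors above.  The helper name is
 * hypothetical; such a callback could be driven by, e.g.,
 * unw_init_running(sketch_dump_frame, NULL) from <asm/unwind.h>.
 */
static void __maybe_unused
sketch_dump_frame (struct unw_frame_info *info, void *arg)
{
	unsigned long rp, pr, r4;
	char nat;

	unw_access_br(info, 0, &rp, 0);		/* b0: the return pointer */
	unw_access_pr(info, &pr, 0);		/* the predicate registers */
	unw_access_gr(info, 4, &r4, &nat, 0);	/* preserved r4 and its NaT bit */
	printk(KERN_DEBUG "rp=0x%lx pr=0x%lx r4=0x%lx%s\n",
	       rp, pr, r4, nat ? " (NaT)" : "");
}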
 610
 611
 612/* Routines to manipulate the state stack.  */
 613
 614static inline void
 615push (struct unw_state_record *sr)
 616{
 617	struct unw_reg_state *rs;
 618
 619	rs = alloc_reg_state();
 620	if (!rs) {
 621		printk(KERN_ERR "unwind: cannot stack reg state!\n");
 622		return;
 623	}
 624	memcpy(rs, &sr->curr, sizeof(*rs));
 625	sr->curr.next = rs;
 626}
 627
 628static void
 629pop (struct unw_state_record *sr)
 630{
 631	struct unw_reg_state *rs = sr->curr.next;
 632
 633	if (!rs) {
 634		printk(KERN_ERR "unwind: stack underflow!\n");
 635		return;
 636	}
 637	memcpy(&sr->curr, rs, sizeof(*rs));
 638	free_reg_state(rs);
 639}
 640
 641/* Make a copy of the state stack.  Non-recursive to avoid stack overflows.  */
 642static struct unw_reg_state *
 643dup_state_stack (struct unw_reg_state *rs)
 644{
 645	struct unw_reg_state *copy, *prev = NULL, *first = NULL;
 646
 647	while (rs) {
 648		copy = alloc_reg_state();
 649		if (!copy) {
 650			printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
 651			return NULL;
 652		}
 653		memcpy(copy, rs, sizeof(*copy));
 654		if (first)
 655			prev->next = copy;
 656		else
 657			first = copy;
 658		rs = rs->next;
 659		prev = copy;
 660	}
 661	return first;
 662}
 663
 664/* Free all stacked register states (but not RS itself).  */
 665static void
 666free_state_stack (struct unw_reg_state *rs)
 667{
 668	struct unw_reg_state *p, *next;
 669
 670	for (p = rs->next; p != NULL; p = next) {
 671		next = p->next;
 672		free_reg_state(p);
 673	}
 674	rs->next = NULL;
 675}
 676
 677/* Unwind decoder routines */
 678
 679static enum unw_register_index __attribute_const__
 680decode_abreg (unsigned char abreg, int memory)
 681{
 682	switch (abreg) {
 683	      case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
 684	      case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
 685	      case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
 686	      case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
 687	      case 0x60: return UNW_REG_PR;
 688	      case 0x61: return UNW_REG_PSP;
 689	      case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
 690	      case 0x63: return UNW_REG_RP;
 691	      case 0x64: return UNW_REG_BSP;
 692	      case 0x65: return UNW_REG_BSPSTORE;
 693	      case 0x66: return UNW_REG_RNAT;
 694	      case 0x67: return UNW_REG_UNAT;
 695	      case 0x68: return UNW_REG_FPSR;
 696	      case 0x69: return UNW_REG_PFS;
 697	      case 0x6a: return UNW_REG_LC;
 698	      default:
 699		break;
 700	}
 701	UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __func__, abreg);
 702	return UNW_REG_LC;
 703}
 704
 705static void
 706set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
 707{
 708	reg->val = val;
 709	reg->where = where;
 710	if (reg->when == UNW_WHEN_NEVER)
 711		reg->when = when;
 712}
 713
 714static void
 715alloc_spill_area (unsigned long *offp, unsigned long regsize,
 716		  struct unw_reg_info *lo, struct unw_reg_info *hi)
 717{
 718	struct unw_reg_info *reg;
 719
 720	for (reg = hi; reg >= lo; --reg) {
 721		if (reg->where == UNW_WHERE_SPILL_HOME) {
 722			reg->where = UNW_WHERE_PSPREL;
 723			*offp -= regsize;
 724			reg->val = *offp;
 725		}
 726	}
 727}
 728
 729static inline void
 730spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
 731{
 732	struct unw_reg_info *reg;
 733
 734	for (reg = *regp; reg <= lim; ++reg) {
 735		if (reg->where == UNW_WHERE_SPILL_HOME) {
 736			reg->when = t;
 737			*regp = reg + 1;
 738			return;
 739		}
 740	}
 741	UNW_DPRINT(0, "unwind.%s: excess spill!\n",  __func__);
 742}
 743
 744static inline void
 745finish_prologue (struct unw_state_record *sr)
 746{
 747	struct unw_reg_info *reg;
 748	unsigned long off;
 749	int i;
 750
 751	/*
 752	 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
 753	 * for Using Unwind Descriptors", rule 3):
 754	 */
 755	for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
 756		reg = sr->curr.reg + unw.save_order[i];
 757		if (reg->where == UNW_WHERE_GR_SAVE) {
 758			reg->where = UNW_WHERE_GR;
 759			reg->val = sr->gr_save_loc++;
 760		}
 761	}
 762
 763	/*
 764	 * Next, compute when the fp, general, and branch registers get
 765	 * saved.  This must come before alloc_spill_area() because
 766	 * we need to know which registers are spilled to their home
 767	 * locations.
 768	 */
 769	if (sr->imask) {
 770		unsigned char kind, mask = 0, *cp = sr->imask;
 771		int t;
 772		static const unsigned char limit[3] = {
 773			UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
 774		};
 775		struct unw_reg_info *(regs[3]);
 776
 777		regs[0] = sr->curr.reg + UNW_REG_F2;
 778		regs[1] = sr->curr.reg + UNW_REG_R4;
 779		regs[2] = sr->curr.reg + UNW_REG_B1;
 780
 781		for (t = 0; t < sr->region_len; ++t) {
 782			if ((t & 3) == 0)
 783				mask = *cp++;
 784			kind = (mask >> 2*(3-(t & 3))) & 3;
 785			if (kind > 0)
 786				spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
 787						sr->region_start + t);
 788		}
 789	}
 790	/*
 791	 * Next, lay out the memory stack spill area:
 792	 */
 793	if (sr->any_spills) {
 794		off = sr->spill_offset;
 795		alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
 796		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
 797		alloc_spill_area(&off,  8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
 798	}
 799}
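/*
 * Editor's note (worked example, hedged): the imask stream decoded above packs
 * one 2-bit code per instruction slot, high bits first: 0 = no save, 1 = FR
 * save, 2 = GR save, 3 = BR save.  An imask byte of 0x1b (binary 00 01 10 11)
 * therefore means: slot t+0 saves nothing, t+1 saves the next FR, t+2 the next
 * GR, and t+3 the next BR, each resolved by spill_next_when() to the next
 * pending UNW_WHERE_SPILL_HOME register of that class.
 */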
 800
 801/*
 802 * Region header descriptors.
 803 */
 804
 805static void
 806desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
 807	       struct unw_state_record *sr)
 808{
 809	int i, region_start;
 810
 811	if (!(sr->in_body || sr->first_region))
 812		finish_prologue(sr);
 813	sr->first_region = 0;
 814
 815	/* check if we're done: */
 816	if (sr->when_target < sr->region_start + sr->region_len) {
 817		sr->done = 1;
 818		return;
 819	}
 820
 821	region_start = sr->region_start + sr->region_len;
 822
 823	for (i = 0; i < sr->epilogue_count; ++i)
 824		pop(sr);
 825	sr->epilogue_count = 0;
 826	sr->epilogue_start = UNW_WHEN_NEVER;
 827
 828	sr->region_start = region_start;
 829	sr->region_len = rlen;
 830	sr->in_body = body;
 831
 832	if (!body) {
 833		push(sr);
 834
 835		for (i = 0; i < 4; ++i) {
 836			if (mask & 0x8)
 837				set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
 838					sr->region_start + sr->region_len - 1, grsave++);
 839			mask <<= 1;
 840		}
 841		sr->gr_save_loc = grsave;
 842		sr->any_spills = 0;
 843		sr->imask = NULL;
 844		sr->spill_offset = 0x10;	/* default to psp+16 */
 845	}
 846}
 847
 848/*
 849 * Prologue descriptors.
 850 */
 851
 852static inline void
 853desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
 854{
 855	if (abi == 3 && context == 'i') {
 856		sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
 857		UNW_DPRINT(3, "unwind.%s: interrupt frame\n",  __func__);
 858	}
 859	else
 860		UNW_DPRINT(0, "unwind.%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
 861				__func__, abi, context);
 862}
 863
 864static inline void
 865desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
 866{
 867	int i;
 868
 869	for (i = 0; i < 5; ++i) {
 870		if (brmask & 1)
 871			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
 872				sr->region_start + sr->region_len - 1, gr++);
 873		brmask >>= 1;
 874	}
 875}
 876
 877static inline void
 878desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
 879{
 880	int i;
 881
 882	for (i = 0; i < 5; ++i) {
 883		if (brmask & 1) {
 884			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
 885				sr->region_start + sr->region_len - 1, 0);
 886			sr->any_spills = 1;
 887		}
 888		brmask >>= 1;
 889	}
 890}
 891
 892static inline void
 893desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
 894{
 895	int i;
 896
 897	for (i = 0; i < 4; ++i) {
 898		if ((grmask & 1) != 0) {
 899			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
 900				sr->region_start + sr->region_len - 1, 0);
 901			sr->any_spills = 1;
 902		}
 903		grmask >>= 1;
 904	}
 905	for (i = 0; i < 20; ++i) {
 906		if ((frmask & 1) != 0) {
 907			int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
 908			set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
 909				sr->region_start + sr->region_len - 1, 0);
 910			sr->any_spills = 1;
 911		}
 912		frmask >>= 1;
 913	}
 914}
 915
 916static inline void
 917desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
 918{
 919	int i;
 920
 921	for (i = 0; i < 4; ++i) {
 922		if ((frmask & 1) != 0) {
 923			set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
 924				sr->region_start + sr->region_len - 1, 0);
 925			sr->any_spills = 1;
 926		}
 927		frmask >>= 1;
 928	}
 929}
 930
 931static inline void
 932desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
 933{
 934	int i;
 935
 936	for (i = 0; i < 4; ++i) {
 937		if ((grmask & 1) != 0)
 938			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
 939				sr->region_start + sr->region_len - 1, gr++);
 940		grmask >>= 1;
 941	}
 942}
 943
 944static inline void
 945desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
 946{
 947	int i;
 948
 949	for (i = 0; i < 4; ++i) {
 950		if ((grmask & 1) != 0) {
 951			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
 952				sr->region_start + sr->region_len - 1, 0);
 953			sr->any_spills = 1;
 954		}
 955		grmask >>= 1;
 956	}
 957}
 958
 959static inline void
 960desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
 961{
 962	set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
 963		sr->region_start + min_t(int, t, sr->region_len - 1), 16*size);
 964}
 965
 966static inline void
 967desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
 968{
 969	sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1);
 970}
 971
 972static inline void
 973desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
 974{
 975	set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
 976}
 977
 978static inline void
 979desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
 980{
 981	set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
 982		0x10 - 4*pspoff);
 983}
 984
 985static inline void
 986desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
 987{
 988	set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
 989		4*spoff);
 990}
 991
 992static inline void
 993desc_rp_br (unsigned char dst, struct unw_state_record *sr)
 994{
 995	sr->return_link_reg = dst;
 996}
 997
 998static inline void
 999desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
1000{
1001	struct unw_reg_info *reg = sr->curr.reg + regnum;
1002
1003	if (reg->where == UNW_WHERE_NONE)
1004		reg->where = UNW_WHERE_GR_SAVE;
1005	reg->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1006}
1007
1008static inline void
1009desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
1010{
1011	sr->spill_offset = 0x10 - 4*pspoff;
1012}
1013
1014static inline unsigned char *
1015desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
1016{
1017	sr->imask = imaskp;
1018	return imaskp + (2*sr->region_len + 7)/8;
1019}
1020
1021/*
1022 * Body descriptors.
1023 */
1024static inline void
1025desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
1026{
1027	sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
1028	sr->epilogue_count = ecount + 1;
1029}
1030
1031static inline void
1032desc_copy_state (unw_word label, struct unw_state_record *sr)
1033{
1034	struct unw_labeled_state *ls;
1035
1036	for (ls = sr->labeled_states; ls; ls = ls->next) {
1037		if (ls->label == label) {
1038			free_state_stack(&sr->curr);
1039			memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
1040			sr->curr.next = dup_state_stack(ls->saved_state.next);
1041			return;
1042		}
1043	}
1044	printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
1045}
1046
1047static inline void
1048desc_label_state (unw_word label, struct unw_state_record *sr)
1049{
1050	struct unw_labeled_state *ls;
1051
1052	ls = alloc_labeled_state();
1053	if (!ls) {
1054		printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
1055		return;
1056	}
1057	ls->label = label;
1058	memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
1059	ls->saved_state.next = dup_state_stack(sr->curr.next);
1060
1061	/* insert into list of labeled states: */
1062	ls->next = sr->labeled_states;
1063	sr->labeled_states = ls;
1064}
1065
1066/*
1067 * General descriptors.
1068 */
1069
1070static inline int
1071desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1072{
1073	if (sr->when_target <= sr->region_start + min_t(int, t, sr->region_len - 1))
1074		return 0;
1075	if (qp > 0) {
1076		if ((sr->pr_val & (1UL << qp)) == 0)
1077			return 0;
1078		sr->pr_mask |= (1UL << qp);
1079	}
1080	return 1;
1081}
1082
1083static inline void
1084desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1085{
1086	struct unw_reg_info *r;
1087
1088	if (!desc_is_active(qp, t, sr))
1089		return;
1090
1091	r = sr->curr.reg + decode_abreg(abreg, 0);
1092	r->where = UNW_WHERE_NONE;
1093	r->when = UNW_WHEN_NEVER;
1094	r->val = 0;
1095}
1096
1097static inline void
1098desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1099		     unsigned char ytreg, struct unw_state_record *sr)
1100{
1101	enum unw_where where = UNW_WHERE_GR;
1102	struct unw_reg_info *r;
1103
1104	if (!desc_is_active(qp, t, sr))
1105		return;
1106
1107	if (x)
1108		where = UNW_WHERE_BR;
1109	else if (ytreg & 0x80)
1110		where = UNW_WHERE_FR;
1111
1112	r = sr->curr.reg + decode_abreg(abreg, 0);
1113	r->where = where;
1114	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1115	r->val = (ytreg & 0x7f);
1116}
1117
1118static inline void
1119desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1120		     struct unw_state_record *sr)
1121{
1122	struct unw_reg_info *r;
1123
1124	if (!desc_is_active(qp, t, sr))
1125		return;
1126
1127	r = sr->curr.reg + decode_abreg(abreg, 1);
1128	r->where = UNW_WHERE_PSPREL;
1129	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1130	r->val = 0x10 - 4*pspoff;
1131}
1132
1133static inline void
1134desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1135		       struct unw_state_record *sr)
1136{
1137	struct unw_reg_info *r;
1138
1139	if (!desc_is_active(qp, t, sr))
1140		return;
1141
1142	r = sr->curr.reg + decode_abreg(abreg, 1);
1143	r->where = UNW_WHERE_SPREL;
1144	r->when = sr->region_start + min_t(int, t, sr->region_len - 1);
1145	r->val = 4*spoff;
1146}
1147
1148#define UNW_DEC_BAD_CODE(code)			printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1149						       code);
1150
1151/*
1152 * region headers:
1153 */
1154#define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg)	desc_prologue(0,r,m,gr,arg)
1155#define UNW_DEC_PROLOGUE(fmt,b,r,arg)		desc_prologue(b,r,0,32,arg)
1156/*
1157 * prologue descriptors:
1158 */
1159#define UNW_DEC_ABI(fmt,a,c,arg)		desc_abi(a,c,arg)
1160#define UNW_DEC_BR_GR(fmt,b,g,arg)		desc_br_gr(b,g,arg)
1161#define UNW_DEC_BR_MEM(fmt,b,arg)		desc_br_mem(b,arg)
1162#define UNW_DEC_FRGR_MEM(fmt,g,f,arg)		desc_frgr_mem(g,f,arg)
1163#define UNW_DEC_FR_MEM(fmt,f,arg)		desc_fr_mem(f,arg)
1164#define UNW_DEC_GR_GR(fmt,m,g,arg)		desc_gr_gr(m,g,arg)
1165#define UNW_DEC_GR_MEM(fmt,m,arg)		desc_gr_mem(m,arg)
1166#define UNW_DEC_MEM_STACK_F(fmt,t,s,arg)	desc_mem_stack_f(t,s,arg)
1167#define UNW_DEC_MEM_STACK_V(fmt,t,arg)		desc_mem_stack_v(t,arg)
1168#define UNW_DEC_REG_GR(fmt,r,d,arg)		desc_reg_gr(r,d,arg)
1169#define UNW_DEC_REG_PSPREL(fmt,r,o,arg)		desc_reg_psprel(r,o,arg)
1170#define UNW_DEC_REG_SPREL(fmt,r,o,arg)		desc_reg_sprel(r,o,arg)
1171#define UNW_DEC_REG_WHEN(fmt,r,t,arg)		desc_reg_when(r,t,arg)
1172#define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1173#define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg)	desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1174#define UNW_DEC_PRIUNAT_GR(fmt,r,arg)		desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1175#define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg)	desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1176#define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg)	desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1177#define UNW_DEC_RP_BR(fmt,d,arg)		desc_rp_br(d,arg)
1178#define UNW_DEC_SPILL_BASE(fmt,o,arg)		desc_spill_base(o,arg)
1179#define UNW_DEC_SPILL_MASK(fmt,m,arg)		(m = desc_spill_mask(m,arg))
1180/*
1181 * body descriptors:
1182 */
1183#define UNW_DEC_EPILOGUE(fmt,t,c,arg)		desc_epilogue(t,c,arg)
1184#define UNW_DEC_COPY_STATE(fmt,l,arg)		desc_copy_state(l,arg)
1185#define UNW_DEC_LABEL_STATE(fmt,l,arg)		desc_label_state(l,arg)
1186/*
1187 * general unwind descriptors:
1188 */
1189#define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg)	desc_spill_reg_p(p,t,a,x,y,arg)
1190#define UNW_DEC_SPILL_REG(f,t,a,x,y,arg)	desc_spill_reg_p(0,t,a,x,y,arg)
1191#define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg)	desc_spill_psprel_p(p,t,a,o,arg)
1192#define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg)	desc_spill_psprel_p(0,t,a,o,arg)
1193#define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg)	desc_spill_sprel_p(p,t,a,o,arg)
1194#define UNW_DEC_SPILL_SPREL(f,t,a,o,arg)	desc_spill_sprel_p(0,t,a,o,arg)
1195#define UNW_DEC_RESTORE_P(f,p,t,a,arg)		desc_restore_p(p,t,a,arg)
1196#define UNW_DEC_RESTORE(f,t,a,arg)		desc_restore_p(0,t,a,arg)
1197
1198#include "unwind_decoder.c"
1199
1200
1201/* Unwind scripts. */
1202
1203static inline unw_hash_index_t
1204hash (unsigned long ip)
1205{
1206	/* magic number = ((sqrt(5)-1)/2)*2^64 */
1207	static const unsigned long hashmagic = 0x9e3779b97f4a7c16UL;
1208
1209	return (ip >> 4) * hashmagic >> (64 - UNW_LOG_HASH_SIZE);
1210}
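/*
 * Editor's note (hedged): 2^64 * (sqrt(5)-1)/2 = 11400714819323198485.95...,
 * which rounds to the 0x9e3779b97f4a7c16 used above; the final shift keeps
 * only the top UNW_LOG_HASH_SIZE (= 8) bits of the product, so the result is
 * always a valid index into unw.hash[0..UNW_HASH_SIZE-1].
 */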
1211
1212static inline long
1213cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1214{
1215	read_lock(&script->lock);
1216	if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1217		/* keep the read lock... */
1218		return 1;
1219	read_unlock(&script->lock);
1220	return 0;
1221}
1222
1223static inline struct unw_script *
1224script_lookup (struct unw_frame_info *info)
1225{
1226	struct unw_script *script = unw.cache + info->hint;
1227	unsigned short index;
1228	unsigned long ip, pr;
1229
1230	if (UNW_DEBUG_ON(0))
1231		return NULL;	/* Always regenerate scripts in debug mode */
1232
1233	STAT(++unw.stat.cache.lookups);
1234
1235	ip = info->ip;
1236	pr = info->pr;
1237
1238	if (cache_match(script, ip, pr)) {
1239		STAT(++unw.stat.cache.hinted_hits);
1240		return script;
1241	}
1242
1243	index = unw.hash[hash(ip)];
1244	if (index >= UNW_CACHE_SIZE)
1245		return NULL;
1246
1247	script = unw.cache + index;
1248	while (1) {
1249		if (cache_match(script, ip, pr)) {
1250			/* update hint; no locking required as single-word writes are atomic */
1251			STAT(++unw.stat.cache.normal_hits);
1252			unw.cache[info->prev_script].hint = script - unw.cache;
1253			return script;
1254		}
1255		if (script->coll_chain >= UNW_HASH_SIZE)
1256			return NULL;
1257		script = unw.cache + script->coll_chain;
1258		STAT(++unw.stat.cache.collision_chain_traversals);
1259	}
1260}
1261
1262/*
1263 * On returning, a write lock for the SCRIPT is still being held.
1264 */
1265static inline struct unw_script *
1266script_new (unsigned long ip)
1267{
1268	struct unw_script *script, *prev, *tmp;
1269	unw_hash_index_t index;
1270	unsigned short head;
1271
1272	STAT(++unw.stat.script.news);
1273
1274	/*
1275	 * Can't (easily) use cmpxchg() here because of ABA problem
1276	 * that is intrinsic in cmpxchg()...
1277	 */
1278	head = unw.lru_head;
1279	script = unw.cache + head;
1280	unw.lru_head = script->lru_chain;
1281
1282	/*
1283	 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1284	 * script->lock.  Thus, if the write_trylock() fails, we simply bail out.  The
1285	 * alternative would be to disable interrupts whenever we hold a read-lock, but
1286	 * that seems silly.
1287	 */
1288	if (!write_trylock(&script->lock))
1289		return NULL;
1290
1291	/* re-insert script at the tail of the LRU chain: */
1292	unw.cache[unw.lru_tail].lru_chain = head;
1293	unw.lru_tail = head;
1294
1295	/* remove the old script from the hash table (if it's there): */
1296	if (script->ip) {
1297		index = hash(script->ip);
1298		tmp = unw.cache + unw.hash[index];
1299		prev = NULL;
1300		while (1) {
1301			if (tmp == script) {
1302				if (prev)
1303					prev->coll_chain = tmp->coll_chain;
1304				else
1305					unw.hash[index] = tmp->coll_chain;
1306				break;
1307			} else
1308				prev = tmp;
1309			if (tmp->coll_chain >= UNW_CACHE_SIZE)
1310			/* old script wasn't in the hash-table */
1311				break;
1312			tmp = unw.cache + tmp->coll_chain;
1313		}
1314	}
1315
1316	/* enter new script in the hash table */
1317	index = hash(ip);
1318	script->coll_chain = unw.hash[index];
1319	unw.hash[index] = script - unw.cache;
1320
1321	script->ip = ip;	/* set new IP while we're holding the locks */
1322
1323	STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1324
1325	script->flags = 0;
1326	script->hint = 0;
1327	script->count = 0;
1328	return script;
1329}
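/*
 * script_new() above recycles the least-recently-used cache slot:
 * unw.lru_head names the victim, which is re-linked at unw.lru_tail,
 * unhooked from its old hash chain, and prepended to the chain for the new
 * IP.  A stand-alone sketch of just the intrusive, singly linked LRU part
 * (hypothetical names, illustrative only):
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

#define NSLOTS	4

struct slot {
	int lru_chain;		/* next entry in the LRU chain (towards the tail) */
	unsigned long ip;
};

static struct slot cache[NSLOTS];
static int lru_head, lru_tail;	/* oldest and youngest slot */

static void
lru_init (void)
{
	int i;

	for (i = 1; i < NSLOTS; ++i)
		cache[i].lru_chain = i - 1;
	lru_head = NSLOTS - 1;		/* oldest slot: next victim */
	lru_tail = 0;			/* youngest slot */
}

static int
lru_take_victim (void)
{
	int victim = lru_head;

	lru_head = cache[victim].lru_chain;	/* next-oldest becomes the head */
	cache[lru_tail].lru_chain = victim;	/* re-insert victim at the tail */
	lru_tail = victim;
	return victim;
}

int
main (void)
{
	lru_init();
	printf("victim=%d\n", lru_take_victim());	/* prints 3 */
	printf("victim=%d\n", lru_take_victim());	/* prints 2 */
	return 0;
}
#endif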
1330
1331static void
1332script_finalize (struct unw_script *script, struct unw_state_record *sr)
1333{
1334	script->pr_mask = sr->pr_mask;
1335	script->pr_val = sr->pr_val;
1336	/*
1337	 * We could down-grade our write-lock on script->lock here but
1338	 * the rwlock API doesn't offer atomic lock downgrading, so
1339	 * we'll just keep the write-lock and release it later when
1340	 * we're done using the script.
1341	 */
1342}
1343
1344static inline void
1345script_emit (struct unw_script *script, struct unw_insn insn)
1346{
1347	if (script->count >= UNW_MAX_SCRIPT_LEN) {
1348		UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1349			__func__, UNW_MAX_SCRIPT_LEN);
1350		return;
1351	}
1352	script->insn[script->count++] = insn;
1353}
1354
1355static inline void
1356emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1357{
1358	struct unw_reg_info *r = sr->curr.reg + i;
1359	enum unw_insn_opcode opc;
1360	struct unw_insn insn;
1361	unsigned long val = 0;
1362
1363	switch (r->where) {
1364	      case UNW_WHERE_GR:
1365		if (r->val >= 32) {
1366			/* register got spilled to a stacked register */
1367			opc = UNW_INSN_SETNAT_TYPE;
1368			val = UNW_NAT_REGSTK;
1369		} else
1370			/* register got spilled to a scratch register */
1371			opc = UNW_INSN_SETNAT_MEMSTK;
1372		break;
1373
1374	      case UNW_WHERE_FR:
1375		opc = UNW_INSN_SETNAT_TYPE;
1376		val = UNW_NAT_VAL;
1377		break;
1378
1379	      case UNW_WHERE_BR:
1380		opc = UNW_INSN_SETNAT_TYPE;
1381		val = UNW_NAT_NONE;
1382		break;
1383
1384	      case UNW_WHERE_PSPREL:
1385	      case UNW_WHERE_SPREL:
1386		opc = UNW_INSN_SETNAT_MEMSTK;
1387		break;
1388
1389	      default:
1390		UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1391			   __func__, r->where);
1392		return;
1393	}
1394	insn.opc = opc;
1395	insn.dst = unw.preg_index[i];
1396	insn.val = val;
1397	script_emit(script, insn);
1398}
1399
1400static void
1401compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1402{
1403	struct unw_reg_info *r = sr->curr.reg + i;
1404	enum unw_insn_opcode opc;
1405	unsigned long val, rval;
1406	struct unw_insn insn;
1407	long need_nat_info;
1408
1409	if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1410		return;
1411
1412	opc = UNW_INSN_MOVE;
1413	val = rval = r->val;
1414	need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1415
1416	switch (r->where) {
1417	      case UNW_WHERE_GR:
1418		if (rval >= 32) {
1419			opc = UNW_INSN_MOVE_STACKED;
1420			val = rval - 32;
1421		} else if (rval >= 4 && rval <= 7) {
1422			if (need_nat_info) {
1423				opc = UNW_INSN_MOVE2;
1424				need_nat_info = 0;
1425			}
1426			val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1427		} else if (rval == 0) {
1428			opc = UNW_INSN_MOVE_CONST;
1429			val = 0;
1430		} else {
1431			/* register got spilled to a scratch register */
1432			opc = UNW_INSN_MOVE_SCRATCH;
1433			val = pt_regs_off(rval);
1434		}
1435		break;
1436
1437	      case UNW_WHERE_FR:
1438		if (rval <= 5)
1439			val = unw.preg_index[UNW_REG_F2  + (rval -  2)];
1440		else if (rval >= 16 && rval <= 31)
1441			val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1442		else {
1443			opc = UNW_INSN_MOVE_SCRATCH;
1444			if (rval <= 11)
1445				val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1446			else
1447				UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1448					   __func__, rval);
1449		}
1450		break;
1451
1452	      case UNW_WHERE_BR:
1453		if (rval >= 1 && rval <= 5)
1454			val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1455		else {
1456			opc = UNW_INSN_MOVE_SCRATCH;
1457			if (rval == 0)
1458				val = offsetof(struct pt_regs, b0);
1459			else if (rval == 6)
1460				val = offsetof(struct pt_regs, b6);
1461			else
1462				val = offsetof(struct pt_regs, b7);
1463		}
1464		break;
1465
1466	      case UNW_WHERE_SPREL:
1467		opc = UNW_INSN_ADD_SP;
1468		break;
1469
1470	      case UNW_WHERE_PSPREL:
1471		opc = UNW_INSN_ADD_PSP;
1472		break;
1473
1474	      default:
1475		UNW_DPRINT(0, "unwind.%s: register %u has unexpected `where' value of %u\n",
1476			   __func__, i, r->where);
1477		break;
1478	}
1479	insn.opc = opc;
1480	insn.dst = unw.preg_index[i];
1481	insn.val = val;
1482	script_emit(script, insn);
1483	if (need_nat_info)
1484		emit_nat_info(sr, i, script);
1485
1486	if (i == UNW_REG_PSP) {
1487		/*
1488		 * info->psp must contain the _value_ of the previous
1489		 * sp, not its save location.  We get this by
1490		 * dereferencing the value we just stored in
1491		 * info->psp:
1492		 */
1493		insn.opc = UNW_INSN_LOAD;
1494		insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1495		script_emit(script, insn);
1496	}
1497}
1498
1499static inline const struct unw_table_entry *
1500lookup (struct unw_table *table, unsigned long rel_ip)
1501{
1502	const struct unw_table_entry *e = NULL;
1503	unsigned long lo, hi, mid;
1504
1505	/* do a binary search for the right entry: */
1506	for (lo = 0, hi = table->length; lo < hi; ) {
1507		mid = (lo + hi) / 2;
1508		e = &table->array[mid];
1509		if (rel_ip < e->start_offset)
1510			hi = mid;
1511		else if (rel_ip >= e->end_offset)
1512			lo = mid + 1;
1513		else
1514			break;
1515	}
1516	if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
1517		return NULL;
1518	return e;
1519}
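/*
 * lookup() relies on the unwind table being sorted by start_offset with
 * non-overlapping [start_offset, end_offset) ranges, so a plain binary
 * search finds the single entry (if any) covering rel_ip.  A stand-alone
 * sketch of the same search over a toy table (hypothetical values,
 * illustrative only):
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

struct range { unsigned long start, end; };	/* half-open: [start, end) */

static const struct range table[] = {
	{ 0x000, 0x040 }, { 0x040, 0x120 }, { 0x200, 0x260 },
};

static long
range_lookup (unsigned long rel_ip)
{
	unsigned long lo = 0, hi = sizeof(table)/sizeof(table[0]), mid;

	while (lo < hi) {
		mid = (lo + hi) / 2;
		if (rel_ip < table[mid].start)
			hi = mid;
		else if (rel_ip >= table[mid].end)
			lo = mid + 1;
		else
			return mid;		/* covering entry found */
	}
	return -1;				/* rel_ip falls in a hole */
}

int
main (void)
{
	printf("%ld\n", range_lookup(0x100));	/* 1 */
	printf("%ld\n", range_lookup(0x150));	/* -1: between entries */
	return 0;
}
#endif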
1520
1521/*
1522 * Build an unwind script that unwinds from the state described by INFO
1523 * to the entry point of the function that created that state.
1524 */
1525static inline struct unw_script *
1526build_script (struct unw_frame_info *info)
1527{
1528	const struct unw_table_entry *e = NULL;
1529	struct unw_script *script = NULL;
1530	struct unw_labeled_state *ls, *next;
1531	unsigned long ip = info->ip;
1532	struct unw_state_record sr;
1533	struct unw_table *table, *prev;
1534	struct unw_reg_info *r;
1535	struct unw_insn insn;
1536	u8 *dp, *desc_end;
1537	u64 hdr;
1538	int i;
1539	STAT(unsigned long start, parse_start;)
1540
1541	STAT(++unw.stat.script.builds; start = ia64_get_itc());
1542
1543	/* build state record */
1544	memset(&sr, 0, sizeof(sr));
1545	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1546		r->when = UNW_WHEN_NEVER;
1547	sr.pr_val = info->pr;
1548
1549	UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __func__, ip);
1550	script = script_new(ip);
1551	if (!script) {
1552		UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n",  __func__);
1553		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1554		return NULL;
1555	}
1556	unw.cache[info->prev_script].hint = script - unw.cache;
1557
1558	/* search the kernel's and the modules' unwind tables for IP: */
1559
1560	STAT(parse_start = ia64_get_itc());
1561
1562	prev = NULL;
1563	for (table = unw.tables; table; table = table->next) {
1564		if (ip >= table->start && ip < table->end) {
1565			/*
1566			 * Leave the kernel unwind table at the very front,
1567			 * since moving it might break assumptions elsewhere.
1568			 * Otherwise, move the matching table to the second
1569			 * position in the list so that traversals can benefit
1570			 * from commonality in backtrace paths.
1571			 */
1572			if (prev && prev != unw.tables) {
1573				/* unw is safe - we're already spinlocked */
1574				prev->next = table->next;
1575				table->next = unw.tables->next;
1576				unw.tables->next = table;
1577			}
1578			e = lookup(table, ip - table->segment_base);
1579			break;
1580		}
1581		prev = table;
1582	}
1583	if (!e) {
1584		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs)  */
1585		UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1586			__func__, ip, unw.cache[info->prev_script].ip);
1587		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1588		sr.curr.reg[UNW_REG_RP].when = -1;
1589		sr.curr.reg[UNW_REG_RP].val = 0;
1590		compile_reg(&sr, UNW_REG_RP, script);
1591		script_finalize(script, &sr);
1592		STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1593		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1594		return script;
1595	}
1596
1597	sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
1598			  + (ip & 0xfUL));
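	/*
	 * Worked example of the slot arithmetic above (hypothetical numbers):
	 * with the function starting at segment_base + e->start_offset =
	 * 0xa000000100010000 and ip = 0xa000000100010032, the target lies
	 * 0x30 bytes = 3 bundles past the entry point, in slot 2, so
	 * when_target = 3*3 + 2 = 11, i.e. the 12th instruction slot of the
	 * function (each 16-byte bundle holds three slots; ip & 0xf encodes
	 * the slot number).
	 */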
1599	hdr = *(u64 *) (table->segment_base + e->info_offset);
1600	dp =   (u8 *)  (table->segment_base + e->info_offset + 8);
1601	desc_end = dp + 8*UNW_LENGTH(hdr);
1602
1603	while (!sr.done && dp < desc_end)
1604		dp = unw_decode(dp, sr.in_body, &sr);
1605
1606	if (sr.when_target > sr.epilogue_start) {
1607		/*
1608		 * sp has been restored and all values on the memory stack below
1609		 * psp also have been restored.
1610		 */
1611		sr.curr.reg[UNW_REG_PSP].val = 0;
1612		sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
1613		sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
1614		for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1615			if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
1616			    || r->where == UNW_WHERE_SPREL)
1617			{
1618				r->val = 0;
1619				r->where = UNW_WHERE_NONE;
1620				r->when = UNW_WHEN_NEVER;
1621			}
1622	}
1623
1624	script->flags = sr.flags;
1625
1626	/*
1627	 * If RP didn't get saved, generate an entry for the return link
1628	 * register.
1629	 */
1630	if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
1631		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1632		sr.curr.reg[UNW_REG_RP].when = -1;
1633		sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1634		UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1635			   __func__, ip, sr.curr.reg[UNW_REG_RP].where,
1636			   sr.curr.reg[UNW_REG_RP].val);
1637	}
1638
1639#ifdef UNW_DEBUG
1640	UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1641		__func__, table->segment_base + e->start_offset, sr.when_target);
1642	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1643		if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1644			UNW_DPRINT(1, "  %s <- ", unw.preg_name[r - sr.curr.reg]);
1645			switch (r->where) {
1646			      case UNW_WHERE_GR:     UNW_DPRINT(1, "r%lu", r->val); break;
1647			      case UNW_WHERE_FR:     UNW_DPRINT(1, "f%lu", r->val); break;
1648			      case UNW_WHERE_BR:     UNW_DPRINT(1, "b%lu", r->val); break;
1649			      case UNW_WHERE_SPREL:  UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
1650			      case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
1651			      case UNW_WHERE_NONE:
1652				UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
1653				break;
1654
1655			      default:
1656				UNW_DPRINT(1, "BADWHERE(%d)", r->where);
1657				break;
1658			}
1659			UNW_DPRINT(1, "\t\t%d\n", r->when);
1660		}
1661	}
1662#endif
1663
1664	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1665
1666	/* translate state record into unwinder instructions: */
1667
1668	/*
1669	 * First, set psp if we're dealing with a fixed-size frame;
1670	 * subsequent instructions may depend on this value.
1671	 */
1672	if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
1673	    && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
1674	    && sr.curr.reg[UNW_REG_PSP].val != 0) {
1675		/* new psp is sp plus frame size */
1676		insn.opc = UNW_INSN_ADD;
1677		insn.dst = offsetof(struct unw_frame_info, psp)/8;
1678		insn.val = sr.curr.reg[UNW_REG_PSP].val;	/* frame size */
1679		script_emit(script, insn);
1680	}
1681
1682	/* determine where the primary UNaT is: */
1683	if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1684		i = UNW_REG_PRI_UNAT_MEM;
1685	else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
1686		i = UNW_REG_PRI_UNAT_GR;
1687	else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1688		i = UNW_REG_PRI_UNAT_MEM;
1689	else
1690		i = UNW_REG_PRI_UNAT_GR;
1691
1692	compile_reg(&sr, i, script);
1693
1694	for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
1695		compile_reg(&sr, i, script);
1696
1697	/* free labeled register states & stack: */
1698
1699	STAT(parse_start = ia64_get_itc());
1700	for (ls = sr.labeled_states; ls; ls = next) {
1701		next = ls->next;
1702		free_state_stack(&ls->saved_state);
1703		free_labeled_state(ls);
1704	}
1705	free_state_stack(&sr.curr);
1706	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1707
1708	script_finalize(script, &sr);
1709	STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1710	return script;
1711}
1712
1713/*
1714 * Apply the unwinding actions encoded in SCRIPT and update STATE to
1715 * reflect the state that existed upon entry to the function that this
1716 * script represents.
1717 */
1718static inline void
1719run_script (struct unw_script *script, struct unw_frame_info *state)
1720{
1721	struct unw_insn *ip, *limit, next_insn;
1722	unsigned long opc, dst, val, off;
1723	unsigned long *s = (unsigned long *) state;
1724	STAT(unsigned long start;)
1725
1726	STAT(++unw.stat.script.runs; start = ia64_get_itc());
1727	state->flags = script->flags;
1728	ip = script->insn;
1729	limit = script->insn + script->count;
1730	next_insn = *ip;
1731
1732	while (ip++ < limit) {
1733		opc = next_insn.opc;
1734		dst = next_insn.dst;
1735		val = next_insn.val;
1736		next_insn = *ip;
1737
1738	  redo:
1739		switch (opc) {
1740		      case UNW_INSN_ADD:
1741			s[dst] += val;
1742			break;
1743
1744		      case UNW_INSN_MOVE2:
1745			if (!s[val])
1746				goto lazy_init;
1747			s[dst+1] = s[val+1];
1748			s[dst] = s[val];
1749			break;
1750
1751		      case UNW_INSN_MOVE:
1752			if (!s[val])
1753				goto lazy_init;
1754			s[dst] = s[val];
1755			break;
1756
1757		      case UNW_INSN_MOVE_SCRATCH:
1758			if (state->pt) {
1759				s[dst] = (unsigned long) get_scratch_regs(state) + val;
1760			} else {
1761				s[dst] = 0;
1762				UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1763					   __func__, dst, val);
1764			}
1765			break;
1766
1767		      case UNW_INSN_MOVE_CONST:
1768			if (val == 0)
1769				s[dst] = (unsigned long) &unw.r0;
1770			else {
1771				s[dst] = 0;
1772				UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n",
1773					   __func__, val);
1774			}
1775			break;
1776
1777
1778		      case UNW_INSN_MOVE_STACKED:
1779			s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
1780								    val);
1781			break;
1782
1783		      case UNW_INSN_ADD_PSP:
1784			s[dst] = state->psp + val;
1785			break;
1786
1787		      case UNW_INSN_ADD_SP:
1788			s[dst] = state->sp + val;
1789			break;
1790
1791		      case UNW_INSN_SETNAT_MEMSTK:
1792			if (!state->pri_unat_loc)
1793				state->pri_unat_loc = &state->sw->caller_unat;
1794			/* register offset is a multiple of 8, so the least 3 bits (type) are 0 */
1795			s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
1796			break;
1797
1798		      case UNW_INSN_SETNAT_TYPE:
1799			s[dst+1] = val;
1800			break;
1801
1802		      case UNW_INSN_LOAD:
1803#ifdef UNW_DEBUG
1804			if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
1805			    || s[val] < TASK_SIZE)
1806			{
1807				UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1808					   __func__, s[val]);
1809				break;
1810			}
1811#endif
1812			s[dst] = *(unsigned long *) s[val];
1813			break;
1814		}
1815	}
1816	STAT(unw.stat.script.run_time += ia64_get_itc() - start);
1817	return;
1818
1819  lazy_init:
1820	off = unw.sw_off[val];
1821	s[val] = (unsigned long) state->sw + off;
1822	if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
1823		/*
1824		 * We're initializing a general register: init NaT info, too.  Note that
1825		 * the offset is a multiple of 8 which gives us the 3 bits needed for
1826		 * the type field.
1827		 */
1828		s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
1829	goto redo;
1830}
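/*
 * run_script() is a small interpreter: every unw_insn is an (opc, dst, val)
 * triple applied to the frame-info structure viewed as an array of words.
 * A stand-alone sketch of the same idea with a two-opcode instruction set
 * (hypothetical, illustrative only):
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>

enum { OP_SET, OP_ADD };			/* toy opcodes */

struct insn { int opc; int dst; long val; };

static void
run (const struct insn *ip, int count, long *s)
{
	int i;

	for (i = 0; i < count; ++i) {
		switch (ip[i].opc) {
		      case OP_SET: s[ip[i].dst]  = ip[i].val; break;
		      case OP_ADD: s[ip[i].dst] += ip[i].val; break;
		}
	}
}

int
main (void)
{
	long state[4] = { 0 };
	struct insn script[] = {
		{ OP_SET, 0, 100 },	/* state[0] = 100 */
		{ OP_ADD, 0,  16 },	/* state[0] += 16 */
	};

	run(script, 2, state);
	printf("%ld\n", state[0]);	/* 116 */
	return 0;
}
#endif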
1831
1832static int
1833find_save_locs (struct unw_frame_info *info)
1834{
1835	int have_write_lock = 0;
1836	struct unw_script *scr;
1837	unsigned long flags = 0;
1838
1839	if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
1840		/* don't let obviously bad addresses pollute the cache */
1841		/* FIXME: should really be level 0 but it occurs too often. KAO */
1842		UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __func__, info->ip);
1843		info->rp_loc = NULL;
1844		return -1;
1845	}
1846
1847	scr = script_lookup(info);
1848	if (!scr) {
1849		spin_lock_irqsave(&unw.lock, flags);
1850		scr = build_script(info);
1851		if (!scr) {
1852			spin_unlock_irqrestore(&unw.lock, flags);
1853			UNW_DPRINT(0,
1854				   "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1855				   __func__, info->ip);
1856			return -1;
1857		}
1858		have_write_lock = 1;
1859	}
1860	info->hint = scr->hint;
1861	info->prev_script = scr - unw.cache;
1862
1863	run_script(scr, info);
1864
1865	if (have_write_lock) {
1866		write_unlock(&scr->lock);
1867		spin_unlock_irqrestore(&unw.lock, flags);
1868	} else
1869		read_unlock(&scr->lock);
1870	return 0;
1871}
1872
1873static int
1874unw_valid (const struct unw_frame_info *info, unsigned long *p)
1875{
1876	unsigned long loc = (unsigned long)p;
1877	return (loc >= info->regstk.limit && loc < info->regstk.top) ||
1878	       (loc >= info->memstk.top && loc < info->memstk.limit);
1879}
1880
1881int
1882unw_unwind (struct unw_frame_info *info)
1883{
1884	unsigned long prev_ip, prev_sp, prev_bsp;
1885	unsigned long ip, pr, num_regs;
1886	STAT(unsigned long start, flags;)
1887	int retval;
1888
1889	STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
1890
1891	prev_ip = info->ip;
1892	prev_sp = info->sp;
1893	prev_bsp = info->bsp;
1894
1895	/* validate the return IP pointer */
1896	if (!unw_valid(info, info->rp_loc)) {
1897		/* FIXME: should really be level 0 but it occurs too often. KAO */
1898		UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1899			   __func__, info->ip);
1900		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1901		return -1;
1902	}
1903	/* restore the ip */
1904	ip = info->ip = *info->rp_loc;
1905	if (ip < GATE_ADDR) {
1906		UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __func__, ip);
1907		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1908		return -1;
1909	}
1910
1911	/* validate the previous stack frame pointer */
1912	if (!unw_valid(info, info->pfs_loc)) {
1913		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__);
1914		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1915		return -1;
1916	}
1917	/* restore the cfm: */
1918	info->cfm_loc = info->pfs_loc;
1919
1920	/* restore the bsp: */
1921	pr = info->pr;
1922	num_regs = 0;
1923	if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
1924		info->pt = info->sp + 16;
1925		if ((pr & (1UL << PRED_NON_SYSCALL)) != 0)
1926			num_regs = *info->cfm_loc & 0x7f;		/* size of frame */
1927		info->pfs_loc =
1928			(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
1929		UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __func__, info->pt);
1930	} else
1931		num_regs = (*info->cfm_loc >> 7) & 0x7f;	/* size of locals */
1932	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1933	if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1934		UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1935			__func__, info->bsp, info->regstk.limit, info->regstk.top);
1936		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1937		return -1;
1938	}
1939
1940	/* restore the sp: */
1941	info->sp = info->psp;
1942	if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1943		UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1944			__func__, info->sp, info->memstk.top, info->memstk.limit);
1945		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1946		return -1;
1947	}
1948
1949	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1950		UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1951			   __func__, ip);
1952		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1953		return -1;
1954	}
1955
1956	/* as we unwind, the saved ar.unat becomes the primary unat: */
1957	info->pri_unat_loc = info->unat_loc;
1958
1959	/* finally, restore the predicates: */
1960	unw_get_pr(info, &info->pr);
1961
1962	retval = find_save_locs(info);
1963	STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1964	return retval;
1965}
1966EXPORT_SYMBOL(unw_unwind);
1967
1968int
1969unw_unwind_to_user (struct unw_frame_info *info)
1970{
1971	unsigned long ip, sp, pr = info->pr;
1972
1973	do {
1974		unw_get_sp(info, &sp);
1975		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
1976		    < IA64_PT_REGS_SIZE) {
1977			UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
1978				   __func__);
1979			break;
1980		}
1981		if (unw_is_intr_frame(info) &&
1982		    (pr & (1UL << PRED_USER_STACK)))
1983			return 0;
1984		if (unw_get_pr (info, &pr) < 0) {
1985			unw_get_rp(info, &ip);
1986			UNW_DPRINT(0, "unwind.%s: failed to read "
1987				   "predicate register (ip=0x%lx)\n",
1988				__func__, ip);
1989			return -1;
1990		}
1991	} while (unw_unwind(info) >= 0);
1992	unw_get_ip(info, &ip);
1993	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
1994		   __func__, ip);
1995	return -1;
1996}
1997EXPORT_SYMBOL(unw_unwind_to_user);
1998
1999static void
2000init_frame_info (struct unw_frame_info *info, struct task_struct *t,
2001		 struct switch_stack *sw, unsigned long stktop)
2002{
2003	unsigned long rbslimit, rbstop, stklimit;
2004	STAT(unsigned long start, flags;)
2005
2006	STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
2007
2008	/*
2009	 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
2010	 * don't want to do that because it would be slow as each preserved register would
2011	 * have to be processed.  Instead, what we do here is zero out the frame info and
2012	 * start the unwind process at the function that created the switch_stack frame.
2013	 * When a preserved value in switch_stack needs to be accessed, run_script() will
2014	 * initialize the appropriate pointer on demand.
2015	 */
2016	memset(info, 0, sizeof(*info));
2017
2018	rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
2019	stklimit = (unsigned long) t + IA64_STK_OFFSET;
2020
2021	rbstop   = sw->ar_bspstore;
2022	if (rbstop > stklimit || rbstop < rbslimit)
2023		rbstop = rbslimit;
2024
2025	if (stktop <= rbstop)
2026		stktop = rbstop;
2027	if (stktop > stklimit)
2028		stktop = stklimit;
2029
2030	info->regstk.limit = rbslimit;
2031	info->regstk.top   = rbstop;
2032	info->memstk.limit = stklimit;
2033	info->memstk.top   = stktop;
2034	info->task = t;
2035	info->sw  = sw;
2036	info->sp = info->psp = stktop;
2037	info->pr = sw->pr;
2038	UNW_DPRINT(3, "unwind.%s:\n"
2039		   "  task   0x%lx\n"
2040		   "  rbs = [0x%lx-0x%lx)\n"
2041		   "  stk = [0x%lx-0x%lx)\n"
2042		   "  pr     0x%lx\n"
2043		   "  sw     0x%lx\n"
2044		   "  sp     0x%lx\n",
2045		   __func__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
2046		   info->pr, (unsigned long) info->sw, info->sp);
2047	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
2048}
2049
2050void
2051unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
2052{
2053	unsigned long sol;
2054
2055	init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
2056	info->cfm_loc = &sw->ar_pfs;
2057	sol = (*info->cfm_loc >> 7) & 0x7f;
2058	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
2059	info->ip = sw->b0;
2060	UNW_DPRINT(3, "unwind.%s:\n"
2061		   "  bsp    0x%lx\n"
2062		   "  sol    0x%lx\n"
2063		   "  ip     0x%lx\n",
2064		   __func__, info->bsp, sol, info->ip);
2065	find_save_locs(info);
2066}
2067
2068EXPORT_SYMBOL(unw_init_frame_info);
2069
2070void
2071unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
2072{
2073	struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
2074
2075	UNW_DPRINT(1, "unwind.%s\n", __func__);
2076	unw_init_frame_info(info, t, sw);
2077}
2078EXPORT_SYMBOL(unw_init_from_blocked_task);
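/*
 * A minimal sketch of how a caller might walk a blocked task's stack with
 * this API (assuming the unw_get_ip()/unw_get_sp() accessors declared in
 * <asm/unwind.h>; illustrative only):
 */
#if 0	/* illustrative sketch only */
static void
example_backtrace (struct task_struct *task)
{
	struct unw_frame_info info;
	unsigned long ip, sp;

	unw_init_from_blocked_task(&info, task);
	do {
		unw_get_ip(&info, &ip);
		if (ip == 0)
			break;
		unw_get_sp(&info, &sp);
		printk(KERN_DEBUG "ip=0x%lx sp=0x%lx\n", ip, sp);
	} while (unw_unwind(&info) >= 0);	/* stop when no caller frame is found */
}
#endif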
2079
2080static void
2081init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
2082		   unsigned long gp, const void *table_start, const void *table_end)
2083{
2084	const struct unw_table_entry *start = table_start, *end = table_end;
2085
2086	table->name = name;
2087	table->segment_base = segment_base;
2088	table->gp = gp;
2089	table->start = segment_base + start[0].start_offset;
2090	table->end = segment_base + end[-1].end_offset;
2091	table->array = start;
2092	table->length = end - start;
2093}
2094
2095void *
2096unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
2097		      const void *table_start, const void *table_end)
2098{
2099	const struct unw_table_entry *start = table_start, *end = table_end;
2100	struct unw_table *table;
2101	unsigned long flags;
2102
2103	if (end - start <= 0) {
2104		UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2105			   __func__);
2106		return NULL;
2107	}
2108
2109	table = kmalloc(sizeof(*table), GFP_USER);
2110	if (!table)
2111		return NULL;
2112
2113	init_unwind_table(table, name, segment_base, gp, table_start, table_end);
2114
2115	spin_lock_irqsave(&unw.lock, flags);
2116	{
2117		/* keep kernel unwind table at the front (it's searched most commonly): */
2118		table->next = unw.tables->next;
2119		unw.tables->next = table;
2120	}
2121	spin_unlock_irqrestore(&unw.lock, flags);
2122
2123	return table;
2124}
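/*
 * A minimal sketch of how a load module's unwind section might be registered
 * and later removed through this interface (hypothetical variable and
 * function names; the returned handle must be kept for removal):
 */
#if 0	/* illustrative sketch only */
static void *example_unw_handle;

static void
example_register (const char *name, unsigned long segment_base, unsigned long gp,
		  const void *unwind_start, const void *unwind_end)
{
	example_unw_handle = unw_add_unwind_table(name, segment_base, gp,
						  unwind_start, unwind_end);
}

static void
example_unregister (void)
{
	if (example_unw_handle)
		unw_remove_unwind_table(example_unw_handle);
}
#endif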
2125
2126void
2127unw_remove_unwind_table (void *handle)
2128{
2129	struct unw_table *table, *prev;
2130	struct unw_script *tmp;
2131	unsigned long flags;
2132	long index;
2133
2134	if (!handle) {
2135		UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2136			   __func__);
2137		return;
2138	}
2139
2140	table = handle;
2141	if (table == &unw.kernel_table) {
2142		UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2143			   "no-can-do!\n", __func__);
2144		return;
2145	}
2146
2147	spin_lock_irqsave(&unw.lock, flags);
2148	{
2149		/* first, delete the table: */
2150
2151		for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2152			if (prev->next == table)
2153				break;
2154		if (!prev) {
2155			UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2156				   __func__, (void *) table);
2157			spin_unlock_irqrestore(&unw.lock, flags);
2158			return;
2159		}
2160		prev->next = table->next;
2161	}
2162	spin_unlock_irqrestore(&unw.lock, flags);
2163
2164	/* next, remove hash table entries for this table */
2165
2166	for (index = 0; index < UNW_HASH_SIZE; ++index) {
2167		tmp = unw.cache + unw.hash[index];
2168		if (unw.hash[index] >= UNW_CACHE_SIZE
2169		    || tmp->ip < table->start || tmp->ip >= table->end)
2170			continue;
2171
2172		write_lock(&tmp->lock);
2173		{
2174			if (tmp->ip >= table->start && tmp->ip < table->end) {
2175				unw.hash[index] = tmp->coll_chain;
2176				tmp->ip = 0;
2177			}
2178		}
2179		write_unlock(&tmp->lock);
2180	}
2181
2182	kfree(table);
2183}
2184
2185static int __init
2186create_gate_table (void)
2187{
2188	const struct unw_table_entry *entry, *start, *end;
2189	unsigned long *lp, segbase = GATE_ADDR;
2190	size_t info_size, size;
2191	char *info;
2192	Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
2193	int i;
2194
2195	for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
2196		if (phdr->p_type == PT_IA_64_UNWIND) {
2197			punw = phdr;
2198			break;
2199		}
2200
2201	if (!punw) {
2202		printk(KERN_ERR "%s: failed to find gate DSO's unwind table!\n", __func__);
2203		return 0;
2204	}
2205
2206	start = (const struct unw_table_entry *) punw->p_vaddr;
2207	end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);
2208	size  = 0;
2209
2210	unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);
2211
2212	for (entry = start; entry < end; ++entry)
2213		size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2214	size += 8;	/* reserve space for "end of table" marker */
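	/*
	 * Size arithmetic, worked on hypothetical numbers: an entry whose
	 * unwind info header reports UNW_LENGTH() == 2 contributes
	 * 3*8 = 24 bytes for its (start, end, info) table row plus
	 * 8 + 8*2 = 24 bytes for the copied info block; two such entries
	 * plus the 8-byte end-of-table marker give 2*48 + 8 = 104 bytes.
	 */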
2215
2216	unw.gate_table = kmalloc(size, GFP_KERNEL);
2217	if (!unw.gate_table) {
2218		unw.gate_table_size = 0;
2219		printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __func__);
2220		return 0;
2221	}
2222	unw.gate_table_size = size;
2223
2224	lp = unw.gate_table;
2225	info = (char *) unw.gate_table + size;
2226
2227	for (entry = start; entry < end; ++entry, lp += 3) {
2228		info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2229		info -= info_size;
2230		memcpy(info, (char *) segbase + entry->info_offset, info_size);
2231
2232		lp[0] = segbase + entry->start_offset;		/* start */
2233		lp[1] = segbase + entry->end_offset;		/* end */
2234		lp[2] = info - (char *) unw.gate_table;		/* info */
2235	}
2236	*lp = 0;	/* end-of-table marker */
2237	return 0;
2238}
2239
2240__initcall(create_gate_table);
2241
2242void __init
2243unw_init (void)
2244{
2245	extern char __gp[];
2246	extern void unw_hash_index_t_is_too_narrow (void);
2247	long i, off;
2248
2249	if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2250		unw_hash_index_t_is_too_narrow();
2251
2252	unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(CALLER_UNAT);
2253	unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
2254	unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
2255	unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
2256	unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(CALLER_UNAT);
2257	unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2258	unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2259	unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
2260	for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2261		unw.sw_off[unw.preg_index[i]] = off;
2262	for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2263		unw.sw_off[unw.preg_index[i]] = off;
2264	for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2265		unw.sw_off[unw.preg_index[i]] = off;
2266	for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2267		unw.sw_off[unw.preg_index[i]] = off;
2268
2269	for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2270		if (i > 0)
2271			unw.cache[i].lru_chain = (i - 1);
2272		unw.cache[i].coll_chain = -1;
2273		rwlock_init(&unw.cache[i].lock);
2274	}
2275	unw.lru_head = UNW_CACHE_SIZE - 1;
2276	unw.lru_tail = 0;
2277
2278	init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) __gp,
2279			  __start_unwind, __end_unwind);
2280}
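/*
 * The unw_hash_index_t_is_too_narrow() reference above is a link-time
 * assertion: the function is declared but never defined, so the call is
 * dead-code-eliminated when the constant condition is false and produces an
 * undefined-symbol link error when it is true.  A stand-alone sketch of the
 * same trick (hypothetical names, illustrative only):
 */
#if 0	/* illustrative sketch only */
extern void example_type_is_too_narrow (void);	/* deliberately never defined */

typedef unsigned char example_index_t;
#define EXAMPLE_LOG_SIZE	8

static void
example_check (void)
{
	/* constant condition: the call either vanishes or breaks the link */
	if (8*sizeof(example_index_t) < EXAMPLE_LOG_SIZE)
		example_type_is_too_narrow();
}
#endif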
2281
2282/*
2283 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2284 *
2285 *	This system call has been deprecated.  The new and improved way to get
2286 *	at the kernel's unwind info is via the gate DSO.  The address of the
2287 *	ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
2288 *
2289 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2290 *
2291 * This system call copies the unwind data into the buffer pointed to by BUF and returns
2292 * the size of the unwind data.  If BUF_SIZE is smaller than the size of the unwind data
2293 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2294 * unwind data.
2295 *
2296 * The first portion of the unwind data contains an unwind table and the rest contains
2297 * the associated unwind info (in no particular order).  The unwind table consists of
2298 * entries of the form:
2299 *
2300 *	u64 start;	(64-bit address of start of function)
2301 *	u64 end;	(64-bit address of end of function)
2302 *	u64 info;	(BUF-relative offset to unwind info)
2303 *
2304 * The end of the unwind table is indicated by an entry with a START address of zero.
2305 *
2306 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2307 * on the format of the unwind info.
2308 *
2309 * ERRORS
2310 *	EFAULT	BUF points outside your accessible address space.
2311 */
2312asmlinkage long
2313sys_getunwind (void __user *buf, size_t buf_size)
2314{
2315	if (buf && buf_size >= unw.gate_table_size)
2316		if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
2317			return -EFAULT;
2318	return unw.gate_table_size;
2319}
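/*
 * A minimal sketch of how user space might consume the data described above
 * (the gate-DSO route via AT_SYSINFO_EHDR is the preferred one;
 * __NR_getunwind is assumed to come from the IA-64 <asm/unistd.h>):
 */
#if 0	/* illustrative sketch only */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>

int
main (void)
{
	unsigned long *lp, *buf;
	long size;

	size = syscall(__NR_getunwind, NULL, 0);	/* query the size only */
	if (size <= 0)
		return 1;
	buf = malloc(size);
	if (!buf || syscall(__NR_getunwind, buf, size) != size)
		return 1;

	/* walk (start, end, info) triples until the start == 0 terminator */
	for (lp = buf; lp[0] != 0; lp += 3)
		printf("func [0x%lx-0x%lx) info at offset 0x%lx\n",
		       lp[0], lp[1], lp[2]);
	free(buf);
	return 0;
}
#endif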