Linux Audio

Check our new training course

Linux BSP development engineering services

Need help to port Linux and bootloaders to your hardware?
Loading...
Note: File does not exist in v6.2.
   1/*
   2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
   3 *
   4 *   This program is free software; you can redistribute it and/or
   5 *   modify it under the terms of the GNU General Public License
   6 *   as published by the Free Software Foundation, version 2.
   7 *
   8 *   This program is distributed in the hope that it will be useful, but
   9 *   WITHOUT ANY WARRANTY; without even the implied warranty of
  10 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  11 *   NON INFRINGEMENT.  See the GNU General Public License for
  12 *   more details.
  13 *
  14 * Linux interrupt vectors.
  15 */
  16
  17#include <linux/linkage.h>
  18#include <linux/errno.h>
  19#include <linux/unistd.h>
  20#include <asm/ptrace.h>
  21#include <asm/thread_info.h>
  22#include <asm/irqflags.h>
  23#include <asm/asm-offsets.h>
  24#include <asm/types.h>
  25#include <asm/signal.h>
  26#include <hv/hypervisor.h>
  27#include <arch/abi.h>
  28#include <arch/interrupts.h>
  29#include <arch/spr_def.h>
  30
  31#ifdef CONFIG_PREEMPT
  32# error "No support for kernel preemption currently"
  33#endif
  34
/*
 * PTREGS_PTR: materialize a pointer to a pt_regs field.  On entry to C
 * code, sp sits C_ABI_SAVE_AREA_SIZE below the base of the saved
 * pt_regs, so reg = sp + C_ABI_SAVE_AREA_SIZE + ptreg points at the
 * requested field offset.
 */
  35#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
  36
/* Offset within pt_regs of the saved syscall-number register. */
  37#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
  38
  39
/*
 * push_reg: store \reg through \ptr, then advance \ptr by \delta
 * (default -8, i.e. the pointer walks downward through pt_regs slots).
 * Store and pointer update issue together in one bundle.
 */
  40	.macro  push_reg reg, ptr=sp, delta=-8
  41	{
  42	 st     \ptr, \reg
  43	 addli  \ptr, \ptr, \delta
  44	}
  45	.endm
  46
/*
 * pop_reg: load \reg from *\ptr, then advance \ptr by \delta
 * (default +8, i.e. the pointer walks upward through pt_regs slots).
 */
  47	.macro  pop_reg reg, ptr=sp, delta=8
  48	{
  49	 ld     \reg, \ptr
  50	 addli  \ptr, \ptr, \delta
  51	}
  52	.endm
  53
/*
 * pop_reg_zero: like pop_reg, but additionally zero \zreg in the same
 * bundle.  Used on the return-to-user path so register restores and
 * the zeroing of caller-save registers overlap for free.
 */
  54	.macro  pop_reg_zero reg, zreg, ptr=sp, delta=8
  55	{
  56	 move   \zreg, zero
  57	 ld     \reg, \ptr
  58	 addi   \ptr, \ptr, \delta
  59	}
  60	.endm
  61
/*
 * push_extra_callee_saves: save the callee-saved registers r34..r51
 * into their pt_regs slots (needed before signal delivery / ptrace so
 * the full register set is visible).  \reg is a scratch register: it
 * is overwritten with a pointer to the r51 slot and walked downward;
 * the final push leaves \reg pointing at PTREGS_OFFSET_BASE.
 */
  62	.macro  push_extra_callee_saves reg
  63	PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
  64	push_reg r51, \reg
  65	push_reg r50, \reg
  66	push_reg r49, \reg
  67	push_reg r48, \reg
  68	push_reg r47, \reg
  69	push_reg r46, \reg
  70	push_reg r45, \reg
  71	push_reg r44, \reg
  72	push_reg r43, \reg
  73	push_reg r42, \reg
  74	push_reg r41, \reg
  75	push_reg r40, \reg
  76	push_reg r39, \reg
  77	push_reg r38, \reg
  78	push_reg r37, \reg
  79	push_reg r36, \reg
  80	push_reg r35, \reg
  81	push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
  82	.endm
  83
/*
 * panic: emit \str into .rodata, load its address into r0 with the
 * three-instruction hw2_last/hw1/hw0 immediate sequence, and call the
 * kernel panic() routine (which takes the format string in r0).
 */
  84	.macro  panic str
  85	.pushsection .rodata, "a"
  861:
  87	.asciz  "\str"
  88	.popsection
  89	{
  90	 moveli r0, hw2_last(1b)
  91	}
  92	{
  93	 shl16insli r0, r0, hw1(1b)
  94	}
  95	{
  96	 shl16insli r0, r0, hw0(1b)
  97	 jal    panic
  98	}
  99	.endm
 100
 101
 102#ifdef __COLLECT_LINKER_FEEDBACK__
/*
 * Base label for the per-vector feedback stubs that int_hand emits at
 * (vecnum << 5) into this section; finish_interrupt_save computes
 * jump targets relative to this label.
 */
 103	.pushsection .text.intvec_feedback,"ax"
 104intvec_feedback:
 105	.popsection
 106#endif
 107
 108	/*
 109	 * Default interrupt handler.
 110	 *
 111	 * vecnum is where we'll put this code.
 112	 * c_routine is the C routine we'll call.
 113	 *
 114	 * The C routine is passed two arguments:
 115	 * - A pointer to the pt_regs state.
 116	 * - The interrupt vector number.
 117	 *
 118	 * The "processing" argument specifies the code for processing
 119	 * the interrupt. Defaults to "handle_interrupt".
 120	 */
 121	.macro  int_hand vecnum, vecname, c_routine, processing=handle_interrupt
	/* Each vector occupies a fixed 256-byte slot at vecnum << 8. */
 122	.org    (\vecnum << 8)
 123intvec_\vecname:
 124	/* Temporarily save a register so we have somewhere to work. */
 125
 126	mtspr   SPR_SYSTEM_SAVE_K_1, r0
 127	mfspr   r0, SPR_EX_CONTEXT_K_1
 128
 129	andi    r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
 130
 131	.ifc    \vecnum, INT_DOUBLE_FAULT
 132	/*
 133	 * For double-faults from user-space, fall through to the normal
 134	 * register save and stack setup path.  Otherwise, it's the
 135	 * hypervisor giving us one last chance to dump diagnostics, and we
 136	 * branch to the kernel_double_fault routine to do so.
 137	 */
 138	beqz    r0, 1f
 139	j       _kernel_double_fault
 1401:
 141	.else
 142	/*
 143	 * If we're coming from user-space, then set sp to the top of
 144	 * the kernel stack.  Otherwise, assume sp is already valid.
 145	 */
 146	{
 147	 bnez   r0, 0f
 148	 move   r0, sp
 149	}
 150	.endif
 151
 152	.ifc    \c_routine, do_page_fault
 153	/*
 154	 * The page_fault handler may be downcalled directly by the
 155	 * hypervisor even when Linux is running and has ICS set.
 156	 *
 157	 * In this case the contents of EX_CONTEXT_K_1 reflect the
 158	 * previous fault and can't be relied on to choose whether or
 159	 * not to reinitialize the stack pointer.  So we add a test
 160	 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
 161	 * and if so we don't reinitialize sp, since we must be coming
 162	 * from Linux.  (In fact the precise case is !(val & ~1),
 163	 * but any Linux PC has to have the high bit set.)
 164	 *
 165	 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
 166	 * any path that turns into a downcall to one of our TLB handlers.
 167	 *
 168	 * FIXME: if we end up never using this path, perhaps we should
 169	 * prevent the hypervisor from generating downcalls in this case.
 170	 * The advantage of getting a downcall is we can panic in Linux.
 171	 */
 172	mfspr   r0, SPR_SYSTEM_SAVE_K_2
 173	{
 174	 bltz   r0, 0f    /* high bit in S_S_1_2 is for a PC to use */
 175	 move   r0, sp
 176	}
 177	.endif
 178
 179
 180	/*
 181	 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
 182	 * the current stack top in the higher bits.  So we recover
 183	 * our stack top by just masking off the low bits, then
 184	 * point sp at the top aligned address on the actual stack page.
 185	 */
 186	mfspr   r0, SPR_SYSTEM_SAVE_K_0
 187	mm      r0, zero, LOG2_THREAD_SIZE, 63
 188
 1890:
 190	/*
 191	 * Align the stack mod 64 so we can properly predict what
 192	 * cache lines we need to write-hint to reduce memory fetch
 193	 * latency as we enter the kernel.  The layout of memory is
 194	 * as follows, with cache line 0 at the lowest VA, and cache
 195	 * line 8 just below the r0 value this "andi" computes.
 196	 * Note that we never write to cache line 8, and we skip
 197	 * cache lines 1-3 for syscalls.
 198	 *
 199	 *    cache line 8: ptregs padding (two words)
 200	 *    cache line 7: sp, lr, pc, ex1, faultnum, orig_r0, flags, cmpexch
 201	 *    cache line 6: r46...r53 (tp)
 202	 *    cache line 5: r38...r45
 203	 *    cache line 4: r30...r37
 204	 *    cache line 3: r22...r29
 205	 *    cache line 2: r14...r21
 206	 *    cache line 1: r6...r13
 207	 *    cache line 0: 2 x frame, r0..r5
 208	 */
 209	andi    r0, r0, -64
 210
 211	/*
 212	 * Push the first four registers on the stack, so that we can set
 213	 * them to vector-unique values before we jump to the common code.
 214	 *
 215	 * Registers are pushed on the stack as a struct pt_regs,
 216	 * with the sp initially just above the struct, and when we're
 217	 * done, sp points to the base of the struct, minus
 218	 * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
 219	 *
 220	 * This routine saves just the first four registers, plus the
 221	 * stack context so we can do proper backtracing right away,
 222	 * and defers to handle_interrupt to save the rest.
 223	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum,
 224	 * and needs sp set to its final location at the bottom of
 225	 * the stack frame.
 226	 */
 227	addli   r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
 228	wh64    r0   /* cache line 7 */
 229	{
 230	 st     r0, lr
 231	 addli  r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
 232	}
 233	{
 234	 st     r0, sp
 235	 addli  sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
 236	}
 237	wh64    sp   /* cache line 6 */
 238	{
 239	 st     sp, r52
 240	 addli  sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
 241	}
 242	wh64    sp   /* cache line 0 */
 243	{
 244	 st     sp, r1
 245	 addli  sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
 246	}
 247	{
 248	 st     sp, r2
 249	 addli  sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
 250	}
 251	{
 252	 st     sp, r3
 253	 addli  sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
 254	}
 255	mfspr   r0, SPR_EX_CONTEXT_K_0
 256	.ifc \processing,handle_syscall
 257	/*
 258	 * Bump the saved PC by one bundle so that when we return, we won't
 259	 * execute the same swint instruction again.  We need to do this while
 260	 * we're in the critical section.
 261	 */
 262	addi    r0, r0, 8
 263	.endif
 264	{
 265	 st     sp, r0
 266	 addli  sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
 267	}
 268	mfspr   r0, SPR_EX_CONTEXT_K_1
 269	{
 270	 st     sp, r0
 271	 addi   sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
 272	/*
 273	 * Use r0 for syscalls so it's a temporary; use r1 for interrupts
 274	 * so that it gets passed through unchanged to the handler routine.
 275	 * Note that the .if conditional confusingly spans bundles.
 276	 */
 277	 .ifc \processing,handle_syscall
 278	 movei  r0, \vecnum
 279	}
 280	{
 281	 st     sp, r0
 282	 .else
 283	 movei  r1, \vecnum
 284	}
 285	{
 286	 st     sp, r1
 287	 .endif
 288	 addli  sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
 289	}
 290	mfspr   r0, SPR_SYSTEM_SAVE_K_1    /* Original r0 */
 291	{
 292	 st     sp, r0
 293	 addi   sp, sp, -PTREGS_OFFSET_REG(0) - 8
 294	}
 295	{
 296	 st     sp, zero        /* write zero into "Next SP" frame pointer */
 297	 addi   sp, sp, -8      /* leave SP pointing at bottom of frame */
 298	}
 299	.ifc \processing,handle_syscall
 300	j       handle_syscall
 301	.else
 302	/* Capture per-interrupt SPR context to registers. */
 303	.ifc \c_routine, do_page_fault
 304	mfspr   r2, SPR_SYSTEM_SAVE_K_3   /* address of page fault */
 305	mfspr   r3, SPR_SYSTEM_SAVE_K_2   /* info about page fault */
 306	.else
 307	.ifc \vecnum, INT_ILL_TRANS
 308	mfspr   r2, ILL_TRANS_REASON
 309	.else
 310	.ifc \vecnum, INT_DOUBLE_FAULT
 311	mfspr   r2, SPR_SYSTEM_SAVE_K_2   /* double fault info from HV */
 312	.else
 313	.ifc \c_routine, do_trap
 314	mfspr   r2, GPV_REASON
 315	.else
 316	.ifc \c_routine, op_handle_perf_interrupt
 317	mfspr   r2, PERF_COUNT_STS
 318#if CHIP_HAS_AUX_PERF_COUNTERS()
 319	.else
 320	.ifc \c_routine, op_handle_aux_perf_interrupt
 321	mfspr   r2, AUX_PERF_COUNT_STS
 322	.endif
 323#endif
 324	.endif
 325	.endif
 326	.endif
 327	.endif
 328	.endif
 329	/* Put function pointer in r0 */
 330	moveli  r0, hw2_last(\c_routine)
 331	shl16insli r0, r0, hw1(\c_routine)
 332	{
 333	 shl16insli r0, r0, hw0(\c_routine)
 334	 j       \processing
 335	}
 336	.endif
 337	ENDPROC(intvec_\vecname)
 338
 339#ifdef __COLLECT_LINKER_FEEDBACK__
	/* Per-vector stub for linker-feedback builds, keyed by vecnum << 5. */
 340	.pushsection .text.intvec_feedback,"ax"
 341	.org    (\vecnum << 5)
 342	FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
 343	jrp     lr
 344	.popsection
 345#endif
 346
 347	.endm
 348
 349
 350	/*
 351	 * Save the rest of the registers that we didn't save in the actual
 352	 * vector itself.  We can't use r0-r10 inclusive here.
 353	 */
/*
 * On entry, sp points below pt_regs as set up by int_hand; r0 holds the
 * C routine (syscall path: orig r0) and r1 the vector number.  Per the
 * header comment above, r0-r10 must not be used as scratch here; r52 is
 * the roving pt_regs pointer and r20/r21/r32 are the scratch registers.
 */
 354	.macro  finish_interrupt_save, function
 355
 356	/* If it's a syscall, save a proper orig_r0, otherwise just zero. */
 357	PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
 358	{
 359	 .ifc \function,handle_syscall
 360	 st     r52, r0
 361	 .else
 362	 st     r52, zero
 363	 .endif
 364	 PTREGS_PTR(r52, PTREGS_OFFSET_TP)
 365	}
 366	st      r52, tp
 367	{
 368	 mfspr  tp, CMPEXCH_VALUE
 369	 PTREGS_PTR(r52, PTREGS_OFFSET_CMPEXCH)
 370	}
 371
 372	/*
 373	 * For ordinary syscalls, we save neither caller- nor callee-
 374	 * save registers, since the syscall invoker doesn't expect the
 375	 * caller-saves to be saved, and the called kernel functions will
 376	 * take care of saving the callee-saves for us.
 377	 *
 378	 * For interrupts we save just the caller-save registers.  Saving
 379	 * them is required (since the "caller" can't save them).  Again,
 380	 * the called kernel functions will restore the callee-save
 381	 * registers for us appropriately.
 382	 *
 383	 * On return, we normally restore nothing special for syscalls,
 384	 * and just the caller-save registers for interrupts.
 385	 *
 386	 * However, there are some important caveats to all this:
 387	 *
 388	 * - We always save a few callee-save registers to give us
 389	 *   some scratchpad registers to carry across function calls.
 390	 *
 391	 * - fork/vfork/etc require us to save all the callee-save
 392	 *   registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
 393	 *
 394	 * - We always save r0..r5 and r10 for syscalls, since we need
 395	 *   to reload them a bit later for the actual kernel call, and
 396	 *   since we might need them for -ERESTARTNOINTR, etc.
 397	 *
 398	 * - Before invoking a signal handler, we save the unsaved
 399	 *   callee-save registers so they are visible to the
 400	 *   signal handler or any ptracer.
 401	 *
 402	 * - If the unsaved callee-save registers are modified, we set
 403	 *   a bit in pt_regs so we know to reload them from pt_regs
 404	 *   and not just rely on the kernel function unwinding.
 405	 *   (Done for ptrace register writes and SA_SIGINFO handler.)
 406	 */
 407	{
 408	 st     r52, tp
 409	 PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
 410	}
 411	wh64    r52    /* cache line 4 */
 412	push_reg r33, r52
 413	push_reg r32, r52
 414	push_reg r31, r52
 415	.ifc \function,handle_syscall
 416	push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
 417	push_reg TREG_SYSCALL_NR_NAME, r52, \
 418	  PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
 419	.else
 420
 421	push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
 422	wh64    r52   /* cache line 3 */
 423	push_reg r29, r52
 424	push_reg r28, r52
 425	push_reg r27, r52
 426	push_reg r26, r52
 427	push_reg r25, r52
 428	push_reg r24, r52
 429	push_reg r23, r52
 430	push_reg r22, r52
 431	wh64    r52   /* cache line 2 */
 432	push_reg r21, r52
 433	push_reg r20, r52
 434	push_reg r19, r52
 435	push_reg r18, r52
 436	push_reg r17, r52
 437	push_reg r16, r52
 438	push_reg r15, r52
 439	push_reg r14, r52
 440	wh64    r52   /* cache line 1 */
 441	push_reg r13, r52
 442	push_reg r12, r52
 443	push_reg r11, r52
 444	push_reg r10, r52
 445	push_reg r9, r52
 446	push_reg r8, r52
 447	push_reg r7, r52
 448	push_reg r6, r52
 449
 450	.endif
 451
 452	push_reg r5, r52
 453	st      r52, r4
 454
 455	/*
 456	 * If we will be returning to the kernel, we will need to
 457	 * reset the interrupt masks to the state they had before.
 458	 * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
 459	 */
 460	mfspr   r32, SPR_EX_CONTEXT_K_1
 461	{
 462	 andi   r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
 463	 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
 464	}
 465	beqzt   r32, 1f       /* zero if from user space */
 466	IRQS_DISABLED(r32)    /* zero if irqs enabled */
 467#if PT_FLAGS_DISABLE_IRQ != 1
 468# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
 469#endif
 4701:
 471	.ifnc \function,handle_syscall
 472	/* Record the fact that we saved the caller-save registers above. */
 473	ori     r32, r32, PT_FLAGS_CALLER_SAVES
 474	.endif
 475	st      r21, r32
 476
 477	/*
 478	 * we've captured enough state to the stack (including in
 479	 * particular our EX_CONTEXT state) that we can now release
 480	 * the interrupt critical section and replace it with our
 481	 * standard "interrupts disabled" mask value.  This allows
 482	 * synchronous interrupts (and profile interrupts) to punch
 483	 * through from this point onwards.
 484	 *
 485	 * It's important that no code before this point touch memory
 486	 * other than our own stack (to keep the invariant that this
 487	 * is all that gets touched under ICS), and that no code after
 488	 * this point reference any interrupt-specific SPR, in particular
 489	 * the EX_CONTEXT_K_ values.
 490	 */
 491	.ifc \function,handle_nmi
 492	IRQ_DISABLE_ALL(r20)
 493	.else
 494	IRQ_DISABLE(r20, r21)
 495	.endif
 496	mtspr   INTERRUPT_CRITICAL_SECTION, zero
 497
 498	/* Load tp with our per-cpu offset. */
 499#ifdef CONFIG_SMP
 500	{
 501	 mfspr  r20, SPR_SYSTEM_SAVE_K_0
 502	 moveli r21, hw2_last(__per_cpu_offset)
 503	}
 504	{
 505	 shl16insli r21, r21, hw1(__per_cpu_offset)
 506	 bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
 507	}
 508	shl16insli r21, r21, hw0(__per_cpu_offset)
 509	shl3add r20, r20, r21
 510	ld      tp, r20
 511#else
 512	move    tp, zero
 513#endif
 514
 515#ifdef __COLLECT_LINKER_FEEDBACK__
 516	/*
 517	 * Notify the feedback routines that we were in the
 518	 * appropriate fixed interrupt vector area.  Note that we
 519	 * still have ICS set at this point, so we can't invoke any
 520	 * atomic operations or we will panic.  The feedback
 521	 * routines internally preserve r0..r10 and r30 up.
 522	 */
 523	.ifnc \function,handle_syscall
 524	shli    r20, r1, 5
 525	.else
 526	moveli  r20, INT_SWINT_1 << 5
 527	.endif
 528	moveli  r21, hw2_last(intvec_feedback)
 529	shl16insli r21, r21, hw1(intvec_feedback)
 530	shl16insli r21, r21, hw0(intvec_feedback)
 531	add     r20, r20, r21
 532	jalr    r20
 533
 534	/* And now notify the feedback routines that we are here. */
 535	FEEDBACK_ENTER(\function)
 536#endif
 537
 538	/*
 539	 * Prepare the first 256 stack bytes to be rapidly accessible
 540	 * without having to fetch the background data.
 541	 */
 542	addi    r52, sp, -64
 543	{
 544	 wh64   r52
 545	 addi   r52, r52, -64
 546	}
 547	{
 548	 wh64   r52
 549	 addi   r52, r52, -64
 550	}
 551	{
 552	 wh64   r52
 553	 addi   r52, r52, -64
 554	}
 555	wh64    r52
 556
 557#ifdef CONFIG_TRACE_IRQFLAGS
 558	.ifnc \function,handle_nmi
 559	/*
 560	 * We finally have enough state set up to notify the irq
 561	 * tracing code that irqs were disabled on entry to the handler.
 562	 * The TRACE_IRQS_OFF call clobbers registers r0-r29.
 563	 * For syscalls, we already have the register state saved away
 564	 * on the stack, so we don't bother to do any register saves here,
 565	 * and later we pop the registers back off the kernel stack.
 566	 * For interrupt handlers, save r0-r3 in callee-saved registers.
 567	 */
 568	.ifnc \function,handle_syscall
 569	{ move r30, r0; move r31, r1 }
 570	{ move r32, r2; move r33, r3 }
 571	.endif
 572	TRACE_IRQS_OFF
 573	.ifnc \function,handle_syscall
 574	{ move r0, r30; move r1, r31 }
 575	{ move r2, r32; move r3, r33 }
 576	.endif
 577	.endif
 578#endif
 579
 580	.endm
 581
 582	/*
 583	 * Redispatch a downcall.
 584	 */
/*
 * dc_dispatch: place a one-instruction vector at the fixed slot
 * (\vecnum << 8) that forwards the hypervisor downcall to
 * hv_downcall_dispatch.
 */
 585	.macro  dc_dispatch vecnum, vecname
 586	.org    (\vecnum << 8)
 587intvec_\vecname:
 588	j       hv_downcall_dispatch
 589	ENDPROC(intvec_\vecname)
 590	.endm
 591
 592	/*
 593	 * Common code for most interrupts.  The C function we're eventually
 594	 * going to is in r0, and the faultnum is in r1; the original
 595	 * values for those registers are on the stack.
 596	 */
 597	.pushsection .text.handle_interrupt,"ax"
 598handle_interrupt:
 599	finish_interrupt_save handle_interrupt
 600
 601	/* Jump to the C routine; it should enable irqs as soon as possible. */
	/*
	 * Bundle note: jalr reads the pre-bundle r0 (the C routine from
	 * int_hand) while r0 is simultaneously rewritten to point at
	 * pt_regs, which becomes the callee's first argument.
	 */
 602	{
 603	 jalr   r0
 604	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
 605	}
 606	FEEDBACK_REENTER(handle_interrupt)
 607	{
 608	 movei  r30, 0   /* not an NMI */
 609	 j      interrupt_return
 610	}
 611	STD_ENDPROC(handle_interrupt)
 612
 613/*
 614 * This routine takes a boolean in r30 indicating if this is an NMI.
 615 * If so, we also expect a boolean in r31 indicating whether to
 616 * re-enable the oprofile interrupts.
 617 *
 618 * Note that .Lresume_userspace is jumped to directly in several
 619 * places, and we need to make sure r30 is set correctly in those
 620 * callers as well.
 621 */
 622STD_ENTRY(interrupt_return)
	/* If we're resuming to kernel space, don't check thread flags. */
 623	/* If we're resuming to kernel space, don't check thread flags. */
 624	{
 625	 bnez   r30, .Lrestore_all  /* NMIs don't special-case user-space */
 626	 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
 627	}
 628	ld      r29, r29
 629	andi    r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
 630	{
 631	 beqzt  r29, .Lresume_userspace
 632	 PTREGS_PTR(r29, PTREGS_OFFSET_PC)
 633	}
 634
 635	/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
 636	moveli  r27, hw2_last(_cpu_idle_nap)
 637	{
 638	 ld     r28, r29
 639	 shl16insli r27, r27, hw1(_cpu_idle_nap)
 640	}
 641	{
 642	 shl16insli r27, r27, hw0(_cpu_idle_nap)
 643	}
 644	{
 645	 cmpeq  r27, r27, r28
 646	}
 647	{
 648	 blbc   r27, .Lrestore_all
 649	 addi   r28, r28, 8
 650	}
 651	st      r29, r28
 652	j       .Lrestore_all
 653
 654.Lresume_userspace:
 655	FEEDBACK_REENTER(interrupt_return)
 656
 657	/*
 658	 * Use r33 to hold whether we have already loaded the callee-saves
 659	 * into ptregs.  We don't want to do it twice in this loop, since
 660	 * then we'd clobber whatever changes are made by ptrace, etc.
 661	 */
 662	{
 663	 movei  r33, 0
 664	 move   r32, sp
 665	}
 666
 667	/* Get base of stack in r32. */
 668	EXTRACT_THREAD_INFO(r32)
 669
 670.Lretry_work_pending:
 671	/*
 672	 * Disable interrupts so as to make sure we don't
 673	 * miss an interrupt that sets any of the thread flags (like
 674	 * need_resched or sigpending) between sampling and the iret.
 675	 * Routines like schedule() or do_signal() may re-enable
 676	 * interrupts before returning.
 677	 */
 678	IRQ_DISABLE(r20, r21)
 679	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */
 680
 681
 682	/* Check to see if there is any work to do before returning to user. */
 683	{
 684	 addi   r29, r32, THREAD_INFO_FLAGS_OFFSET
 685	 moveli r1, hw1_last(_TIF_ALLWORK_MASK)
 686	}
 687	{
 688	 ld     r29, r29
 689	 shl16insli r1, r1, hw0(_TIF_ALLWORK_MASK)
 690	}
 691	and     r1, r29, r1
 692	beqzt   r1, .Lrestore_all
 693
 694	/*
 695	 * Make sure we have all the registers saved for signal
 696	 * handling or notify-resume.  Call out to C code to figure out
 697	 * exactly what we need to do for each flag bit, then if
 698	 * necessary, reload the flags and recheck.
 699	 */
 700	{
 701	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
 702	 bnez   r33, 1f
 703	}
 704	push_extra_callee_saves r0
 705	movei   r33, 1
 7061:	jal     do_work_pending
 707	bnez    r0, .Lretry_work_pending
 708
 709	/*
 710	 * In the NMI case we
 711	 * omit the call to single_process_check_nohz, which normally checks
 712	 * to see if we should start or stop the scheduler tick, because
 713	 * we can't call arbitrary Linux code from an NMI context.
 714	 * We always call the homecache TLB deferral code to re-trigger
 715	 * the deferral mechanism.
 716	 *
 717	 * The other chunk of responsibility this code has is to reset the
 718	 * interrupt masks appropriately to reset irqs and NMIs.  We have
 719	 * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
 720	 * lockdep-type stuff, but we can't set ICS until afterwards, since
 721	 * ICS can only be used in very tight chunks of code to avoid
 722	 * tripping over various assertions that it is off.
 723	 */
 724.Lrestore_all:
 725	PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
 726	{
 727	 ld      r0, r0
 728	 PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
 729	}
 730	{
 731	 andi   r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
 732	 ld     r32, r32
 733	}
 734	bnez    r0, 1f
 735	j       2f
 736#if PT_FLAGS_DISABLE_IRQ != 1
 737# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use blbct below
 738#endif
	/* Kernel return: if PT_FLAGS_DISABLE_IRQ was set, keep irqs off. */
 7391:	blbct   r32, 2f
 740	IRQ_DISABLE(r20,r21)
 741	TRACE_IRQS_OFF
 742	movei   r0, 1
 743	mtspr   INTERRUPT_CRITICAL_SECTION, r0
 744	beqzt   r30, .Lrestore_regs
 745	j       3f
	/* User return, or kernel return with irqs previously enabled. */
 7462:	TRACE_IRQS_ON
 747	IRQ_ENABLE_LOAD(r20, r21)
 748	movei   r0, 1
 749	mtspr   INTERRUPT_CRITICAL_SECTION, r0
 750	IRQ_ENABLE_APPLY(r20, r21)
 751	beqzt   r30, .Lrestore_regs
 7523:
 753
 754
 755	/*
 756	 * We now commit to returning from this interrupt, since we will be
 757	 * doing things like setting EX_CONTEXT SPRs and unwinding the stack
 758	 * frame.  No calls should be made to any other code after this point.
 759	 * This code should only be entered with ICS set.
 760	 * r32 must still be set to ptregs.flags.
 761	 * We launch loads to each cache line separately first, so we can
 762	 * get some parallelism out of the memory subsystem.
 763	 * We start zeroing caller-saved registers throughout, since
 764	 * that will save some cycles if this turns out to be a syscall.
 765	 */
 766.Lrestore_regs:
 767
 768	/*
 769	 * Rotate so we have one high bit and one low bit to test.
 770	 * - low bit says whether to restore all the callee-saved registers,
 771	 *   or just r30-r33, and r52 up.
 772	 * - high bit (i.e. sign bit) says whether to restore all the
 773	 *   caller-saved registers, or just r0.
 774	 */
 775#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
 776# error Rotate trick does not work :-)
 777#endif
 778	{
 779	 rotli  r20, r32, 62
 780	 PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
 781	}
 782
 783	/*
 784	 * Load cache lines 0, 4, 6 and 7, in that order, then use
 785	 * the last loaded value, which makes it likely that the other
 786	 * cache lines have also loaded, at which point we should be
 787	 * able to safely read all the remaining words on those cache
 788	 * lines without waiting for the memory subsystem.
 789	 */
 790	pop_reg r0, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
 791	pop_reg r30, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_REG(30)
 792	pop_reg_zero r52, r3, sp, PTREGS_OFFSET_CMPEXCH - PTREGS_OFFSET_REG(52)
 793	pop_reg_zero r21, r27, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_CMPEXCH
 794	pop_reg_zero lr, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_EX1
 795	{
 796	 mtspr  CMPEXCH_VALUE, r21
 797	 move   r4, zero
 798	}
 799	pop_reg r21, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_PC
 800	{
 801	 mtspr  SPR_EX_CONTEXT_K_1, lr
 802	 andi   lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
 803	}
 804	{
 805	 mtspr  SPR_EX_CONTEXT_K_0, r21
 806	 move   r5, zero
 807	}
 808
 809	/* Restore callee-saveds that we actually use. */
 810	pop_reg_zero r31, r6
 811	pop_reg_zero r32, r7
 812	pop_reg_zero r33, r8, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)
 813
 814	/*
 815	 * If we modified other callee-saveds, restore them now.
 816	 * This is rare, but could be via ptrace or signal handler.
 817	 */
 818	{
 819	 move   r9, zero
 820	 blbs   r20, .Lrestore_callees
 821	}
 822.Lcontinue_restore_regs:
 823
 824	/* Check if we're returning from a syscall. */
 825	{
 826	 move   r10, zero
 827	 bltzt  r20, 1f  /* no, so go restore callee-save registers */
 828	}
 829
 830	/*
 831	 * Check if we're returning to userspace.
 832	 * Note that if we're not, we don't worry about zeroing everything.
 833	 */
 834	{
 835	 addli  sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
 836	 bnez   lr, .Lkernel_return
 837	}
 838
 839	/*
 840	 * On return from syscall, we've restored r0 from pt_regs, but we
 841	 * clear the remainder of the caller-saved registers.  We could
 842	 * restore the syscall arguments, but there's not much point,
 843	 * and it ensures user programs aren't trying to use the
 844	 * caller-saves if we clear them, as well as avoiding leaking
 845	 * kernel pointers into userspace.
 846	 */
 847	pop_reg_zero lr, r11, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
 848	pop_reg_zero tp, r12, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
 849	{
 850	 ld     sp, sp
 851	 move   r13, zero
 852	 move   r14, zero
 853	}
 854	{ move r15, zero; move r16, zero }
 855	{ move r17, zero; move r18, zero }
 856	{ move r19, zero; move r20, zero }
 857	{ move r21, zero; move r22, zero }
 858	{ move r23, zero; move r24, zero }
 859	{ move r25, zero; move r26, zero }
 860
 861	/* Set r1 to errno if we are returning an error, otherwise zero. */
 862	{
 863	 moveli r29, 4096
 864	 sub    r1, zero, r0
 865	}
 866	{
 867	 move   r28, zero
 868	 cmpltu r29, r1, r29
 869	}
 870	{
 871	 mnz    r1, r29, r1
 872	 move   r29, zero
 873	}
 874	iret
 875
 876	/*
 877	 * Not a syscall, so restore caller-saved registers.
 878	 * First kick off loads for cache lines 1-3, which we're touching
 879	 * for the first time here.
 880	 */
 881	.align 64
 8821:	pop_reg r29, sp, PTREGS_OFFSET_REG(21) - PTREGS_OFFSET_REG(29)
 883	pop_reg r21, sp, PTREGS_OFFSET_REG(13) - PTREGS_OFFSET_REG(21)
 884	pop_reg r13, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(13)
 885	pop_reg r1
 886	pop_reg r2
 887	pop_reg r3
 888	pop_reg r4
 889	pop_reg r5
 890	pop_reg r6
 891	pop_reg r7
 892	pop_reg r8
 893	pop_reg r9
 894	pop_reg r10
 895	pop_reg r11
 896	pop_reg r12, sp, 16
 897	/* r13 already restored above */
 898	pop_reg r14
 899	pop_reg r15
 900	pop_reg r16
 901	pop_reg r17
 902	pop_reg r18
 903	pop_reg r19
 904	pop_reg r20, sp, 16
 905	/* r21 already restored above */
 906	pop_reg r22
 907	pop_reg r23
 908	pop_reg r24
 909	pop_reg r25
 910	pop_reg r26
 911	pop_reg r27
 912	pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
 913	/* r29 already restored above */
 914	bnez    lr, .Lkernel_return
 915	pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
 916	pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
 917	ld      sp, sp
 918	iret
 919
 920	/*
 921	 * We can't restore tp when in kernel mode, since a thread might
 922	 * have migrated from another cpu and brought a stale tp value.
 923	 */
 924.Lkernel_return:
 925	pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
 926	ld      sp, sp
 927	iret
 928
 929	/* Restore callee-saved registers from r34 to r51. */
 930.Lrestore_callees:
 931	addli  sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
 932	pop_reg r34
 933	pop_reg r35
 934	pop_reg r36
 935	pop_reg r37
 936	pop_reg r38
 937	pop_reg r39
 938	pop_reg r40
 939	pop_reg r41
 940	pop_reg r42
 941	pop_reg r43
 942	pop_reg r44
 943	pop_reg r45
 944	pop_reg r46
 945	pop_reg r47
 946	pop_reg r48
 947	pop_reg r49
 948	pop_reg r50
 949	pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
 950	j .Lcontinue_restore_regs
 951	STD_ENDPROC(interrupt_return)
 952
 953	/*
 954	 * "NMI" interrupts mask ALL interrupts before calling the
 955	 * handler, and don't check thread flags, etc., on the way
 956	 * back out.  In general, the only things we do here for NMIs
 957	 * are register save/restore and dataplane kernel-TLB management.
 958	 * We don't (for example) deal with start/stop of the sched tick.
 959	 */
 960	.pushsection .text.handle_nmi,"ax"
 961handle_nmi:
 962	finish_interrupt_save handle_nmi
	/* Call the C handler; r0 holds the routine and is rewritten to the
	 * pt_regs argument within the same bundle. */
 963	{
 964	 jalr   r0
 965	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
 966	}
 967	FEEDBACK_REENTER(handle_nmi)
	/* r30 = 1 marks the NMI path for interrupt_return; r31 carries the
	 * handler's return value (whether to re-enable oprofile interrupts,
	 * per the interrupt_return header comment). */
 968	{
 969	 movei  r30, 1
 970	 move   r31, r0
 971	}
 972	j       interrupt_return
 973	STD_ENDPROC(handle_nmi)
 974
 975	/*
 976	 * Parallel code for syscalls to handle_interrupt.
 977	 */
 978	.pushsection .text.handle_syscall,"ax"
 979handle_syscall:
 980	finish_interrupt_save handle_syscall
 981
 982	/* Enable irqs. */
 983	TRACE_IRQS_ON
 984	IRQ_ENABLE(r20, r21)
 985
 986	/* Bump the counter for syscalls made on this tile. */
	/* r20 = &irq_stat.syscall_count for this cpu ("tp" appears to be
	 * the per-cpu base register -- confirm against asm-offsets). */
 987	moveli r20, hw2_last(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
 988	shl16insli r20, r20, hw1(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
 989	shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
 990	add     r20, r20, tp
 991	ld4s    r21, r20
 992	{
 993	 addi   r21, r21, 1
 994	 move   r31, sp
 995	}
 996	{
	 /* Store the incremented count; meanwhile derive the
	  * thread_info pointer from sp in r31. */
 997	 st4    r20, r21
 998	 EXTRACT_THREAD_INFO(r31)
 999	}
1000
1001	/* Trace syscalls, if requested. */
1002	addi	r31, r31, THREAD_INFO_FLAGS_OFFSET
1003	ld	r30, r31
1004	andi    r30, r30, _TIF_SYSCALL_TRACE
	/*
	 * Bundle semantics: beqzt tests the pre-bundle r30 (the
	 * _TIF_SYSCALL_TRACE test result) while the addi simultaneously
	 * rewrites r30 to point at thread_info->status for use below.
	 */
1005	{
1006	 addi   r30, r31, THREAD_INFO_STATUS_OFFSET - THREAD_INFO_FLAGS_OFFSET
1007	 beqzt	r30, .Lrestore_syscall_regs
1008	}
1009	jal	do_syscall_trace
1010	FEEDBACK_REENTER(handle_syscall)
1011
1012	/*
1013	 * We always reload our registers from the stack at this
1014	 * point.  They might be valid, if we didn't build with
1015	 * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
1016	 * doing syscall tracing, but there are enough cases now that it
1017	 * seems simplest just to do the reload unconditionally.
1018	 */
1019.Lrestore_syscall_regs:
1020	{
	 /* r30 = thread_info->status (pointer set up in the bundle
	  * above); r11 walks the saved pt_regs register slots. */
1021	 ld     r30, r30
1022	 PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
1023	}
1024	pop_reg r0,  r11
1025	pop_reg r1,  r11
1026	pop_reg r2,  r11
1027	pop_reg r3,  r11
1028	pop_reg r4,  r11
1029	pop_reg r5,  r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
1030	{
1031	 ld     TREG_SYSCALL_NR_NAME, r11
1032	 moveli r21, __NR_syscalls
1033	}
1034
1035	/* Ensure that the syscall number is within the legal range. */
	/* blbs: low bit of thread_info->status set => 32-bit compat task
	 * (the compat path below sign-extends arguments). */
1036	{
1037	 moveli r20, hw2(sys_call_table)
1038	 blbs   r30, .Lcompat_syscall
1039	}
1040	{
1041	 cmpltu r21, TREG_SYSCALL_NR_NAME, r21
1042	 shl16insli r20, r20, hw1(sys_call_table)
1043	}
1044	{
1045	 blbc   r21, .Linvalid_syscall
1046	 shl16insli r20, r20, hw0(sys_call_table)
1047	}
1048.Lload_syscall_pointer:
	/* Index the table of 8-byte function pointers and load the entry. */
1049	shl3add r20, TREG_SYSCALL_NR_NAME, r20
1050	ld      r20, r20
1051
1052	/* Jump to syscall handler. */
1053	jalr    r20
1054.Lhandle_syscall_link: /* value of "lr" after "jalr r20" above */
1055
1056	/*
1057	 * Write our r0 onto the stack so it gets restored instead
1058	 * of whatever the user had there before.
1059	 * In compat mode, sign-extend r0 before storing it.
1060	 */
	/* blbct: skip the sign-extend when the compat bit of r30 is clear. */
1061	{
1062	 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
1063	 blbct  r30, 1f
1064	}
1065	addxi   r0, r0, 0
10661:	st      r29, r0
1067
1068.Lsyscall_sigreturn_skip:
1069	FEEDBACK_REENTER(handle_syscall)
1070
1071	/* Do syscall trace again, if requested. */
1072	ld	r30, r31
1073	andi    r0, r30, _TIF_SYSCALL_TRACE
	/*
	 * Bundle semantics: beqzt tests the pre-bundle r0 (the
	 * _TIF_SYSCALL_TRACE result) while the andi simultaneously
	 * computes the _TIF_SINGLESTEP test for the fall-through path.
	 */
1074	{
1075	 andi    r0, r30, _TIF_SINGLESTEP
1076	 beqzt   r0, 1f
1077	}
1078	jal	do_syscall_trace
1079	FEEDBACK_REENTER(handle_syscall)
	/* r0 was clobbered across the call; recompute the singlestep test. */
1080	andi    r0, r30, _TIF_SINGLESTEP
1081
10821:	beqzt	r0, 2f
1083
1084	/* Single stepping -- notify ptrace. */
1085	{
1086	 movei   r0, SIGTRAP
1087	 jal     ptrace_notify
1088	}
1089	FEEDBACK_REENTER(handle_syscall)
1090
10912:	{
1092	 movei  r30, 0               /* not an NMI */
1093	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
1094	}
1095
1096.Lcompat_syscall:
1097	/*
1098	 * Load the base of the compat syscall table in r20, and
1099	 * range-check the syscall number (duplicated from 64-bit path).
1100	 * Sign-extend all the user's passed arguments to make them consistent.
1101	 * Also save the original "r(n)" values away in "r(11+n)" in
1102	 * case the syscall table entry wants to validate them.
1103	 */
1104	moveli  r20, hw2(compat_sys_call_table)
1105	{
1106	 cmpltu r21, TREG_SYSCALL_NR_NAME, r21
1107	 shl16insli r20, r20, hw1(compat_sys_call_table)
1108	}
1109	{
1110	 blbc   r21, .Linvalid_syscall
1111	 shl16insli r20, r20, hw0(compat_sys_call_table)
1112	}
	/* Each bundle copies the raw value to r(11+n) while addxi
	 * sign-extends the low 32 bits in place (both read the
	 * pre-bundle r(n)). */
1113	{ move r11, r0; addxi r0, r0, 0 }
1114	{ move r12, r1; addxi r1, r1, 0 }
1115	{ move r13, r2; addxi r2, r2, 0 }
1116	{ move r14, r3; addxi r3, r3, 0 }
1117	{ move r15, r4; addxi r4, r4, 0 }
1118	{ move r16, r5; addxi r5, r5, 0 }
1119	j .Lload_syscall_pointer
1120
1121.Linvalid_syscall:
1122	/* Report an invalid syscall back to the user program */
	/* Store -ENOSYS into the saved r0 slot so it is what the user sees. */
1123	{
1124	 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
1125	 movei  r28, -ENOSYS
1126	}
1127	st      r29, r28
1128	{
1129	 movei  r30, 0               /* not an NMI */
1130	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
1131	}
1132	STD_ENDPROC(handle_syscall)
1133
1134	/* Return the address for oprofile to suppress in backtraces. */
1135STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
	/* lnk captures the address of the following bundle ("." below),
	 * so adding (.Lhandle_syscall_link - .) yields the absolute
	 * address of the syscall return point; result in r0. */
1136	lnk     r0
1137	{
1138	 addli  r0, r0, .Lhandle_syscall_link - .
1139	 jrp    lr
1140	}
1141	STD_ENDPROC(handle_syscall_link_address)
1142
1143STD_ENTRY(ret_from_fork)
	/* Return path for a newly forked task (presumably wired up by
	 * copy_thread -- confirm): notify the simulator, run the
	 * scheduler's post-switch tail work, then take the normal
	 * user-return path with r30 = 0 (not an NMI). */
1144	jal     sim_notify_fork
1145	jal     schedule_tail
1146	FEEDBACK_REENTER(ret_from_fork)
1147	{
1148	 movei  r30, 0               /* not an NMI */
1149	 j      .Lresume_userspace   /* jump into middle of interrupt_return */
1150	}
1151	STD_ENDPROC(ret_from_fork)
1152
1153/* Various stub interrupt handlers and syscall handlers */
1154
1155STD_ENTRY_LOCAL(_kernel_double_fault)
	/* Gather context for the C routine: interrupted PC (from
	 * SPR_EX_CONTEXT_K_0), lr, sp, and r52 in r1..r4 (r0 is left as
	 * set by the caller), open a C-ABI save area on the stack, and
	 * tail-jump to kernel_double_fault. */
1156	mfspr   r1, SPR_EX_CONTEXT_K_0
1157	move    r2, lr
1158	move    r3, sp
1159	move    r4, r52
1160	addi    sp, sp, -C_ABI_SAVE_AREA_SIZE
1161	j       kernel_double_fault
1162	STD_ENDPROC(_kernel_double_fault)
1163
1164STD_ENTRY_LOCAL(bad_intr)
	/* Catch-all for vectors with no real handler: read the
	 * interrupted PC into r2 and panic.  The interrupt number for
	 * the "%#x" is presumably already in a register set by the
	 * vector stub -- confirm against the int_hand macro. */
1165	mfspr   r2, SPR_EX_CONTEXT_K_0
1166	panic   "Unhandled interrupt %#x: PC %#lx"
1167	STD_ENDPROC(bad_intr)
1168
1169/* Put address of pt_regs in reg and jump. */
/*
 * PTREGS_SYSCALL(x, reg) emits a stub named "_x" that loads the
 * address of the saved pt_regs into "reg" and tail-jumps to the real
 * handler "x", so the handler receives pt_regs as an extra argument.
 */
1170#define PTREGS_SYSCALL(x, reg)                          \
1171	STD_ENTRY(_##x);                                \
1172	{                                               \
1173	 PTREGS_PTR(reg, PTREGS_OFFSET_BASE);           \
1174	 j      x                                       \
1175	};                                              \
1176	STD_ENDPROC(_##x)
1177
1178/*
1179 * Special-case sigreturn to not write r0 to the stack on return.
1180 * This is technically more efficient, but it also avoids difficulties
1181 * in the 64-bit OS when handling 32-bit compat code, since we must not
1182 * sign-extend r0 for the sigreturn return-value case.
1183 */
/*
 * Like PTREGS_SYSCALL, but first biases lr so that when the handler
 * returns, execution resumes at .Lsyscall_sigreturn_skip instead of
 * .Lhandle_syscall_link, bypassing the store of r0 into pt_regs.
 */
1184#define PTREGS_SYSCALL_SIGRETURN(x, reg)                \
1185	STD_ENTRY(_##x);                                \
1186	addli   lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
1187	{                                               \
1188	 PTREGS_PTR(reg, PTREGS_OFFSET_BASE);           \
1189	 j      x                                       \
1190	};                                              \
1191	STD_ENDPROC(_##x)
1192
	/* Instantiate the pt_regs-passing syscall stubs; the register
	 * argument is the slot the pt_regs pointer goes into (i.e. the
	 * position of the extra argument for each handler). */
1193PTREGS_SYSCALL(sys_execve, r3)
1194PTREGS_SYSCALL(sys_sigaltstack, r2)
1195PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
1196#ifdef CONFIG_COMPAT
1197PTREGS_SYSCALL(compat_sys_execve, r3)
1198PTREGS_SYSCALL(compat_sys_sigaltstack, r2)
1199PTREGS_SYSCALL_SIGRETURN(compat_sys_rt_sigreturn, r0)
1200#endif
1201
1202/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
1203STD_ENTRY(_sys_clone)
	/* push_extra_callee_saves leaves a pt_regs-related pointer in r4
	 * (see the macro near the top of this file), which sys_clone
	 * takes as its extra argument. */
1204	push_extra_callee_saves r4
1205	j       sys_clone
1206	STD_ENDPROC(_sys_clone)
1207
1208/* The single-step support may need to read all the registers. */
1209int_unalign:
	/* Spill the extra callee-saves into pt_regs so do_trap can read
	 * the complete register set; the macro leaves a pt_regs-related
	 * pointer in r0 (see push_extra_callee_saves above), then we
	 * tail-jump to do_trap. */
1210	push_extra_callee_saves r0
1211	j       do_trap
1212
1213/* Fill the return address stack with nonzero entries. */
1214STD_ENTRY(fill_ra_stack)
	/* Preserve the real return address in r0 (jal will clobber lr),
	 * then perform four nested calls so each pushes a nonzero entry
	 * onto the return-address stack; finally return through r0. */
1215	{
1216	 move	r0, lr
1217	 jal	1f
1218	}
12191:	jal	2f
12202:	jal	3f
12213:	jal	4f
12224:	jrp	r0
1223	STD_ENDPROC(fill_ra_stack)
1224
1225/* Include .intrpt1 array of interrupt vectors */
1226	.section ".intrpt1", "ax"
1227
	/* Alias the perf-count handler names to bad_intr (no dedicated
	 * perf handler here); likewise hardwall traps when the kernel is
	 * built without CONFIG_HARDWALL. */
1228#define op_handle_perf_interrupt bad_intr
1229#define op_handle_aux_perf_interrupt bad_intr
1230
1231#ifndef CONFIG_HARDWALL
1232#define do_hardwall_trap bad_intr
1233#endif
1234
	/*
	 * One int_hand entry per hardware interrupt vector, in vector
	 * order.  The CONFIG_KERNEL_PL == 2 conditionals select which of
	 * the per-protection-level variants (single-step, IPI, INTCTRL)
	 * the kernel itself owns; the other level's vector gets bad_intr
	 * or the downcall dispatcher as appropriate.
	 */
1235	int_hand     INT_MEM_ERROR, MEM_ERROR, do_trap
1236	int_hand     INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
1237#if CONFIG_KERNEL_PL == 2
1238	int_hand     INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle
1239	int_hand     INT_SINGLE_STEP_1, SINGLE_STEP_1, bad_intr
1240#else
1241	int_hand     INT_SINGLE_STEP_2, SINGLE_STEP_2, bad_intr
1242	int_hand     INT_SINGLE_STEP_1, SINGLE_STEP_1, gx_singlestep_handle
1243#endif
1244	int_hand     INT_SINGLE_STEP_0, SINGLE_STEP_0, bad_intr
1245	int_hand     INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
1246	int_hand     INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
1247	int_hand     INT_ITLB_MISS, ITLB_MISS, do_page_fault
1248	int_hand     INT_ILL, ILL, do_trap
1249	int_hand     INT_GPV, GPV, do_trap
1250	int_hand     INT_IDN_ACCESS, IDN_ACCESS, do_trap
1251	int_hand     INT_UDN_ACCESS, UDN_ACCESS, do_trap
1252	int_hand     INT_SWINT_3, SWINT_3, do_trap
1253	int_hand     INT_SWINT_2, SWINT_2, do_trap
	/* SWINT_1 is the system-call trap; note the extra SYSCALL flag. */
1254	int_hand     INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
1255	int_hand     INT_SWINT_0, SWINT_0, do_trap
1256	int_hand     INT_ILL_TRANS, ILL_TRANS, do_trap
1257	int_hand     INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
1258	int_hand     INT_DTLB_MISS, DTLB_MISS, do_page_fault
1259	int_hand     INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
1260	int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
1261	int_hand     INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
1262	int_hand     INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
1263	int_hand     INT_IDN_TIMER, IDN_TIMER, bad_intr
1264	int_hand     INT_UDN_TIMER, UDN_TIMER, bad_intr
1265	int_hand     INT_IDN_AVAIL, IDN_AVAIL, bad_intr
1266	int_hand     INT_UDN_AVAIL, UDN_AVAIL, bad_intr
1267	int_hand     INT_IPI_3, IPI_3, bad_intr
1268#if CONFIG_KERNEL_PL == 2
1269	int_hand     INT_IPI_2, IPI_2, tile_dev_intr
1270	int_hand     INT_IPI_1, IPI_1, bad_intr
1271#else
1272	int_hand     INT_IPI_2, IPI_2, bad_intr
1273	int_hand     INT_IPI_1, IPI_1, tile_dev_intr
1274#endif
1275	int_hand     INT_IPI_0, IPI_0, bad_intr
	/* Perf counters are handled as NMIs (fourth argument handle_nmi). */
1276	int_hand     INT_PERF_COUNT, PERF_COUNT, \
1277		     op_handle_perf_interrupt, handle_nmi
1278	int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
1279		     op_handle_perf_interrupt, handle_nmi
1280	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
1281#if CONFIG_KERNEL_PL == 2
1282	dc_dispatch  INT_INTCTRL_2, INTCTRL_2
1283	int_hand     INT_INTCTRL_1, INTCTRL_1, bad_intr
1284#else
1285	int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
1286	dc_dispatch  INT_INTCTRL_1, INTCTRL_1
1287#endif
1288	int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
1289	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
1290		     hv_message_intr
1291	int_hand     INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, bad_intr
1292	int_hand     INT_I_ASID, I_ASID, bad_intr
1293	int_hand     INT_D_ASID, D_ASID, bad_intr
1294	int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
1295
1296	/* Synthetic interrupt delivered only by the simulator */
1297	int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint