Linux Audio

Check our new training course

Loading...
v4.6
   1/*
   2 * arch/sh/kernel/cpu/sh5/entry.S
   3 *
   4 * Copyright (C) 2000, 2001  Paolo Alberelli
   5 * Copyright (C) 2004 - 2008  Paul Mundt
   6 * Copyright (C) 2003, 2004  Richard Curnow
   7 *
   8 * This file is subject to the terms and conditions of the GNU General Public
   9 * License.  See the file "COPYING" in the main directory of this archive
  10 * for more details.
  11 */
  12#include <linux/errno.h>
  13#include <linux/init.h>
  14#include <linux/sys.h>
  15#include <cpu/registers.h>
  16#include <asm/processor.h>
  17#include <asm/unistd.h>
  18#include <asm/thread_info.h>
  19#include <asm/asm-offsets.h>
  20
  21/*
  22 * SR fields.
  23 */
  24#define SR_ASID_MASK	0x00ff0000
  25#define SR_FD_MASK	0x00008000
  26#define SR_SS		0x08000000
  27#define SR_BL		0x10000000
  28#define SR_MD		0x40000000
  29
  30/*
  31 * Event code.
  32 */
  33#define	EVENT_INTERRUPT		0
  34#define	EVENT_FAULT_TLB		1
  35#define	EVENT_FAULT_NOT_TLB	2
  36#define	EVENT_DEBUG		3
  37
  38/* EXPEVT values */
  39#define	RESET_CAUSE		0x20
  40#define DEBUGSS_CAUSE		0x980
  41
  42/*
  43 * Frame layout. Quad index.
  44 */
  45#define	FRAME_T(x)	FRAME_TBASE+(x*8)
  46#define	FRAME_R(x)	FRAME_RBASE+(x*8)
  47#define	FRAME_S(x)	FRAME_SBASE+(x*8)
  48#define FSPC		0
  49#define FSSR		1
  50#define FSYSCALL_ID	2
  51
  52/* Arrange the save frame to be a multiple of 32 bytes long */
  53#define FRAME_SBASE	0
  54#define FRAME_RBASE	(FRAME_SBASE+(3*8))	/* SYSCALL_ID - SSR - SPC */
  55#define FRAME_TBASE	(FRAME_RBASE+(63*8))	/* r0 - r62 */
  56#define FRAME_PBASE	(FRAME_TBASE+(8*8))	/* tr0 -tr7 */
  57#define	FRAME_SIZE	(FRAME_PBASE+(2*8))	/* pad0-pad1 */
  58
  59#define FP_FRAME_SIZE	FP_FRAME_BASE+(33*8)	/* dr0 - dr31 + fpscr */
  60#define FP_FRAME_BASE	0
  61
  62#define	SAVED_R2	0*8
  63#define	SAVED_R3	1*8
  64#define	SAVED_R4	2*8
  65#define	SAVED_R5	3*8
  66#define	SAVED_R18	4*8
  67#define	SAVED_R6	5*8
  68#define	SAVED_TR0	6*8
  69
  70/* These are the registers saved in the TLB path that aren't saved in the first
  71   level of the normal one. */
  72#define	TLB_SAVED_R25	7*8
  73#define	TLB_SAVED_TR1	8*8
  74#define	TLB_SAVED_TR2	9*8
  75#define	TLB_SAVED_TR3	10*8
  76#define	TLB_SAVED_TR4	11*8
  77/* Save R0/R1 : PT-migrating compiler currently dishounours -ffixed-r0 and -ffixed-r1 causing
  78   breakage otherwise. */
  79#define	TLB_SAVED_R0	12*8
  80#define	TLB_SAVED_R1	13*8
  81
/* Mask all interrupt levels (IMASK=0xf).  NOTE: clobbers r6. */
   82#define CLI()				\
	getcon	SR, r6;			\
	ori	r6, 0xf0, r6;		\
	putcon	r6, SR;

/* Unmask all interrupt levels (IMASK=0).  NOTE: clobbers r6. */
   87#define STI()				\
	getcon	SR, r6;			\
	andi	r6, ~0xf0, r6;		\
	putcon	r6, SR;

   92#ifdef CONFIG_PREEMPT
   93#  define preempt_stop()	CLI()
   94#else
   95#  define preempt_stop()
   96#  define resume_kernel		restore_all
   97#endif
  98
   99	.section	.data, "aw"
  100
  101#define FAST_TLBMISS_STACK_CACHELINES 4
  102#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
  103
  104/* Register back-up area for all exceptions */
  105	.balign	32
  106	/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
  107	 * register saves etc. */
  108	.fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
  109/* This is 32 byte aligned by construction */
  110/* Register back-up area for all exceptions */
	/* 14 quadwords: the SAVED_* slots (r2-r5, r18, r6, tr0) plus the
	 * TLB_SAVED_* slots (r25, tr1-tr4, r0, r1) laid out per the
	 * offsets #defined above. */
  111reg_save_area:
  112	.quad	0
  113	.quad	0
  114	.quad	0
  115	.quad	0
  116
  117	.quad	0
  118	.quad	0
  119	.quad	0
  120	.quad	0
  121
  122	.quad	0
  123	.quad	0
  124	.quad	0
  125	.quad	0
  126
  127	.quad	0
  128	.quad   0
  129
  130/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
  131 * reentrancy. Note this area may be accessed via physical address.
  132 * Align so this fits a whole single cache line, for ease of purging.
  133 */
  134	.balign 32,0,32
resvec_save_area:
  136	.quad	0
  137	.quad	0
  138	.quad	0
  139	.quad	0
  140	.quad	0
  141	.balign 32,0,32
 142
/* Jump table of 3rd level handlers.
 * One 32-bit entry per 0x20 of EXPEVT/INTEVT; handle_exception indexes it
 * with (code >> 3), i.e. (code/0x20)*4 bytes, so each entry here must be
 * exactly one .long. */
trap_jtable:
  145	.long	do_exception_error		/* 0x000 */
  146	.long	do_exception_error		/* 0x020 */
  147#ifdef CONFIG_MMU
  148	.long	tlb_miss_load				/* 0x040 */
  149	.long	tlb_miss_store				/* 0x060 */
  150#else
  151	.long	do_exception_error
  152	.long	do_exception_error
  153#endif
  154	! ARTIFICIAL pseudo-EXPEVT setting
  155	.long	do_debug_interrupt		/* 0x080 */
  156#ifdef CONFIG_MMU
  157	.long	tlb_miss_load				/* 0x0A0 */
  158	.long	tlb_miss_store				/* 0x0C0 */
  159#else
  160	.long	do_exception_error
  161	.long	do_exception_error
  162#endif
  163	.long	do_address_error_load	/* 0x0E0 */
  164	.long	do_address_error_store	/* 0x100 */
  165#ifdef CONFIG_SH_FPU
  166	.long	do_fpu_error		/* 0x120 */
  167#else
  168	.long	do_exception_error		/* 0x120 */
  169#endif
  170	.long	do_exception_error		/* 0x140 */
  171	.long	system_call				/* 0x160 */
  172	.long	do_reserved_inst		/* 0x180 */
  173	.long	do_illegal_slot_inst	/* 0x1A0 */
  174	.long	do_exception_error		/* 0x1C0 - NMI */
  175	.long	do_exception_error		/* 0x1E0 */
  176	.rept 15
  177		.long do_IRQ		/* 0x200 - 0x3C0 */
  178	.endr
  179	.long	do_exception_error		/* 0x3E0 */
  180	.rept 32
  181		.long do_IRQ		/* 0x400 - 0x7E0 */
  182	.endr
  183	.long	fpu_error_or_IRQA			/* 0x800 */
  184	.long	fpu_error_or_IRQB			/* 0x820 */
  185	.long	do_IRQ			/* 0x840 */
  186	.long	do_IRQ			/* 0x860 */
  187	.rept 6
  188		.long do_exception_error	/* 0x880 - 0x920 */
  189	.endr
  190	.long	breakpoint_trap_handler	/* 0x940 */
  191	.long	do_exception_error		/* 0x960 */
  192	.long	do_single_step		/* 0x980 */
  193
  194	.rept 3
  195		.long do_exception_error	/* 0x9A0 - 0x9E0 */
  196	.endr
  197	.long	do_IRQ			/* 0xA00 */
  198	.long	do_IRQ			/* 0xA20 */
  199#ifdef CONFIG_MMU
  200	.long	itlb_miss_or_IRQ			/* 0xA40 */
  201#else
  202	.long	do_IRQ
  203#endif
  204	.long	do_IRQ			/* 0xA60 */
  205	.long	do_IRQ			/* 0xA80 */
  206#ifdef CONFIG_MMU
  207	.long	itlb_miss_or_IRQ			/* 0xAA0 */
  208#else
  209	.long	do_IRQ
  210#endif
  211	.long	do_exception_error		/* 0xAC0 */
  212	.long	do_address_error_exec	/* 0xAE0 */
  213	.rept 8
  214		.long do_exception_error	/* 0xB00 - 0xBE0 */
  215	.endr
  216	.rept 18
  217		.long do_IRQ		/* 0xC00 - 0xE20 */
  218	.endr
 219
 220	.section	.text64, "ax"
 221
 222/*
 223 * --- Exception/Interrupt/Event Handling Section
 224 */
 225
 226/*
 227 * VBR and RESVEC blocks.
 228 *
 229 * First level handler for VBR-based exceptions.
 230 *
 231 * To avoid waste of space, align to the maximum text block size.
 232 * This is assumed to be at most 128 bytes or 32 instructions.
 233 * DO NOT EXCEED 32 instructions on the first level handlers !
 234 *
 235 * Also note that RESVEC is contained within the VBR block
 236 * where the room left (1KB - TEXT_SIZE) allows placing
 237 * the RESVEC block (at most 512B + TEXT_SIZE).
 238 *
 239 * So first (and only) level handler for RESVEC-based exceptions.
 240 *
 241 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
 242 * and interrupt) we are a lot tight with register space until
 243 * saving onto the stack frame, which is done in handle_exception().
 244 *
 245 */
 246
  247#define	TEXT_SIZE 	128
  248#define	BLOCK_SIZE 	1664 		/* Dynamic check, 13*128 */
  249
  250	.balign TEXT_SIZE
LVBR_block:
  252	.space	256, 0			/* Power-on class handler, */
  253					/* not required here       */
/* First-level handler for general (non-TLB-miss) exceptions at VBR+0x100.
 * Stashes the registers handle_exception needs, then jumps there with the
 * standard argument set (r2=code, r3=return addr, r4=event, r5=save area). */
not_a_tlb_miss:
  255	synco	/* TAKum03020 (but probably a good idea anyway.) */
  256	/* Save original stack pointer into KCR1 */
  257	putcon	SP, KCR1
  258
  259	/* Save other original registers into reg_save_area */
  260        movi  reg_save_area, SP
  261	st.q	SP, SAVED_R2, r2
  262	st.q	SP, SAVED_R3, r3
  263	st.q	SP, SAVED_R4, r4
  264	st.q	SP, SAVED_R5, r5
  265	st.q	SP, SAVED_R6, r6
  266	st.q	SP, SAVED_R18, r18
  267	gettr	tr0, r3
  268	st.q	SP, SAVED_TR0, r3
  269
  270	/* Set args for Non-debug, Not a TLB miss class handler */
  271	getcon	EXPEVT, r2
  272	movi	ret_from_exception, r3
  273	ori	r3, 1, r3		/* bit0 set: SHmedia return address */
  274	movi	EVENT_FAULT_NOT_TLB, r4
  275	or	SP, ZERO, r5		/* r5 = reg_save_area */
  276	getcon	KCR1, SP		/* restore original SP */
  277	pta	handle_exception, tr0
  278	blink	tr0, ZERO
  279
  280	.balign 256
  281	! VBR+0x200
  282	nop
  283	.balign 256
  284	! VBR+0x300
  285	nop
  286	.balign 256
  287	/*
  288	 * Instead of the natural .balign 1024 place RESVEC here
  289	 * respecting the final 1KB alignment.
  290	 */
  291	.balign TEXT_SIZE
  292	/*
  293	 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
  294	 * block making sure the final alignment is correct.
  295	 */
  296#ifdef CONFIG_MMU
/* First-level TLB miss handler (VBR+0x400).  Tries the fast in-assembly
 * path via do_fast_page_fault; on failure falls back to the generic
 * second-level handler.  Saves more state than not_a_tlb_miss because the
 * fast path runs C-compiled code. */
tlb_miss:
  298	synco	/* TAKum03020 (but probably a good idea anyway.) */
  299	putcon	SP, KCR1
  300	movi	reg_save_area, SP
  301	/* SP is guaranteed 32-byte aligned. */
  302	st.q	SP, TLB_SAVED_R0 , r0
  303	st.q	SP, TLB_SAVED_R1 , r1
  304	st.q	SP, SAVED_R2 , r2
  305	st.q	SP, SAVED_R3 , r3
  306	st.q	SP, SAVED_R4 , r4
  307	st.q	SP, SAVED_R5 , r5
  308	st.q	SP, SAVED_R6 , r6
  309	st.q	SP, SAVED_R18, r18
  310
  311	/* Save R25 for safety; as/ld may want to use it to achieve the call to
  312	 * the code in mm/tlbmiss.c */
  313	st.q	SP, TLB_SAVED_R25, r25
  314	gettr	tr0, r2
  315	gettr	tr1, r3
  316	gettr	tr2, r4
  317	gettr	tr3, r5
  318	gettr	tr4, r18
  319	st.q	SP, SAVED_TR0 , r2
  320	st.q	SP, TLB_SAVED_TR1 , r3
  321	st.q	SP, TLB_SAVED_TR2 , r4
  322	st.q	SP, TLB_SAVED_TR3 , r5
  323	st.q	SP, TLB_SAVED_TR4 , r18
  324
	/* Args: r2 = SSR.MD (privilege at fault), r3 = EXPEVT, r4 = TEA. */
  325	pt	do_fast_page_fault, tr0
  326	getcon	SSR, r2
  327	getcon	EXPEVT, r3
  328	getcon	TEA, r4
  329	shlri	r2, 30, r2
  330	andi	r2, 1, r2	/* r2 = SSR.MD */
  331	blink 	tr0, LINK
  332
  333	pt	fixup_to_invoke_general_handler, tr1
  334
  335	/* If the fast path handler fixed the fault, just drop through quickly
  336	   to the restore code right away to return to the excepting context.
  337	   */
  338	bnei/u	r2, 0, tr1	/* nonzero return: fast path failed */
  339
/* Fast path succeeded: restore everything saved above and rte straight
 * back to the faulting context. */
fast_tlb_miss_restore:
  341	ld.q	SP, SAVED_TR0, r2
  342	ld.q	SP, TLB_SAVED_TR1, r3
  343	ld.q	SP, TLB_SAVED_TR2, r4
  344
  345	ld.q	SP, TLB_SAVED_TR3, r5
  346	ld.q	SP, TLB_SAVED_TR4, r18
  347
  348	ptabs	r2, tr0
  349	ptabs	r3, tr1
  350	ptabs	r4, tr2
  351	ptabs	r5, tr3
  352	ptabs	r18, tr4
  353
  354	ld.q	SP, TLB_SAVED_R0, r0
  355	ld.q	SP, TLB_SAVED_R1, r1
  356	ld.q	SP, SAVED_R2, r2
  357	ld.q	SP, SAVED_R3, r3
  358	ld.q	SP, SAVED_R4, r4
  359	ld.q	SP, SAVED_R5, r5
  360	ld.q	SP, SAVED_R6, r6
  361	ld.q	SP, SAVED_R18, r18
  362	ld.q	SP, TLB_SAVED_R25, r25
  363
  364	getcon	KCR1, SP
  365	rte
  366	nop /* for safety, in case the code is run on sh5-101 cut1.x */
  367
fixup_to_invoke_general_handler:
  369
  370	/* OK, new method.  Restore stuff that's not expected to get saved into
  371	   the 'first-level' reg save area, then just fall through to setting
  372	   up the registers and calling the second-level handler. */
  373
  374	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved.  So we must restore
  375	   r25,tr1-4 and save r6 to get into the right state.  */
  376
  377	ld.q	SP, TLB_SAVED_TR1, r3
  378	ld.q	SP, TLB_SAVED_TR2, r4
  379	ld.q	SP, TLB_SAVED_TR3, r5
  380	ld.q	SP, TLB_SAVED_TR4, r18
  381	ld.q	SP, TLB_SAVED_R25, r25
  382
  383	ld.q	SP, TLB_SAVED_R0, r0
  384	ld.q	SP, TLB_SAVED_R1, r1
  385
  386	ptabs/u	r3, tr1
  387	ptabs/u	r4, tr2
  388	ptabs/u	r5, tr3
  389	ptabs/u	r18, tr4
  390
  391	/* Set args for Non-debug, TLB miss class handler */
  392	getcon	EXPEVT, r2
  393	movi	ret_from_exception, r3
  394	ori	r3, 1, r3		/* bit0 set: SHmedia return address */
  395	movi	EVENT_FAULT_TLB, r4
  396	or	SP, ZERO, r5		/* r5 = reg_save_area */
  397	getcon	KCR1, SP
  398	pta	handle_exception, tr0
  399	blink	tr0, ZERO
  400#else /* CONFIG_MMU */
  401	.balign 256
  402#endif
 403
  404/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
  405   DOES END UP AT VBR+0x600 */
  406	nop
  407	nop
  408	nop
  409	nop
  410	nop
  411	nop
  412
  413	.balign 256
  414	/* VBR + 0x600 */
  415
/* First-level external interrupt handler.  Same register stash as
 * not_a_tlb_miss, but dispatches on INTEVT and returns via ret_from_irq. */
interrupt:
  417	synco	/* TAKum03020 (but probably a good idea anyway.) */
  418	/* Save original stack pointer into KCR1 */
  419	putcon	SP, KCR1
  420
  421	/* Save other original registers into reg_save_area */
  422        movi  reg_save_area, SP
  423	st.q	SP, SAVED_R2, r2
  424	st.q	SP, SAVED_R3, r3
  425	st.q	SP, SAVED_R4, r4
  426	st.q	SP, SAVED_R5, r5
  427	st.q	SP, SAVED_R6, r6
  428	st.q	SP, SAVED_R18, r18
  429	gettr	tr0, r3
  430	st.q	SP, SAVED_TR0, r3
  431
  432	/* Set args for interrupt class handler */
  433	getcon	INTEVT, r2
  434	movi	ret_from_irq, r3
  435	ori	r3, 1, r3		/* bit0 set: SHmedia return address */
  436	movi	EVENT_INTERRUPT, r4
  437	or	SP, ZERO, r5		/* r5 = reg_save_area */
  438	getcon	KCR1, SP
  439	pta	handle_exception, tr0
  440	blink	tr0, ZERO
  441	.balign	TEXT_SIZE		/* let's waste the bare minimum */
  442
LVBR_block_end:				/* Marker. Used for total checking */
 444
  445	.balign 256
LRESVEC_block:
  447	/* Panic handler. Called with MMU off. Possible causes/actions:
  448	 * - Reset:		Jump to program start.
  449	 * - Single Step:	Turn off Single Step & return.
  450	 * - Others:		Call panic handler, passing PC as arg.
  451	 *			(this may need to be extended...)
  452	 */
reset_or_panic:
  454	synco	/* TAKum03020 (but probably a good idea anyway.) */
  455	putcon	SP, DCR
  456	/* First save r0-1 and tr0, as we need to use these */
	/* MMU is off, so address the save area by its physical address. */
  457	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
  458	st.q	SP, 0, r0
  459	st.q	SP, 8, r1
  460	gettr	tr0, r0
  461	st.q	SP, 32, r0
  462
  463	/* Check cause */
  464	getcon	EXPEVT, r0
  465	movi	RESET_CAUSE, r1
  466	sub	r1, r0, r1		/* r1=0 if reset */
  467	movi	_stext-CONFIG_PAGE_OFFSET, r0
  468	ori	r0, 1, r0
  469	ptabs	r0, tr0
  470	beqi	r1, 0, tr0		/* Jump to start address if reset */
  471
  472	getcon	EXPEVT, r0
  473	movi	DEBUGSS_CAUSE, r1
  474	sub	r1, r0, r1		/* r1=0 if single step */
  475	pta	single_step_panic, tr0
  476	beqi	r1, 0, tr0		/* jump if single step */
  477
  478	/* Now jump to where we save the registers. */
  479	movi	panic_stash_regs-CONFIG_PAGE_OFFSET, r1
  480	ptabs	r1, tr0
  481	blink	tr0, r63
  482
single_step_panic:
  484	/* We are in a handler with Single Step set. We need to resume the
  485	 * handler, by turning on MMU & turning off Single Step. */
  486	getcon	SSR, r0
  487	movi	SR_MMU, r1
  488	or	r0, r1, r0
  489	movi	~SR_SS, r1
  490	and	r0, r1, r0
  491	putcon	r0, SSR
  492	/* Restore EXPEVT, as the rte won't do this */
  493	getcon	PEXPEVT, r0
  494	putcon	r0, EXPEVT
  495	/* Restore regs */
  496	ld.q	SP, 32, r0
  497	ptabs	r0, tr0
  498	ld.q	SP, 0, r0
  499	ld.q	SP, 8, r1
  500	getcon	DCR, SP
  501	synco
  502	rte
 504
 505	.balign	256
 506debug_exception:
 507	synco	/* TAKum03020 (but probably a good idea anyway.) */
 508	/*
 509	 * Single step/software_break_point first level handler.
 510	 * Called with MMU off, so the first thing we do is enable it
 511	 * by doing an rte with appropriate SSR.
 512	 */
 513	putcon	SP, DCR
 514	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
 515	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
 516
 517	/* With the MMU off, we are bypassing the cache, so purge any
 518         * data that will be made stale by the following stores.
 519         */
 520	ocbp	SP, 0
 521	synco
 522
 523	st.q	SP, 0, r0
 524	st.q	SP, 8, r1
 525	getcon	SPC, r0
 526	st.q	SP, 16, r0
 527	getcon	SSR, r0
 528	st.q	SP, 24, r0
 529
 530	/* Enable MMU, block exceptions, set priv mode, disable single step */
 531	movi	SR_MMU | SR_BL | SR_MD, r1
 532	or	r0, r1, r0
 533	movi	~SR_SS, r1
 534	and	r0, r1, r0
 535	putcon	r0, SSR
 536	/* Force control to debug_exception_2 when rte is executed */
 537	movi	debug_exeception_2, r0
 538	ori	r0, 1, r0      /* force SHmedia, just in case */
 539	putcon	r0, SPC
 540	getcon	DCR, SP
 541	synco
 542	rte
 543debug_exeception_2:
 544	/* Restore saved regs */
 545	putcon	SP, KCR1
 546	movi	resvec_save_area, SP
 547	ld.q	SP, 24, r0
 548	putcon	r0, SSR
 549	ld.q	SP, 16, r0
 550	putcon	r0, SPC
 551	ld.q	SP, 0, r0
 552	ld.q	SP, 8, r1
 553
 554	/* Save other original registers into reg_save_area */
 555        movi  reg_save_area, SP
 556	st.q	SP, SAVED_R2, r2
 557	st.q	SP, SAVED_R3, r3
 558	st.q	SP, SAVED_R4, r4
 559	st.q	SP, SAVED_R5, r5
 560	st.q	SP, SAVED_R6, r6
 561	st.q	SP, SAVED_R18, r18
 562	gettr	tr0, r3
 563	st.q	SP, SAVED_TR0, r3
 564
 565	/* Set args for debug class handler */
 566	getcon	EXPEVT, r2
 567	movi	ret_from_exception, r3
 568	ori	r3, 1, r3
 569	movi	EVENT_DEBUG, r4
 570	or	SP, ZERO, r5
 571	getcon	KCR1, SP
 572	pta	handle_exception, tr0
 573	blink	tr0, ZERO
 574
  575	.balign	256
/* RESVEC debug-interrupt hook.  Entered in real mode; constructs an SSR/SPC
 * pair so that rte lands in handle_exception with MMU and privilege on,
 * using the otherwise-unused pseudo-EXPEVT value 0x80 (see trap_jtable). */
debug_interrupt:
  577	/* !!! WE COME HERE IN REAL MODE !!! */
  578	/* Hook-up debug interrupt to allow various debugging options to be
  579	 * hooked into its handler. */
  580	/* Save original stack pointer into KCR1 */
  581	synco
  582	putcon	SP, KCR1
  583	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
  584	ocbp	SP, 0
  585	ocbp	SP, 32
  586	synco
  587
  588	/* Save other original registers into reg_save_area thru real addresses */
  589	st.q	SP, SAVED_R2, r2
  590	st.q	SP, SAVED_R3, r3
  591	st.q	SP, SAVED_R4, r4
  592	st.q	SP, SAVED_R5, r5
  593	st.q	SP, SAVED_R6, r6
  594	st.q	SP, SAVED_R18, r18
  595	gettr	tr0, r3
  596	st.q	SP, SAVED_TR0, r3
  597
  598	/* move (spc,ssr)->(pspc,pssr).  The rte will shift
  599	   them back again, so that they look like the originals
  600	   as far as the real handler code is concerned. */
  601	getcon	spc, r6
  602	putcon	r6, pspc
  603	getcon	ssr, r6
  604	putcon	r6, pssr
  605
  606	! construct useful SR for handle_exception
  607	movi	3, r6
  608	shlli	r6, 30, r6		! r6 = MD | MMU bits
  609	getcon	sr, r18
  610	or	r18, r6, r6
  611	putcon	r6, ssr
  612
  613	! SSR is now the current SR with the MD and MMU bits set
  614	! i.e. the rte will switch back to priv mode and put
  615	! the mmu back on
  616
  617	! construct spc
  618	movi	handle_exception, r18
  619	ori	r18, 1, r18		! for safety (do we need this?)
  620	putcon	r18, spc
  621
  622	/* Set args for Non-debug, Not a TLB miss class handler */
  623
  624	! EXPEVT==0x80 is unused, so 'steal' this value to put the
  625	! debug interrupt handler in the vectoring table
  626	movi	0x80, r2
  627	movi	ret_from_exception, r3
  628	ori	r3, 1, r3
  629	movi	EVENT_FAULT_NOT_TLB, r4
  630
	! r5 must be the virtual address of the save area, so rebase the
	! physical SP by CONFIG_PAGE_OFFSET before handing it over.
  631	or	SP, ZERO, r5
  632	movi	CONFIG_PAGE_OFFSET, r6
  633	add	r6, r5, r5
  634	getcon	KCR1, SP
  635
  636	synco	! for safety
  637	rte	! -> handle_exception, switch back to priv mode again
  638
LRESVEC_block_end:			/* Marker. Unused. */
  640
  641	.balign	TEXT_SIZE
 642
 643/*
 644 * Second level handler for VBR-based exceptions. Pre-handler.
 645 * In common to all stack-frame sensitive handlers.
 646 *
 647 * Inputs:
 648 * (KCR0) Current [current task union]
 649 * (KCR1) Original SP
 650 * (r2)   INTEVT/EXPEVT
 651 * (r3)   appropriate return address
 652 * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
 653 * (r5)   Pointer to reg_save_area
 654 * (SP)   Original SP
 655 *
 656 * Available registers:
 657 * (r6)
 658 * (r18)
 659 * (tr0)
 660 *
 661 */
 662handle_exception:
 663	/* Common 2nd level handler. */
 664
 665	/* First thing we need an appropriate stack pointer */
 666	getcon	SSR, r6
 667	shlri	r6, 30, r6
 668	andi	r6, 1, r6
 669	pta	stack_ok, tr0
 670	bne	r6, ZERO, tr0		/* Original stack pointer is fine */
 671
 672	/* Set stack pointer for user fault */
 673	getcon	KCR0, SP
 674	movi	THREAD_SIZE, r6		/* Point to the end */
 675	add	SP, r6, SP
 676
 677stack_ok:
 678
 679/* DEBUG : check for underflow/overflow of the kernel stack */
 680	pta	no_underflow, tr0
 681	getcon  KCR0, r6
 682	movi	1024, r18
 683	add	r6, r18, r6
 684	bge	SP, r6, tr0 	! ? below 1k from bottom of stack : danger zone
 685
 686/* Just panic to cause a crash. */
 687bad_sp:
 688	ld.b	r63, 0, r6
 689	nop
 690
 691no_underflow:
 692	pta	bad_sp, tr0
 693	getcon	kcr0, r6
 694	movi	THREAD_SIZE, r18
 695	add	r18, r6, r6
 696	bgt	SP, r6, tr0	! sp above the stack
 697
 698	/* Make some room for the BASIC frame. */
 699	movi	-(FRAME_SIZE), r6
 700	add	SP, r6, SP
 701
 702/* Could do this with no stalling if we had another spare register, but the
 703   code below will be OK. */
 704	ld.q	r5, SAVED_R2, r6
 705	ld.q	r5, SAVED_R3, r18
 706	st.q	SP, FRAME_R(2), r6
 707	ld.q	r5, SAVED_R4, r6
 708	st.q	SP, FRAME_R(3), r18
 709	ld.q	r5, SAVED_R5, r18
 710	st.q	SP, FRAME_R(4), r6
 711	ld.q	r5, SAVED_R6, r6
 712	st.q	SP, FRAME_R(5), r18
 713	ld.q	r5, SAVED_R18, r18
 714	st.q	SP, FRAME_R(6), r6
 715	ld.q	r5, SAVED_TR0, r6
 716	st.q	SP, FRAME_R(18), r18
 717	st.q	SP, FRAME_T(0), r6
 718
 719	/* Keep old SP around */
 720	getcon	KCR1, r6
 721
 722	/* Save the rest of the general purpose registers */
 723	st.q	SP, FRAME_R(0), r0
 724	st.q	SP, FRAME_R(1), r1
 725	st.q	SP, FRAME_R(7), r7
 726	st.q	SP, FRAME_R(8), r8
 727	st.q	SP, FRAME_R(9), r9
 728	st.q	SP, FRAME_R(10), r10
 729	st.q	SP, FRAME_R(11), r11
 730	st.q	SP, FRAME_R(12), r12
 731	st.q	SP, FRAME_R(13), r13
 732	st.q	SP, FRAME_R(14), r14
 733
 734	/* SP is somewhere else */
 735	st.q	SP, FRAME_R(15), r6
 736
 737	st.q	SP, FRAME_R(16), r16
 738	st.q	SP, FRAME_R(17), r17
 739	/* r18 is saved earlier. */
 740	st.q	SP, FRAME_R(19), r19
 741	st.q	SP, FRAME_R(20), r20
 742	st.q	SP, FRAME_R(21), r21
 743	st.q	SP, FRAME_R(22), r22
 744	st.q	SP, FRAME_R(23), r23
 745	st.q	SP, FRAME_R(24), r24
 746	st.q	SP, FRAME_R(25), r25
 747	st.q	SP, FRAME_R(26), r26
 748	st.q	SP, FRAME_R(27), r27
 749	st.q	SP, FRAME_R(28), r28
 750	st.q	SP, FRAME_R(29), r29
 751	st.q	SP, FRAME_R(30), r30
 752	st.q	SP, FRAME_R(31), r31
 753	st.q	SP, FRAME_R(32), r32
 754	st.q	SP, FRAME_R(33), r33
 755	st.q	SP, FRAME_R(34), r34
 756	st.q	SP, FRAME_R(35), r35
 757	st.q	SP, FRAME_R(36), r36
 758	st.q	SP, FRAME_R(37), r37
 759	st.q	SP, FRAME_R(38), r38
 760	st.q	SP, FRAME_R(39), r39
 761	st.q	SP, FRAME_R(40), r40
 762	st.q	SP, FRAME_R(41), r41
 763	st.q	SP, FRAME_R(42), r42
 764	st.q	SP, FRAME_R(43), r43
 765	st.q	SP, FRAME_R(44), r44
 766	st.q	SP, FRAME_R(45), r45
 767	st.q	SP, FRAME_R(46), r46
 768	st.q	SP, FRAME_R(47), r47
 769	st.q	SP, FRAME_R(48), r48
 770	st.q	SP, FRAME_R(49), r49
 771	st.q	SP, FRAME_R(50), r50
 772	st.q	SP, FRAME_R(51), r51
 773	st.q	SP, FRAME_R(52), r52
 774	st.q	SP, FRAME_R(53), r53
 775	st.q	SP, FRAME_R(54), r54
 776	st.q	SP, FRAME_R(55), r55
 777	st.q	SP, FRAME_R(56), r56
 778	st.q	SP, FRAME_R(57), r57
 779	st.q	SP, FRAME_R(58), r58
 780	st.q	SP, FRAME_R(59), r59
 781	st.q	SP, FRAME_R(60), r60
 782	st.q	SP, FRAME_R(61), r61
 783	st.q	SP, FRAME_R(62), r62
 784
 785	/*
 786	 * Save the S* registers.
 787	 */
 788	getcon	SSR, r61
 789	st.q	SP, FRAME_S(FSSR), r61
 790	getcon	SPC, r62
 791	st.q	SP, FRAME_S(FSPC), r62
 792	movi	-1, r62			/* Reset syscall_nr */
 793	st.q	SP, FRAME_S(FSYSCALL_ID), r62
 794
 795	/* Save the rest of the target registers */
 796	gettr	tr1, r6
 797	st.q	SP, FRAME_T(1), r6
 798	gettr	tr2, r6
 799	st.q	SP, FRAME_T(2), r6
 800	gettr	tr3, r6
 801	st.q	SP, FRAME_T(3), r6
 802	gettr	tr4, r6
 803	st.q	SP, FRAME_T(4), r6
 804	gettr	tr5, r6
 805	st.q	SP, FRAME_T(5), r6
 806	gettr	tr6, r6
 807	st.q	SP, FRAME_T(6), r6
 808	gettr	tr7, r6
 809	st.q	SP, FRAME_T(7), r6
 810
 811	! setup FP so that unwinder can wind back through nested kernel mode
 812	! exceptions
 813	add	SP, ZERO, r14
 814
 815	/* For syscall and debug race condition, get TRA now */
 816	getcon	TRA, r5
 817
 818	/* We are in a safe position to turn SR.BL off, but set IMASK=0xf
 819	 * Also set FD, to catch FPU usage in the kernel.
 820	 *
 821	 * benedict.gaster@superh.com 29/07/2002
 822	 *
 823	 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
 824	 * same time change BL from 1->0, as any pending interrupt of a level
 825	 * higher than he previous value of IMASK will leak through and be
 826	 * taken unexpectedly.
 827	 *
 828	 * To avoid this we raise the IMASK and then issue another PUTCON to
 829	 * enable interrupts.
 830         */
 831	getcon	SR, r6
 832	movi	SR_IMASK | SR_FD, r7
 833	or	r6, r7, r6
 834	putcon	r6, SR
 835	movi	SR_UNBLOCK_EXC, r7
 836	and	r6, r7, r6
 837	putcon	r6, SR
 838
 839
 840	/* Now call the appropriate 3rd level handler */
 841	or	r3, ZERO, LINK
 842	movi	trap_jtable, r3
 843	shlri	r2, 3, r2
 844	ldx.l	r2, r3, r3
 845	shlri	r2, 2, r2
 846	ptabs	r3, tr0
 847	or	SP, ZERO, r3
 848	blink	tr0, ZERO
 849
 850/*
 851 * Second level handler for VBR-based exceptions. Post-handlers.
 852 *
 853 * Post-handlers for interrupts (ret_from_irq), exceptions
 854 * (ret_from_exception) and common reentrance doors (restore_all
 855 * to get back to the original context, ret_from_syscall loop to
 856 * check kernel exiting).
 857 *
 858 * ret_with_reschedule and work_notifysig are an inner lables of
 859 * the ret_from_syscall loop.
 860 *
 861 * In common to all stack-frame sensitive handlers.
 862 *
 863 * Inputs:
 864 * (SP)   struct pt_regs *, original register's frame pointer (basic)
 865 *
 866 */
 867	.global ret_from_irq
 868ret_from_irq:
 869	ld.q	SP, FRAME_S(FSSR), r6
 870	shlri	r6, 30, r6
 871	andi	r6, 1, r6
 872	pta	resume_kernel, tr0
 873	bne	r6, ZERO, tr0		/* no further checks */
 874	STI()
 875	pta	ret_with_reschedule, tr0
 876	blink	tr0, ZERO		/* Do not check softirqs */
 877
 878	.global ret_from_exception
 879ret_from_exception:
 880	preempt_stop()
 881
 882	ld.q	SP, FRAME_S(FSSR), r6
 883	shlri	r6, 30, r6
 884	andi	r6, 1, r6
 885	pta	resume_kernel, tr0
 886	bne	r6, ZERO, tr0		/* no further checks */
 887
 888	/* Check softirqs */
 889
 890#ifdef CONFIG_PREEMPT
 891	pta   ret_from_syscall, tr0
 892	blink   tr0, ZERO
 893
 894resume_kernel:
 895	CLI()
 896
 897	pta	restore_all, tr0
 898
 899	getcon	KCR0, r6
 900	ld.l	r6, TI_PRE_COUNT, r7
 901	beq/u	r7, ZERO, tr0
 902
 903need_resched:
 904	ld.l	r6, TI_FLAGS, r7
 905	movi	(1 << TIF_NEED_RESCHED), r8
 906	and	r8, r7, r8
 907	bne	r8, ZERO, tr0
 908
 909	getcon	SR, r7
 910	andi	r7, 0xf0, r7
 911	bne	r7, ZERO, tr0
 912
 913	movi	preempt_schedule_irq, r7
 914	ori	r7, 1, r7
 915	ptabs	r7, tr1
 916	blink	tr1, LINK
 917
 918	pta	need_resched, tr1
 919	blink	tr1, ZERO
 920#endif
 921
  922	.global ret_from_syscall
/* User-mode exit loop: check TIF flags and either reschedule, deliver
 * signals/notifications, or fall through to restore_all. */
ret_from_syscall:
  924
ret_with_reschedule:
  926	getcon	KCR0, r6		! r6 contains current_thread_info
  927	ld.l	r6, TI_FLAGS, r7	! r7 contains current_thread_info->flags
  928
  929	movi	_TIF_NEED_RESCHED, r8
  930	and	r8, r7, r8
  931	pta	work_resched, tr0
  932	bne	r8, ZERO, tr0
  933
  934	pta	restore_all, tr1
  935
  936	movi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
  937	and	r8, r7, r8
  938	pta	work_notifysig, tr0
  939	bne	r8, ZERO, tr0
  940
  941	blink	tr1, ZERO		! no work: restore_all
  942
work_resched:
  944	pta	ret_from_syscall, tr0
  945	gettr	tr0, LINK		! schedule() returns to ret_from_syscall
  946	movi	schedule, r6
  947	ptabs	r6, tr0
  948	blink	tr0, ZERO		/* Call schedule(), return on top */
  949
work_notifysig:
  951	gettr	tr1, LINK		! return to restore_all afterwards
  952
  953	movi	do_notify_resume, r6
  954	ptabs	r6, tr0
  955	or	SP, ZERO, r2
  956	or	r7, ZERO, r3
  957	blink	tr0, LINK	    /* Call do_notify_resume(regs, current_thread_info->flags), return here */
 958
/* Final exit: reload every register from the pt_regs frame at SP, restore
 * SSR/SPC (preserving the live ASID), and rte back to the saved context.
 * r59-r62 are reloaded last because they are used as scratch for the
 * SR/SSR/SPC fix-up below; SP itself is reloaded by the very last load. */
restore_all:
  960	/* Do prefetches */
  961
  962	ld.q	SP, FRAME_T(0), r6
  963	ld.q	SP, FRAME_T(1), r7
  964	ld.q	SP, FRAME_T(2), r8
  965	ld.q	SP, FRAME_T(3), r9
  966	ptabs	r6, tr0
  967	ptabs	r7, tr1
  968	ptabs	r8, tr2
  969	ptabs	r9, tr3
  970	ld.q	SP, FRAME_T(4), r6
  971	ld.q	SP, FRAME_T(5), r7
  972	ld.q	SP, FRAME_T(6), r8
  973	ld.q	SP, FRAME_T(7), r9
  974	ptabs	r6, tr4
  975	ptabs	r7, tr5
  976	ptabs	r8, tr6
  977	ptabs	r9, tr7
  978
  979	ld.q	SP, FRAME_R(0), r0
  980	ld.q	SP, FRAME_R(1), r1
  981	ld.q	SP, FRAME_R(2), r2
  982	ld.q	SP, FRAME_R(3), r3
  983	ld.q	SP, FRAME_R(4), r4
  984	ld.q	SP, FRAME_R(5), r5
  985	ld.q	SP, FRAME_R(6), r6
  986	ld.q	SP, FRAME_R(7), r7
  987	ld.q	SP, FRAME_R(8), r8
  988	ld.q	SP, FRAME_R(9), r9
  989	ld.q	SP, FRAME_R(10), r10
  990	ld.q	SP, FRAME_R(11), r11
  991	ld.q	SP, FRAME_R(12), r12
  992	ld.q	SP, FRAME_R(13), r13
  993	ld.q	SP, FRAME_R(14), r14
  994
  995	ld.q	SP, FRAME_R(16), r16
  996	ld.q	SP, FRAME_R(17), r17
  997	ld.q	SP, FRAME_R(18), r18
  998	ld.q	SP, FRAME_R(19), r19
  999	ld.q	SP, FRAME_R(20), r20
 1000	ld.q	SP, FRAME_R(21), r21
 1001	ld.q	SP, FRAME_R(22), r22
 1002	ld.q	SP, FRAME_R(23), r23
 1003	ld.q	SP, FRAME_R(24), r24
 1004	ld.q	SP, FRAME_R(25), r25
 1005	ld.q	SP, FRAME_R(26), r26
 1006	ld.q	SP, FRAME_R(27), r27
 1007	ld.q	SP, FRAME_R(28), r28
 1008	ld.q	SP, FRAME_R(29), r29
 1009	ld.q	SP, FRAME_R(30), r30
 1010	ld.q	SP, FRAME_R(31), r31
 1011	ld.q	SP, FRAME_R(32), r32
 1012	ld.q	SP, FRAME_R(33), r33
 1013	ld.q	SP, FRAME_R(34), r34
 1014	ld.q	SP, FRAME_R(35), r35
 1015	ld.q	SP, FRAME_R(36), r36
 1016	ld.q	SP, FRAME_R(37), r37
 1017	ld.q	SP, FRAME_R(38), r38
 1018	ld.q	SP, FRAME_R(39), r39
 1019	ld.q	SP, FRAME_R(40), r40
 1020	ld.q	SP, FRAME_R(41), r41
 1021	ld.q	SP, FRAME_R(42), r42
 1022	ld.q	SP, FRAME_R(43), r43
 1023	ld.q	SP, FRAME_R(44), r44
 1024	ld.q	SP, FRAME_R(45), r45
 1025	ld.q	SP, FRAME_R(46), r46
 1026	ld.q	SP, FRAME_R(47), r47
 1027	ld.q	SP, FRAME_R(48), r48
 1028	ld.q	SP, FRAME_R(49), r49
 1029	ld.q	SP, FRAME_R(50), r50
 1030	ld.q	SP, FRAME_R(51), r51
 1031	ld.q	SP, FRAME_R(52), r52
 1032	ld.q	SP, FRAME_R(53), r53
 1033	ld.q	SP, FRAME_R(54), r54
 1034	ld.q	SP, FRAME_R(55), r55
 1035	ld.q	SP, FRAME_R(56), r56
 1036	ld.q	SP, FRAME_R(57), r57
 1037	ld.q	SP, FRAME_R(58), r58
 1038
 1039	getcon	SR, r59
 1040	movi	SR_BLOCK_EXC, r60
 1041	or	r59, r60, r59
 1042	putcon	r59, SR			/* SR.BL = 1, keep nesting out */
 1043	ld.q	SP, FRAME_S(FSSR), r61
 1044	ld.q	SP, FRAME_S(FSPC), r62
 1045	movi	SR_ASID_MASK, r60
 1046	and	r59, r60, r59		/* r59 = current ASID */
 1047	andc	r61, r60, r61		/* Clear out older ASID */
 1048	or	r59, r61, r61		/* Retain current ASID */
 1049	putcon	r61, SSR
 1050	putcon	r62, SPC
 1051
 1052	/* Ignore FSYSCALL_ID */
 1053
 1054	ld.q	SP, FRAME_R(59), r59
 1055	ld.q	SP, FRAME_R(60), r60
 1056	ld.q	SP, FRAME_R(61), r61
 1057	ld.q	SP, FRAME_R(62), r62
 1058
 1059	/* Last touch */
 1060	ld.q	SP, FRAME_R(15), SP
 1061	rte
 1062	nop
1063
1064/*
1065 * Third level handlers for VBR-based exceptions. Adapting args to
1066 * and/or deflecting to fourth level handlers.
1067 *
1068 * Fourth level handlers interface.
1069 * Most are C-coded handlers directly pointed by the trap_jtable.
1070 * (Third = Fourth level)
1071 * Inputs:
1072 * (r2)   fault/interrupt code, entry number (e.g. NMI = 14,
1073 *	  IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1074 * (r3)   struct pt_regs *, original register's frame pointer
1075 * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1076 * (r5)   TRA control register (for syscall/debug benefit only)
1077 * (LINK) return address
1078 * (SP)   = r3
1079 *
1080 * Kernel TLB fault handlers will get a slightly different interface.
1081 * (r2)   struct pt_regs *, original register's frame pointer
1082 * (r3)   page fault error code (see asm/thread_info.h)
1083 * (r4)   Effective Address of fault
 
1084 * (LINK) return address
1085 * (SP)   = r2
1086 *
1087 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1088 *
1089 */
 1090#ifdef CONFIG_MMU
/* 3rd-level TLB-miss deflectors: rebuild the kernel fault-handler argument
 * set (r2=pt_regs*, r3=fault code, r4=TEA) and tail-jump to do_page_fault. */
tlb_miss_load:
 1092	or	SP, ZERO, r2
 1093	or	ZERO, ZERO, r3		/* Read */
 1094	getcon	TEA, r4

 1095	pta	call_do_page_fault, tr0
 1096	beq	ZERO, ZERO, tr0		/* unconditional branch */

tlb_miss_store:
 1099	or	SP, ZERO, r2
 1100	movi	FAULT_CODE_WRITE, r3		/* Write */
 1101	getcon	TEA, r4

 1102	pta	call_do_page_fault, tr0
 1103	beq	ZERO, ZERO, tr0		/* unconditional branch */

/* Shared vector: event 0x0A40/0x0AA0 is an ITLB miss unless it arrived as
 * an interrupt (r4 == EVENT_INTERRUPT), in which case route to do_IRQ. */
itlb_miss_or_IRQ:
 1106	pta	its_IRQ, tr0
 1107	beqi/u	r4, EVENT_INTERRUPT, tr0

 1109	/* ITLB miss */
 1110	or	SP, ZERO, r2
 1111	movi	FAULT_CODE_ITLB, r3
 1112	getcon	TEA, r4

 1113	/* Fall through */

call_do_page_fault:
 1116	movi	do_page_fault, r6
        ptabs	r6, tr0
        blink	tr0, ZERO
 1119#endif /* CONFIG_MMU */

/* Shared vector 0x800/0x820: FPU error unless it arrived as an interrupt. */
fpu_error_or_IRQA:
 1122	pta	its_IRQ, tr0
 1123	beqi/l	r4, EVENT_INTERRUPT, tr0
 1124#ifdef CONFIG_SH_FPU
 1125	movi	fpu_state_restore_trap_handler, r6
 1126#else
 1127	movi	do_exception_error, r6
 1128#endif
 1129	ptabs	r6, tr0
 1130	blink	tr0, ZERO

fpu_error_or_IRQB:
 1133	pta	its_IRQ, tr0
 1134	beqi/l	r4, EVENT_INTERRUPT, tr0
 1135#ifdef CONFIG_SH_FPU
 1136	movi	fpu_state_restore_trap_handler, r6
 1137#else
 1138	movi	do_exception_error, r6
 1139#endif
 1140	ptabs	r6, tr0
 1141	blink	tr0, ZERO

its_IRQ:
 1144	movi	do_IRQ, r6
 1145	ptabs	r6, tr0
 1146	blink	tr0, ZERO
1147
1148/*
1149 * system_call/unknown_trap third level handler:
1150 *
1151 * Inputs:
1152 * (r2)   fault/interrupt code, entry number (TRAP = 11)
1153 * (r3)   struct pt_regs *, original register's frame pointer
1154 * (r4)   Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1155 * (r5)   TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1156 * (SP)   = r3
1157 * (LINK) return address: ret_from_exception
1158 * (*r3)  Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1159 *
1160 * Outputs:
1161 * (*r3)  Syscall reply (Saved r2)
1162 * (LINK) In case of syscall only it can be scrapped.
1163 *        Common second level post handler will be ret_from_syscall.
1164 *        Common (non-trace) exit point to that is syscall_ret (saving
1165 *        result to r2). Common bad exit point is syscall_bad (returning
1166 *        ENOSYS then saved to r2).
1167 *
1168 */
1169
unknown_trap:
	/* Unknown Trap or User Trace */
	movi	do_unknown_trapa, r6
	ptabs	r6, tr0
        ld.q    r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
        andi    r2, 0x1ff, r2		/* r2 = syscall # */
	blink	tr0, LINK		/* call do_unknown_trapa(r2) */

	/* On return, leave through the common syscall exit path */
	pta	syscall_ret, tr0
	blink	tr0, ZERO
1180
1181        /* New syscall implementation*/
system_call:
	/* TRA (in r5) is 0x00xyzzzz: x=1 for a syscall, y = #args, zzzz = nr.
	   Anything without the 0x1 marker is not ours. */
	pta	unknown_trap, tr0
        or      r5, ZERO, r4            /* TRA (=r5) -> r4 */
        shlri   r4, 20, r4
	bnei	r4, 1, tr0		/* unknown_trap if not 0x1yzzzz */

        /* It's a system call */
	st.q    r3, FRAME_S(FSYSCALL_ID), r5 	/* ID (0x1yzzzz) -> stack */
	andi    r5, 0x1ff, r5			/* syscall # -> r5	  */

	STI()				/* re-enable interrupts (clobbers r6) */

	pta	syscall_allowed, tr0
	movi	NR_syscalls - 1, r4	/* Last valid */
	bgeu/l	r4, r5, tr0		/* dispatch if number is in range */

syscall_bad:
	/* Return ENOSYS ! */
	movi	-(ENOSYS), r2		/* Fall-through */

	.global syscall_ret
syscall_ret:
	/* Common exit: store the result in the saved-r9 slot, advance the
	   saved PC past the trapa (a pre-execution event), and resume. */
	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
1210
1211
1212/*  A different return path for ret_from_fork, because we now need
1213 *  to call schedule_tail with the later kernels. Because prev is
1214 *  loaded into r2 by switch_to() means we can just call it straight  away
1215 */
1216
.global	ret_from_fork
ret_from_fork:
	/* Finish the context switch: call schedule_tail(prev); prev was
	   left in r2 by switch_to(), so no argument setup is needed. */
	movi	schedule_tail,r5
	ori	r5, 1, r5		/* set bit 0 of the ptabs target */
	ptabs	r5, tr0
	blink	tr0, LINK

	/* Then return to userspace through the normal syscall exit:
	   step the saved PC past the trapa and resume. */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
1230
.global	ret_from_kernel_thread
ret_from_kernel_thread:
	/* As ret_from_fork: first let the scheduler clean up after prev
	   (prev already in r2 from switch_to()). */
	movi	schedule_tail,r5
	ori	r5, 1, r5		/* set bit 0 of the ptabs target */
	ptabs	r5, tr0
	blink	tr0, LINK

	/* Invoke the thread function: its argument was stashed in the
	   saved-r2 slot, the function pointer in the saved-r3 slot. */
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ptabs	r3, tr0
	blink	tr0, LINK

	/* Should the thread function return, exit like a syscall */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO
1249
syscall_allowed:
	/* Use LINK to deflect the exit point, default is syscall_ret */
	pta	syscall_ret, tr0
	gettr	tr0, LINK
	pta	syscall_notrace, tr0

	/* KCR0 holds the current thread_info pointer; skip the tracing
	   path unless one of the syscall-work flags is set. */
	getcon	KCR0, r2
	ld.l	r2, TI_FLAGS, r4
	movi	_TIF_WORK_SYSCALL_MASK, r6
	and	r6, r4, r6
	beq/l	r6, ZERO, tr0

	/* Trace it by calling syscall_trace before and after */
	movi	do_syscall_trace_enter, r4
	or	SP, ZERO, r2		/* arg: pt_regs * */
	ptabs	r4, tr0
	blink	tr0, LINK

	/* Save the retval */
	st.q	SP, FRAME_R(2), r2

	/* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
	andi	r5, 0x1ff, r5

	/* When tracing, exit via syscall_ret_trace instead of syscall_ret */
	pta	syscall_ret_trace, tr0
	gettr	tr0, LINK

syscall_notrace:
	/* Now point to the appropriate 4th level syscall handler */
	movi	sys_call_table, r4
	shlli	r5, 2, r5		/* table entries are 32-bit */
	ldx.l	r4, r5, r5
	ptabs	r5, tr0

	/* Prepare original args */
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7

	/* And now the trick for those syscalls requiring regs * ! */
	or	SP, ZERO, r8

	/* Call it */
	blink	tr0, ZERO	/* LINK is already properly set */

syscall_ret_trace:
	/* We get back here only if under trace */
	st.q	SP, FRAME_R(9), r2	/* Save return value */

	movi	do_syscall_trace_leave, LINK
	or	SP, ZERO, r2		/* arg: pt_regs * */
	ptabs	LINK, tr0
	blink	tr0, LINK

	/* This needs to be done after any syscall tracing */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2	/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2

	pta	ret_from_syscall, tr0
	blink	tr0, ZERO		/* Resume normal return sequence */
1315
1316/*
1317 * --- Switch to running under a particular ASID and return the previous ASID value
1318 * --- The caller is assumed to have done a cli before calling this.
1319 *
1320 * Input r2 : new ASID
1321 * Output r2 : old ASID
1322 */
1323
	.global switch_and_save_asid
switch_and_save_asid:
	/* Swap SR.ASID (bits 16..23) atomically by staging the new SR in
	   SSR and executing an rte to the next instruction.  Caller must
	   have interrupts disabled (cli).  In: r2 = new ASID; out: r2 =
	   old ASID. */
	getcon	sr, r0
	movi	255, r4
	shlli 	r4, 16, r4	/* r4 = mask to select ASID */
	and	r0, r4, r3	/* r3 = shifted old ASID */
	andi	r2, 255, r2	/* mask down new ASID */
	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
	andc	r0, r4, r0	/* efface old ASID from SR */
	or	r0, r2, r0	/* insert the new ASID */
	putcon	r0, ssr
	movi	1f, r0
	putcon	r0, spc
	rte			/* SR <- SSR, PC <- SPC: new ASID live */
	nop
1:
	ptabs	LINK, tr0
	shlri	r3, 16, r2	/* r2 = old ASID */
	blink tr0, r63
1343
	.global	route_to_panic_handler
route_to_panic_handler:
	/* Switch to real mode, goto panic_handler, don't return.  Useful for
	   last-chance debugging, e.g. if no output wants to go to the console.
	   */

	movi	panic_handler - CONFIG_PAGE_OFFSET, r1	/* physical address */
	ptabs	r1, tr0
	pta	1f, tr1
	gettr	tr1, r0
	putcon	r0, spc
	getcon	sr, r0
	movi	1, r1
	shlli	r1, 31, r1
	andc	r0, r1, r0	/* clear SR bit 31 (MMU) for real mode */
	putcon	r0, ssr
	rte			/* SR <- SSR, PC <- SPC (label 1 below) */
	nop
1:	/* Now in real mode */
	blink tr0, r63		/* jump to panic_handler; never returns */
	nop
1365
	.global peek_real_address_q
peek_real_address_q:
	/* Two args:
	   r2 : real mode address to peek
	   r2(out) : result quadword

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to ioremap the debug
	   module, and to avoid the need to ioremap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1	/* SR.BL */
	or	r0, r1, r1	/* r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	movi	.peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte			/* enter .peek0 in real mode */
	nop

.peek0:	/* come here in real mode, don't touch caches!!
           still in critical section (sr.bl==1) */
	putcon	r0, ssr	/* original SR: back to virtual mode on next rte */
	putcon	r37, spc
	/* Here's the actual peek.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	ld.q	r2, 0, r2
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	LINK, tr0
	blink	tr0, r63	/* return; result quad in r2 */
1414
	.global poke_real_address_q
poke_real_address_q:
	/* Two args:
	   r2 : real mode address to poke
	   r3 : quadword value to write.

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to ioremap the debug
	   module, and to avoid the need to ioremap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1	/* SR.BL */
	or	r0, r1, r1	/* r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	movi	.poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte			/* enter .poke0 in real mode */
	nop

.poke0:	/* come here in real mode, don't touch caches!!
           still in critical section (sr.bl==1) */
	putcon	r0, ssr	/* original SR: back to virtual mode on next rte */
	putcon	r37, spc
	/* Here's the actual poke.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	st.q	r2, 0, r3
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	LINK, tr0
	blink	tr0, r63
1463
1464#ifdef CONFIG_MMU
1465/*
1466 * --- User Access Handling Section
1467 */
1468
1469/*
1470 * User Access support. It all moved to non inlined Assembler
1471 * functions in here.
1472 *
1473 * __kernel_size_t __copy_user(void *__to, const void *__from,
1474 *			       __kernel_size_t __n)
1475 *
1476 * Inputs:
1477 * (r2)  target address
1478 * (r3)  source address
1479 * (r4)  size in bytes
1480 *
 * Outputs:
1482 * (*r2) target data
1483 * (r2)  non-copied bytes
1484 *
1485 * If a fault occurs on the user pointer, bail out early and return the
1486 * number of bytes not copied in r2.
1487 * Strategy : for large blocks, call a real memcpy function which can
1488 * move >1 byte at a time using unaligned ld/st instructions, and can
1489 * manipulate the cache using prefetch + alloco to improve the speed
1490 * further.  If a fault occurs in that function, just revert to the
1491 * byte-by-byte approach used for small blocks; this is rare so the
1492 * performance hit for that case does not matter.
1493 *
1494 * For small blocks it's not worth the overhead of setting up and calling
1495 * the memcpy routine; do the copy a byte at a time.
1496 *
1497 */
	.global	__copy_user
__copy_user:
	/* Small copies go straight to the byte loop; large ones try the
	   fast copy_user_memcpy first, falling back on a fault via
	   __copy_user_fixup (reached from the exception fixup code). */
	pta	__copy_user_byte_by_byte, tr1
	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
	bge/u	r0, r4, tr1
	pta copy_user_memcpy, tr0
	addi	SP, -32, SP
	/* Save arguments in case we have to fix-up unhandled page fault */
	st.q	SP, 0, r2
	st.q	SP, 8, r3
	st.q	SP, 16, r4
	st.q	SP, 24, r35 ! r35 is callee-save
	/* Save LINK in a register to reduce RTS time later (otherwise
	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
	ori	LINK, 0, r35
	blink	tr0, LINK

	/* Copy completed normally if we get back here */
	ptabs	r35, tr0
	ld.q	SP, 24, r35
	/* don't restore r2-r4, pointless */
	/* set result=r2 to zero as the copy must have succeeded. */
	or	r63, r63, r2
	addi	SP, 32, SP
	blink	tr0, r63 ! RTS

	.global __copy_user_fixup
__copy_user_fixup:
	/* Restore stack frame */
	ori	r35, 0, LINK
	ld.q	SP, 24, r35
	ld.q	SP, 16, r4
	ld.q	SP,  8, r3
	ld.q	SP,  0, r2
	addi	SP, 32, SP
	/* Fall through to original code, in the 'same' state we entered with */

/* The slow byte-by-byte method is used if the fast copy traps due to a bad
   user address.  In that rare case, the speed drop can be tolerated. */
__copy_user_byte_by_byte:
	pta	___copy_user_exit, tr1
	pta	___copy_user1, tr0
	beq/u	r4, r63, tr1	/* early exit for zero length copy */
	sub	r2, r3, r0	/* r0 = dst - src - 1: (src ptr + r0)	*/
	addi	r0, -1, r0	/* tracks the dst address as src advances */

/* ___copy_user1/2 are the fault sites listed in __ex_table; a fault on
   either resumes at ___copy_user_exit with r4 bytes still uncopied. */
___copy_user1:
	ld.b	r3, 0, r5		/* Fault address 1 */

	/* Could rewrite this to use just 1 add, but the second comes 'free'
	   due to load latency */
	addi	r3, 1, r3
	addi	r4, -1, r4		/* No real fixup required */
___copy_user2:
	stx.b	r3, r0, r5		/* Fault address 2 */
	bne     r4, ZERO, tr0

___copy_user_exit:
	or	r4, ZERO, r2		/* return #bytes not copied */
	ptabs	LINK, tr0
	blink	tr0, ZERO
1559
1560/*
1561 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1562 *
1563 * Inputs:
1564 * (r2)  target address
1565 * (r3)  size in bytes
1566 *
 * Outputs:
1568 * (*r2) zero-ed target data
1569 * (r2)  non-zero-ed bytes
1570 */
	.global	__clear_user
__clear_user:
	/* Zero r3 bytes at user address r2, one byte at a time.  A fault
	   on the store resumes at ___clear_user_exit (via __ex_table),
	   returning the number of bytes left un-zeroed. */
	pta	___clear_user_exit, tr1
	pta	___clear_user1, tr0
	beq/u	r3, r63, tr1		/* early exit for zero length */

___clear_user1:
	st.b	r2, 0, ZERO		/* Fault address */
	addi	r2, 1, r2
	addi	r3, -1, r3		/* No real fixup required */
	bne     r3, ZERO, tr0

___clear_user_exit:
	or	r3, ZERO, r2		/* return #bytes not zeroed */
	ptabs	LINK, tr0
	blink	tr0, ZERO
1587
1588#endif /* CONFIG_MMU */
1589
1590/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1591 * extern long __get_user_asm_?(void *val, long addr)
1592 *
1593 * Inputs:
1594 * (r2)  dest address
1595 * (r3)  source address (in User Space)
1596 *
 * Outputs:
1598 * (r2)  -EFAULT (faulting)
1599 *       0 	 (not faulting)
1600 */
	.global	__get_user_asm_b
__get_user_asm_b:
	/* Fetch one byte from user address r3 into *r2.  r2 is preset to
	   -EFAULT and only cleared once both accesses succeed; a fault at
	   the ___get_user_asm_b1 site resumes at the _exit label (via
	   __ex_table) with -EFAULT still in r2. */
	or	r2, ZERO, r4		/* r4 = dest pointer */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_b1:
	ld.b	r3, 0, r5		/* r5 = data */
	st.b	r4, 0, r5
	or	ZERO, ZERO, r2		/* success: return 0 */

___get_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__get_user_asm_w
__get_user_asm_w:
	/* As __get_user_asm_b, for a 16-bit word */
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_w1:
	ld.w	r3, 0, r5		/* r5 = data */
	st.w	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__get_user_asm_l
__get_user_asm_l:
	/* As __get_user_asm_b, for a 32-bit longword */
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_l1:
	ld.l	r3, 0, r5		/* r5 = data */
	st.l	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__get_user_asm_q
__get_user_asm_q:
	/* As __get_user_asm_b, for a 64-bit quadword */
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_q1:
	ld.q	r3, 0, r5		/* r5 = data */
	st.q	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1659
1660/*
1661 * extern long __put_user_asm_?(void *pval, long addr)
1662 *
1663 * Inputs:
1664 * (r2)  kernel pointer to value
1665 * (r3)  dest address (in User Space)
1666 *
 * Outputs:
1668 * (r2)  -EFAULT (faulting)
1669 *       0 	 (not faulting)
1670 */
	.global	__put_user_asm_b
__put_user_asm_b:
	/* Store the byte at *r2 to user address r3.  r2 is preset to
	   -EFAULT and cleared only after the user store succeeds; a fault
	   at ___put_user_asm_b1 resumes at the _exit label (via
	   __ex_table) with -EFAULT still in r2. */
	ld.b	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_b1:
	st.b	r3, 0, r4
	or	ZERO, ZERO, r2		/* success: return 0 */

___put_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__put_user_asm_w
__put_user_asm_w:
	/* As __put_user_asm_b, for a 16-bit word */
	ld.w	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_w1:
	st.w	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__put_user_asm_l
__put_user_asm_l:
	/* As __put_user_asm_b, for a 32-bit longword */
	ld.l	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_l1:
	st.l	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__put_user_asm_q
__put_user_asm_q:
	/* As __put_user_asm_b, for a 64-bit quadword */
	ld.q	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_q1:
	st.q	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO
1725
panic_stash_regs:
	/* The idea is : when we get an unhandled panic, we dump the registers
	   to a known memory location, then just sit in a tight loop.
	   This allows the human to look at the memory region through the GDB
	   session (assuming the debug module's SHwy initiator isn't locked up
	   or anything), to hopefully analyze the cause of the panic. */

	/* On entry, former r15 (SP) is in DCR
	   former r0  is at resvec_saved_area + 0
	   former r1  is at resvec_saved_area + 8
	   former tr0 is at resvec_saved_area + 32
	   DCR is the only register whose value is lost altogether.
	*/

	/* Dump layout: r0-r63 at +0x000..0x1f8 (8 bytes each, the former
	   SP going into the r15 slot), tr0-tr7 at +0x200..0x238, control
	   registers at +0x240..0x2a8. */
	movi	0xffffffff80000000, r0 ! phy of dump area
	ld.q	SP, 0x000, r1	! former r0
	st.q	r0,  0x000, r1
	ld.q	SP, 0x008, r1	! former r1
	st.q	r0,  0x008, r1
	st.q	r0,  0x010, r2
	st.q	r0,  0x018, r3
	st.q	r0,  0x020, r4
	st.q	r0,  0x028, r5
	st.q	r0,  0x030, r6
	st.q	r0,  0x038, r7
	st.q	r0,  0x040, r8
	st.q	r0,  0x048, r9
	st.q	r0,  0x050, r10
	st.q	r0,  0x058, r11
	st.q	r0,  0x060, r12
	st.q	r0,  0x068, r13
	st.q	r0,  0x070, r14
	getcon	dcr, r14	! r14 now free: reuse it for the former SP
	st.q	r0,  0x078, r14
	st.q	r0,  0x080, r16
	st.q	r0,  0x088, r17
	st.q	r0,  0x090, r18
	st.q	r0,  0x098, r19
	st.q	r0,  0x0a0, r20
	st.q	r0,  0x0a8, r21
	st.q	r0,  0x0b0, r22
	st.q	r0,  0x0b8, r23
	st.q	r0,  0x0c0, r24
	st.q	r0,  0x0c8, r25
	st.q	r0,  0x0d0, r26
	st.q	r0,  0x0d8, r27
	st.q	r0,  0x0e0, r28
	st.q	r0,  0x0e8, r29
	st.q	r0,  0x0f0, r30
	st.q	r0,  0x0f8, r31
	st.q	r0,  0x100, r32
	st.q	r0,  0x108, r33
	st.q	r0,  0x110, r34
	st.q	r0,  0x118, r35
	st.q	r0,  0x120, r36
	st.q	r0,  0x128, r37
	st.q	r0,  0x130, r38
	st.q	r0,  0x138, r39
	st.q	r0,  0x140, r40
	st.q	r0,  0x148, r41
	st.q	r0,  0x150, r42
	st.q	r0,  0x158, r43
	st.q	r0,  0x160, r44
	st.q	r0,  0x168, r45
	st.q	r0,  0x170, r46
	st.q	r0,  0x178, r47
	st.q	r0,  0x180, r48
	st.q	r0,  0x188, r49
	st.q	r0,  0x190, r50
	st.q	r0,  0x198, r51
	st.q	r0,  0x1a0, r52
	st.q	r0,  0x1a8, r53
	st.q	r0,  0x1b0, r54
	st.q	r0,  0x1b8, r55
	st.q	r0,  0x1c0, r56
	st.q	r0,  0x1c8, r57
	st.q	r0,  0x1d0, r58
	st.q	r0,  0x1d8, r59
	st.q	r0,  0x1e0, r60
	st.q	r0,  0x1e8, r61
	st.q	r0,  0x1f0, r62
	st.q	r0,  0x1f8, r63	! bogus, but for consistency's sake...

	ld.q	SP, 0x020, r1  ! former tr0
	st.q	r0,  0x200, r1
	gettr	tr1, r1
	st.q	r0,  0x208, r1
	gettr	tr2, r1
	st.q	r0,  0x210, r1
	gettr	tr3, r1
	st.q	r0,  0x218, r1
	gettr	tr4, r1
	st.q	r0,  0x220, r1
	gettr	tr5, r1
	st.q	r0,  0x228, r1
	gettr	tr6, r1
	st.q	r0,  0x230, r1
	gettr	tr7, r1
	st.q	r0,  0x238, r1

	getcon	sr,  r1
	getcon	ssr,  r2
	getcon	pssr,  r3
	getcon	spc,  r4
	getcon	pspc,  r5
	getcon	intevt,  r6
	getcon	expevt,  r7
	getcon	pexpevt,  r8
	getcon	tra,  r9
	getcon	tea,  r10
	getcon	kcr0, r11
	getcon	kcr1, r12
	getcon	vbr,  r13
	getcon	resvec,  r14

	st.q	r0,  0x240, r1
	st.q	r0,  0x248, r2
	st.q	r0,  0x250, r3
	st.q	r0,  0x258, r4
	st.q	r0,  0x260, r5
	st.q	r0,  0x268, r6
	st.q	r0,  0x270, r7
	st.q	r0,  0x278, r8
	st.q	r0,  0x280, r9
	st.q	r0,  0x288, r10
	st.q	r0,  0x290, r11
	st.q	r0,  0x298, r12
	st.q	r0,  0x2a0, r13
	st.q	r0,  0x2a8, r14

	/* Call panic_handler(spc, ssr, expevt) in real mode, with the
	   original stack pointer recovered from DCR. */
	getcon	SPC,r2
	getcon	SSR,r3
	getcon	EXPEVT,r4
	/* Prepare to jump to C - physical address */
	movi	panic_handler-CONFIG_PAGE_OFFSET, r1
	ori	r1, 1, r1
	ptabs   r1, tr0
	getcon	DCR, SP
	blink	tr0, ZERO
	nop
	nop
	nop
	nop
1869
1870
1871
1872
1873/*
1874 * --- Signal Handling Section
1875 */
1876
1877/*
1878 * extern long long _sa_default_rt_restorer
1879 * extern long long _sa_default_restorer
1880 *
1881 *		 or, better,
1882 *
1883 * extern void _sa_default_rt_restorer(void)
1884 * extern void _sa_default_restorer(void)
1885 *
1886 * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
1887 * from user space. Copied into user space by signal management.
1888 * Both must be quad aligned and 2 quad long (4 instructions).
1889 *
1890 */
	.balign 8
	.global sa_default_rt_restorer
sa_default_rt_restorer:
	/* Build the 0x10zzzz TRA-style syscall ID for rt_sigreturn
	   (movi loads 0x10, shori shifts it up 16 and ORs in the number),
	   then trap.  Copied into user space by signal management. */
	movi	0x10, r9
	shori	__NR_rt_sigreturn, r9
	trapa	r9
	nop

	.balign 8
	.global sa_default_restorer
sa_default_restorer:
	/* Same as above, for plain sigreturn */
	movi	0x10, r9
	shori	__NR_sigreturn, r9
	trapa	r9
	nop
1906
1907/*
1908 * --- __ex_table Section
1909 */
1910
1911/*
1912 * User Access Exception Table.
1913 */
	.section	__ex_table,  "a"

	/* Each entry pairs a faulting-instruction address with its fixup
	   address; the fault handler searches this table to recover from
	   bad user accesses in the routines above. */
	.global asm_uaccess_start	/* Just a marker */
asm_uaccess_start:

#ifdef CONFIG_MMU
	.long	___copy_user1, ___copy_user_exit
	.long	___copy_user2, ___copy_user_exit
	.long	___clear_user1, ___clear_user_exit
#endif
	.long	___get_user_asm_b1, ___get_user_asm_b_exit
	.long	___get_user_asm_w1, ___get_user_asm_w_exit
	.long	___get_user_asm_l1, ___get_user_asm_l_exit
	.long	___get_user_asm_q1, ___get_user_asm_q_exit
	.long	___put_user_asm_b1, ___put_user_asm_b_exit
	.long	___put_user_asm_w1, ___put_user_asm_w_exit
	.long	___put_user_asm_l1, ___put_user_asm_l_exit
	.long	___put_user_asm_q1, ___put_user_asm_q_exit

	.global asm_uaccess_end		/* Just a marker */
asm_uaccess_end:
1935
1936
1937
1938
1939/*
1940 * --- .init.text Section
1941 */
1942
1943	__INIT
1944
1945/*
1946 * void trap_init (void)
1947 *
1948 */
/*
 * void trap_init(void)
 *
 * Install the VBR and RESVEC exception vector blocks, sanity-check that
 * the handler block is exactly BLOCK_SIZE bytes (looping forever if not,
 * since taking an exception through broken vectors would be worse), then
 * unblock exceptions by clearing SR.BL.
 *
 * Fix vs. original: r28/r29/r30 were saved on entry but never reloaded
 * before the frame was popped, clobbering callee-saved registers for the
 * caller.  They are now restored in the epilogue.
 */
	.global	trap_init
trap_init:
	addi	SP, -24, SP			/* Room to save r28/r29/r30 */
	st.q	SP, 0, r28
	st.q	SP, 8, r29
	st.q	SP, 16, r30

	/* Set VBR and RESVEC */
	movi	LVBR_block, r19
	andi	r19, -4, r19			/* reset MMUOFF + reserved */
	/* For RESVEC exceptions we force the MMU off, which means we need the
	   physical address. */
	movi	LRESVEC_block-CONFIG_PAGE_OFFSET, r20
	andi	r20, -4, r20			/* reset reserved */
	ori	r20, 1, r20			/* set MMUOFF */
	putcon	r19, VBR
	putcon	r20, RESVEC

	/* Sanity check */
	movi	LVBR_block_end, r21
	andi	r21, -4, r21
	movi	BLOCK_SIZE, r29			/* r29 = expected size */
	or	r19, ZERO, r30
	add	r19, r29, r19

	/*
	 * Ugly, but better loop forever now than crash afterwards.
	 * We should print a message, but if we touch LVBR or
	 * LRESVEC blocks we should not be surprised if we get stuck
	 * in trap_init().
	 */
	pta	trap_init_loop, tr1
	gettr	tr1, r28			/* r28 = trap_init_loop */
	sub	r21, r30, r30			/* r30 = actual size */

	/*
	 * VBR/RESVEC handlers overlap by being bigger than
	 * allowed. Very bad. Just loop forever.
	 * (r28) panic/loop address
	 * (r29) expected size
	 * (r30) actual size
	 */
trap_init_loop:
	bne	r19, r21, tr1

	/* Now that exception vectors are set up reset SR.BL */
	getcon 	SR, r22
	movi	SR_UNBLOCK_EXC, r23
	and	r22, r23, r22
	putcon	r22, SR

	/* Restore callee-saved registers before returning */
	ld.q	SP, 0, r28
	ld.q	SP, 8, r29
	ld.q	SP, 16, r30
	addi	SP, 24, SP
	ptabs	LINK, tr0
	blink	tr0, ZERO
2003
v3.1
   1/*
   2 * arch/sh/kernel/cpu/sh5/entry.S
   3 *
   4 * Copyright (C) 2000, 2001  Paolo Alberelli
   5 * Copyright (C) 2004 - 2008  Paul Mundt
   6 * Copyright (C) 2003, 2004  Richard Curnow
   7 *
   8 * This file is subject to the terms and conditions of the GNU General Public
   9 * License.  See the file "COPYING" in the main directory of this archive
  10 * for more details.
  11 */
  12#include <linux/errno.h>
  13#include <linux/init.h>
  14#include <linux/sys.h>
  15#include <cpu/registers.h>
  16#include <asm/processor.h>
  17#include <asm/unistd.h>
  18#include <asm/thread_info.h>
  19#include <asm/asm-offsets.h>
  20
  21/*
  22 * SR fields.
  23 */
  24#define SR_ASID_MASK	0x00ff0000
  25#define SR_FD_MASK	0x00008000
  26#define SR_SS		0x08000000
  27#define SR_BL		0x10000000
  28#define SR_MD		0x40000000
  29
  30/*
  31 * Event code.
  32 */
  33#define	EVENT_INTERRUPT		0
  34#define	EVENT_FAULT_TLB		1
  35#define	EVENT_FAULT_NOT_TLB	2
  36#define	EVENT_DEBUG		3
  37
  38/* EXPEVT values */
  39#define	RESET_CAUSE		0x20
  40#define DEBUGSS_CAUSE		0x980
  41
  42/*
  43 * Frame layout. Quad index.
  44 */
  45#define	FRAME_T(x)	FRAME_TBASE+(x*8)
  46#define	FRAME_R(x)	FRAME_RBASE+(x*8)
  47#define	FRAME_S(x)	FRAME_SBASE+(x*8)
  48#define FSPC		0
  49#define FSSR		1
  50#define FSYSCALL_ID	2
  51
  52/* Arrange the save frame to be a multiple of 32 bytes long */
  53#define FRAME_SBASE	0
  54#define FRAME_RBASE	(FRAME_SBASE+(3*8))	/* SYSCALL_ID - SSR - SPC */
  55#define FRAME_TBASE	(FRAME_RBASE+(63*8))	/* r0 - r62 */
  56#define FRAME_PBASE	(FRAME_TBASE+(8*8))	/* tr0 -tr7 */
  57#define	FRAME_SIZE	(FRAME_PBASE+(2*8))	/* pad0-pad1 */
  58
  59#define FP_FRAME_SIZE	FP_FRAME_BASE+(33*8)	/* dr0 - dr31 + fpscr */
  60#define FP_FRAME_BASE	0
  61
  62#define	SAVED_R2	0*8
  63#define	SAVED_R3	1*8
  64#define	SAVED_R4	2*8
  65#define	SAVED_R5	3*8
  66#define	SAVED_R18	4*8
  67#define	SAVED_R6	5*8
  68#define	SAVED_TR0	6*8
  69
  70/* These are the registers saved in the TLB path that aren't saved in the first
  71   level of the normal one. */
  72#define	TLB_SAVED_R25	7*8
  73#define	TLB_SAVED_TR1	8*8
  74#define	TLB_SAVED_TR2	9*8
  75#define	TLB_SAVED_TR3	10*8
  76#define	TLB_SAVED_TR4	11*8
  77/* Save R0/R1 : PT-migrating compiler currently dishounours -ffixed-r0 and -ffixed-r1 causing
  78   breakage otherwise. */
  79#define	TLB_SAVED_R0	12*8
  80#define	TLB_SAVED_R1	13*8
  81
  82#define CLI()				\
  83	getcon	SR, r6;			\
  84	ori	r6, 0xf0, r6;		\
  85	putcon	r6, SR;
  86
  87#define STI()				\
  88	getcon	SR, r6;			\
  89	andi	r6, ~0xf0, r6;		\
  90	putcon	r6, SR;
  91
  92#ifdef CONFIG_PREEMPT
  93#  define preempt_stop()	CLI()
  94#else
  95#  define preempt_stop()
  96#  define resume_kernel		restore_all
  97#endif
  98
  99	.section	.data, "aw"
 100
 101#define FAST_TLBMISS_STACK_CACHELINES 4
 102#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
 103
 104/* Register back-up area for all exceptions */
 105	.balign	32
 106	/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
 107	 * register saves etc. */
 108	.fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
 109/* This is 32 byte aligned by construction */
 110/* Register back-up area for all exceptions */
 111reg_save_area:
 112	.quad	0
 113	.quad	0
 114	.quad	0
 115	.quad	0
 116
 117	.quad	0
 118	.quad	0
 119	.quad	0
 120	.quad	0
 121
 122	.quad	0
 123	.quad	0
 124	.quad	0
 125	.quad	0
 126
 127	.quad	0
 128	.quad   0
 129
 130/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
 131 * reentrancy. Note this area may be accessed via physical address.
 132 * Align so this fits a whole single cache line, for ease of purging.
 133 */
 134	.balign 32,0,32
 135resvec_save_area:
 136	.quad	0
 137	.quad	0
 138	.quad	0
 139	.quad	0
 140	.quad	0
 141	.balign 32,0,32
 142
 143/* Jump table of 3rd level handlers  */
 144trap_jtable:
 145	.long	do_exception_error		/* 0x000 */
 146	.long	do_exception_error		/* 0x020 */
 147#ifdef CONFIG_MMU
 148	.long	tlb_miss_load				/* 0x040 */
 149	.long	tlb_miss_store				/* 0x060 */
 150#else
 151	.long	do_exception_error
 152	.long	do_exception_error
 153#endif
 154	! ARTIFICIAL pseudo-EXPEVT setting
 155	.long	do_debug_interrupt		/* 0x080 */
 156#ifdef CONFIG_MMU
 157	.long	tlb_miss_load				/* 0x0A0 */
 158	.long	tlb_miss_store				/* 0x0C0 */
 159#else
 160	.long	do_exception_error
 161	.long	do_exception_error
 162#endif
 163	.long	do_address_error_load	/* 0x0E0 */
 164	.long	do_address_error_store	/* 0x100 */
 165#ifdef CONFIG_SH_FPU
 166	.long	do_fpu_error		/* 0x120 */
 167#else
 168	.long	do_exception_error		/* 0x120 */
 169#endif
 170	.long	do_exception_error		/* 0x140 */
 171	.long	system_call				/* 0x160 */
 172	.long	do_reserved_inst		/* 0x180 */
 173	.long	do_illegal_slot_inst	/* 0x1A0 */
 174	.long	do_exception_error		/* 0x1C0 - NMI */
 175	.long	do_exception_error		/* 0x1E0 */
 176	.rept 15
 177		.long do_IRQ		/* 0x200 - 0x3C0 */
 178	.endr
 179	.long	do_exception_error		/* 0x3E0 */
 180	.rept 32
 181		.long do_IRQ		/* 0x400 - 0x7E0 */
 182	.endr
 183	.long	fpu_error_or_IRQA			/* 0x800 */
 184	.long	fpu_error_or_IRQB			/* 0x820 */
 185	.long	do_IRQ			/* 0x840 */
 186	.long	do_IRQ			/* 0x860 */
 187	.rept 6
 188		.long do_exception_error	/* 0x880 - 0x920 */
 189	.endr
 190	.long	breakpoint_trap_handler	/* 0x940 */
 191	.long	do_exception_error		/* 0x960 */
 192	.long	do_single_step		/* 0x980 */
 193
 194	.rept 3
 195		.long do_exception_error	/* 0x9A0 - 0x9E0 */
 196	.endr
 197	.long	do_IRQ			/* 0xA00 */
 198	.long	do_IRQ			/* 0xA20 */
 199#ifdef CONFIG_MMU
 200	.long	itlb_miss_or_IRQ			/* 0xA40 */
 201#else
 202	.long	do_IRQ
 203#endif
 204	.long	do_IRQ			/* 0xA60 */
 205	.long	do_IRQ			/* 0xA80 */
 206#ifdef CONFIG_MMU
 207	.long	itlb_miss_or_IRQ			/* 0xAA0 */
 208#else
 209	.long	do_IRQ
 210#endif
 211	.long	do_exception_error		/* 0xAC0 */
 212	.long	do_address_error_exec	/* 0xAE0 */
 213	.rept 8
 214		.long do_exception_error	/* 0xB00 - 0xBE0 */
 215	.endr
 216	.rept 18
 217		.long do_IRQ		/* 0xC00 - 0xE20 */
 218	.endr
 219
 220	.section	.text64, "ax"
 221
 222/*
 223 * --- Exception/Interrupt/Event Handling Section
 224 */
 225
 226/*
 227 * VBR and RESVEC blocks.
 228 *
 229 * First level handler for VBR-based exceptions.
 230 *
 231 * To avoid waste of space, align to the maximum text block size.
 232 * This is assumed to be at most 128 bytes or 32 instructions.
 233 * DO NOT EXCEED 32 instructions on the first level handlers !
 234 *
 235 * Also note that RESVEC is contained within the VBR block
 236 * where the room left (1KB - TEXT_SIZE) allows placing
 237 * the RESVEC block (at most 512B + TEXT_SIZE).
 238 *
 239 * So first (and only) level handler for RESVEC-based exceptions.
 240 *
 241 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
 242 * and interrupt) we are a lot tight with register space until
 243 * saving onto the stack frame, which is done in handle_exception().
 244 *
 245 */
 246
 247#define	TEXT_SIZE 	128
 248#define	BLOCK_SIZE 	1664 		/* Dynamic check, 13*128 */
 249
 250	.balign TEXT_SIZE
 251LVBR_block:
 252	.space	256, 0			/* Power-on class handler, */
 253					/* not required here       */
 254not_a_tlb_miss:
 255	synco	/* TAKum03020 (but probably a good idea anyway.) */
 256	/* Save original stack pointer into KCR1 */
 257	putcon	SP, KCR1
 258
 259	/* Save other original registers into reg_save_area */
 260        movi  reg_save_area, SP
 261	st.q	SP, SAVED_R2, r2
 262	st.q	SP, SAVED_R3, r3
 263	st.q	SP, SAVED_R4, r4
 264	st.q	SP, SAVED_R5, r5
 265	st.q	SP, SAVED_R6, r6
 266	st.q	SP, SAVED_R18, r18
 267	gettr	tr0, r3
 268	st.q	SP, SAVED_TR0, r3
 269
 270	/* Set args for Non-debug, Not a TLB miss class handler */
 271	getcon	EXPEVT, r2
 272	movi	ret_from_exception, r3
 273	ori	r3, 1, r3
 274	movi	EVENT_FAULT_NOT_TLB, r4
 275	or	SP, ZERO, r5
 276	getcon	KCR1, SP
 277	pta	handle_exception, tr0
 278	blink	tr0, ZERO
 279
 280	.balign 256
 281	! VBR+0x200
 282	nop
 283	.balign 256
 284	! VBR+0x300
 285	nop
 286	.balign 256
 287	/*
 288	 * Instead of the natural .balign 1024 place RESVEC here
 289	 * respecting the final 1KB alignment.
 290	 */
 291	.balign TEXT_SIZE
 292	/*
 293	 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
 294	 * block making sure the final alignment is correct.
 295	 */
 296#ifdef CONFIG_MMU
 297tlb_miss:
	/* First-level TLB miss handler. Try the fast path
	 * (do_fast_page_fault); fall back to the generic second-level
	 * handler if the fast path could not fix the fault. */
 298	synco	/* TAKum03020 (but probably a good idea anyway.) */
 299	putcon	SP, KCR1
 300	movi	reg_save_area, SP
 301	/* SP is guaranteed 32-byte aligned. */
 302	st.q	SP, TLB_SAVED_R0 , r0
 303	st.q	SP, TLB_SAVED_R1 , r1
 304	st.q	SP, SAVED_R2 , r2
 305	st.q	SP, SAVED_R3 , r3
 306	st.q	SP, SAVED_R4 , r4
 307	st.q	SP, SAVED_R5 , r5
 308	st.q	SP, SAVED_R6 , r6
 309	st.q	SP, SAVED_R18, r18
 310
 311	/* Save R25 for safety; as/ld may want to use it to achieve the call to
 312	 * the code in mm/tlbmiss.c */
 313	st.q	SP, TLB_SAVED_R25, r25
 314	gettr	tr0, r2
 315	gettr	tr1, r3
 316	gettr	tr2, r4
 317	gettr	tr3, r5
 318	gettr	tr4, r18
 319	st.q	SP, SAVED_TR0 , r2
 320	st.q	SP, TLB_SAVED_TR1 , r3
 321	st.q	SP, TLB_SAVED_TR2 , r4
 322	st.q	SP, TLB_SAVED_TR3 , r5
 323	st.q	SP, TLB_SAVED_TR4 , r18
 324
 325	pt	do_fast_page_fault, tr0
 326	getcon	SSR, r2
 327	getcon	EXPEVT, r3
 328	getcon	TEA, r4
 329	shlri	r2, 30, r2
 330	andi	r2, 1, r2	/* r2 = SSR.MD */
 331	blink 	tr0, LINK
 332
 333	pt	fixup_to_invoke_general_handler, tr1
 334
 335	/* If the fast path handler fixed the fault, just drop through quickly
 336	   to the restore code right away to return to the excepting context.
 337	   */
 338	beqi/u	r2, 0, tr1
 339
 340fast_tlb_miss_restore:
	/* Fast exit: undo everything saved above and rte straight back. */
 341	ld.q	SP, SAVED_TR0, r2
 342	ld.q	SP, TLB_SAVED_TR1, r3
 343	ld.q	SP, TLB_SAVED_TR2, r4
 344
 345	ld.q	SP, TLB_SAVED_TR3, r5
 346	ld.q	SP, TLB_SAVED_TR4, r18
 347
 348	ptabs	r2, tr0
 349	ptabs	r3, tr1
 350	ptabs	r4, tr2
 351	ptabs	r5, tr3
 352	ptabs	r18, tr4
 353
 354	ld.q	SP, TLB_SAVED_R0, r0
 355	ld.q	SP, TLB_SAVED_R1, r1
 356	ld.q	SP, SAVED_R2, r2
 357	ld.q	SP, SAVED_R3, r3
 358	ld.q	SP, SAVED_R4, r4
 359	ld.q	SP, SAVED_R5, r5
 360	ld.q	SP, SAVED_R6, r6
 361	ld.q	SP, SAVED_R18, r18
 362	ld.q	SP, TLB_SAVED_R25, r25
 363
 364	getcon	KCR1, SP
 365	rte
 366	nop /* for safety, in case the code is run on sh5-101 cut1.x */
 367
 368fixup_to_invoke_general_handler:
 369
 370	/* OK, new method.  Restore stuff that's not expected to get saved into
 371	   the 'first-level' reg save area, then just fall through to setting
 372	   up the registers and calling the second-level handler. */
 373
 374	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved.  So we must restore
 375	   r25,tr1-4 and save r6 to get into the right state.  */
 376
 377	ld.q	SP, TLB_SAVED_TR1, r3
 378	ld.q	SP, TLB_SAVED_TR2, r4
 379	ld.q	SP, TLB_SAVED_TR3, r5
 380	ld.q	SP, TLB_SAVED_TR4, r18
 381	ld.q	SP, TLB_SAVED_R25, r25
 382
 383	ld.q	SP, TLB_SAVED_R0, r0
 384	ld.q	SP, TLB_SAVED_R1, r1
 385
 386	ptabs/u	r3, tr1
 387	ptabs/u	r4, tr2
 388	ptabs/u	r5, tr3
 389	ptabs/u	r18, tr4
 390
 391	/* Set args for Non-debug, TLB miss class handler */
 392	getcon	EXPEVT, r2
 393	movi	ret_from_exception, r3
 394	ori	r3, 1, r3		/* bit0=1: SHmedia return address */
 395	movi	EVENT_FAULT_TLB, r4
 396	or	SP, ZERO, r5
 397	getcon	KCR1, SP
 398	pta	handle_exception, tr0
 399	blink	tr0, ZERO
 400#else /* CONFIG_MMU */
 401	.balign 256
 402#endif
 403
 404/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
 405   DOES END UP AT VBR+0x600 */
 406	nop
 407	nop
 408	nop
 409	nop
 410	nop
 411	nop
 412
 413	.balign 256
 414	/* VBR + 0x600 */
 415
 416interrupt:
	/* First-level external interrupt handler (VBR+0x600): same
	 * save sequence as not_a_tlb_miss, but keyed off INTEVT and
	 * returning through ret_from_irq. */
 417	synco	/* TAKum03020 (but probably a good idea anyway.) */
 418	/* Save original stack pointer into KCR1 */
 419	putcon	SP, KCR1
 420
 421	/* Save other original registers into reg_save_area */
 422        movi  reg_save_area, SP
 423	st.q	SP, SAVED_R2, r2
 424	st.q	SP, SAVED_R3, r3
 425	st.q	SP, SAVED_R4, r4
 426	st.q	SP, SAVED_R5, r5
 427	st.q	SP, SAVED_R6, r6
 428	st.q	SP, SAVED_R18, r18
 429	gettr	tr0, r3
 430	st.q	SP, SAVED_TR0, r3
 431
 432	/* Set args for interrupt class handler */
 433	getcon	INTEVT, r2
 434	movi	ret_from_irq, r3
 435	ori	r3, 1, r3		/* bit0=1: SHmedia return address */
 436	movi	EVENT_INTERRUPT, r4
 437	or	SP, ZERO, r5
 438	getcon	KCR1, SP
 439	pta	handle_exception, tr0
 440	blink	tr0, ZERO
 441	.balign	TEXT_SIZE		/* let's waste the bare minimum */
 442
 443LVBR_block_end:				/* Marker. Used for total checking */
 444
 445	.balign 256
 446LRESVEC_block:
 447	/* Panic handler. Called with MMU off. Possible causes/actions:
 448	 * - Reset:		Jump to program start.
 449	 * - Single Step:	Turn off Single Step & return.
 450	 * - Others:		Call panic handler, passing PC as arg.
 451	 *			(this may need to be extended...)
 452	 */
 453reset_or_panic:
 454	synco	/* TAKum03020 (but probably a good idea anyway.) */
 455	putcon	SP, DCR		/* stash SP in DCR; restored before rte */
 456	/* First save r0-1 and tr0, as we need to use these */
 457	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
 458	st.q	SP, 0, r0
 459	st.q	SP, 8, r1
 460	gettr	tr0, r0
 461	st.q	SP, 32, r0
 462
 463	/* Check cause */
 464	getcon	EXPEVT, r0
 465	movi	RESET_CAUSE, r1
 466	sub	r1, r0, r1		/* r1=0 if reset */
 467	movi	_stext-CONFIG_PAGE_OFFSET, r0
 468	ori	r0, 1, r0		/* bit0=1: SHmedia target */
 469	ptabs	r0, tr0
 470	beqi	r1, 0, tr0		/* Jump to start address if reset */
 471
 472	getcon	EXPEVT, r0
 473	movi	DEBUGSS_CAUSE, r1
 474	sub	r1, r0, r1		/* r1=0 if single step */
 475	pta	single_step_panic, tr0
 476	beqi	r1, 0, tr0		/* jump if single step */
 477
 478	/* Now jump to where we save the registers. */
 479	movi	panic_stash_regs-CONFIG_PAGE_OFFSET, r1
 480	ptabs	r1, tr0
 481	blink	tr0, r63
 482
 483single_step_panic:
 484	/* We are in a handler with Single Step set. We need to resume the
 485	 * handler, by turning on MMU & turning off Single Step. */
 486	getcon	SSR, r0
 487	movi	SR_MMU, r1
 488	or	r0, r1, r0
 489	movi	~SR_SS, r1
 490	and	r0, r1, r0
 491	putcon	r0, SSR
 492	/* Restore EXPEVT, as the rte won't do this */
 493	getcon	PEXPEVT, r0
 494	putcon	r0, EXPEVT
 495	/* Restore regs */
 496	ld.q	SP, 32, r0
 497	ptabs	r0, tr0
 498	ld.q	SP, 0, r0
 499	ld.q	SP, 8, r1
 500	getcon	DCR, SP
 501	synco
 502	rte
 503
 504
 505	.balign	256
 506debug_exception:
 507	synco	/* TAKum03020 (but probably a good idea anyway.) */
 508	/*
 509	 * Single step/software_break_point first level handler.
 510	 * Called with MMU off, so the first thing we do is enable it
 511	 * by doing an rte with appropriate SSR.
 512	 */
 513	putcon	SP, DCR
 514	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
 515	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
 516
 517	/* With the MMU off, we are bypassing the cache, so purge any
 518         * data that will be made stale by the following stores.
 519         */
 520	ocbp	SP, 0
 521	synco
 522
 523	st.q	SP, 0, r0
 524	st.q	SP, 8, r1
 525	getcon	SPC, r0
 526	st.q	SP, 16, r0
 527	getcon	SSR, r0
 528	st.q	SP, 24, r0
 529
 530	/* Enable MMU, block exceptions, set priv mode, disable single step */
 531	movi	SR_MMU | SR_BL | SR_MD, r1
 532	or	r0, r1, r0
 533	movi	~SR_SS, r1
 534	and	r0, r1, r0
 535	putcon	r0, SSR
 536	/* Force control to debug_exception_2 when rte is executed */
 537	movi	debug_exception_2, r0	/* label spelling fixed (was debug_exeception_2) */
 538	ori	r0, 1, r0      /* force SHmedia, just in case */
 539	putcon	r0, SPC
 540	getcon	DCR, SP
 541	synco
 542	rte
debug_exception_2:
 544	/* Restore saved regs */
 545	putcon	SP, KCR1
 546	movi	resvec_save_area, SP
 547	ld.q	SP, 24, r0
 548	putcon	r0, SSR
 549	ld.q	SP, 16, r0
 550	putcon	r0, SPC
 551	ld.q	SP, 0, r0
 552	ld.q	SP, 8, r1
 553
 554	/* Save other original registers into reg_save_area */
 555        movi  reg_save_area, SP
 556	st.q	SP, SAVED_R2, r2
 557	st.q	SP, SAVED_R3, r3
 558	st.q	SP, SAVED_R4, r4
 559	st.q	SP, SAVED_R5, r5
 560	st.q	SP, SAVED_R6, r6
 561	st.q	SP, SAVED_R18, r18
 562	gettr	tr0, r3
 563	st.q	SP, SAVED_TR0, r3
 564
 565	/* Set args for debug class handler */
 566	getcon	EXPEVT, r2
 567	movi	ret_from_exception, r3
 568	ori	r3, 1, r3		/* bit0=1: SHmedia return address */
 569	movi	EVENT_DEBUG, r4
 570	or	SP, ZERO, r5
 571	getcon	KCR1, SP
 572	pta	handle_exception, tr0
 573	blink	tr0, ZERO
 574
 575	.balign	256
 576debug_interrupt:
 577	/* !!! WE COME HERE IN REAL MODE !!! */
 578	/* Hook-up debug interrupt to allow various debugging options to be
 579	 * hooked into its handler. */
	/* Save layout mirrors the VBR 'interrupt' entry, but runs with the
	 * MMU off, so the save area is addressed physically. */
 580	/* Save original stack pointer into KCR1 */
 581	synco
 582	putcon	SP, KCR1
 583	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
 584	ocbp	SP, 0
 585	ocbp	SP, 32
 586	synco
 587
 588	/* Save other original registers into reg_save_area thru real addresses */
 589	st.q	SP, SAVED_R2, r2
 590	st.q	SP, SAVED_R3, r3
 591	st.q	SP, SAVED_R4, r4
 592	st.q	SP, SAVED_R5, r5
 593	st.q	SP, SAVED_R6, r6
 594	st.q	SP, SAVED_R18, r18
 595	gettr	tr0, r3
 596	st.q	SP, SAVED_TR0, r3
 597
 598	/* move (spc,ssr)->(pspc,pssr).  The rte will shift
 599	   them back again, so that they look like the originals
 600	   as far as the real handler code is concerned. */
 601	getcon	spc, r6
 602	putcon	r6, pspc
 603	getcon	ssr, r6
 604	putcon	r6, pssr
 605
 606	! construct useful SR for handle_exception
 607	movi	3, r6
 608	shlli	r6, 30, r6
 609	getcon	sr, r18
 610	or	r18, r6, r6
 611	putcon	r6, ssr
 612
 613	! SSR is now the current SR with the MD and MMU bits set
 614	! i.e. the rte will switch back to priv mode and put
 615	! the mmu back on
 616
 617	! construct spc
 618	movi	handle_exception, r18
 619	ori	r18, 1, r18		! for safety (do we need this?)
 620	putcon	r18, spc
 621
 622	/* Set args for Non-debug, Not a TLB miss class handler */
 623
 624	! EXPEVT==0x80 is unused, so 'steal' this value to put the
 625	! debug interrupt handler in the vectoring table
 626	movi	0x80, r2
 627	movi	ret_from_exception, r3
 628	ori	r3, 1, r3
 629	movi	EVENT_FAULT_NOT_TLB, r4
 630
 631	or	SP, ZERO, r5
 632	movi	CONFIG_PAGE_OFFSET, r6
 633	add	r6, r5, r5		! r5 = virtual address of save area
 634	getcon	KCR1, SP
 635
 636	synco	! for safety
 637	rte	! -> handle_exception, switch back to priv mode again
 638
 639LRESVEC_block_end:			/* Marker. Unused. */
 640
 641	.balign	TEXT_SIZE
 642
 643/*
 644 * Second level handler for VBR-based exceptions. Pre-handler.
 645 * In common to all stack-frame sensitive handlers.
 646 *
 647 * Inputs:
 648 * (KCR0) Current [current task union]
 649 * (KCR1) Original SP
 650 * (r2)   INTEVT/EXPEVT
 651 * (r3)   appropriate return address
 652 * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
 653 * (r5)   Pointer to reg_save_area
 654 * (SP)   Original SP
 655 *
 656 * Available registers:
 657 * (r6)
 658 * (r18)
 659 * (tr0)
 660 *
 661 */
 662handle_exception:
 663	/* Common 2nd level handler. */
 664
 665	/* First thing we need an appropriate stack pointer */
 666	getcon	SSR, r6
 667	shlri	r6, 30, r6
 668	andi	r6, 1, r6		/* r6 = SSR.MD (1 = was in priv mode) */
 669	pta	stack_ok, tr0
 670	bne	r6, ZERO, tr0		/* Original stack pointer is fine */
 671
 672	/* Set stack pointer for user fault */
 673	getcon	KCR0, SP
 674	movi	THREAD_SIZE, r6		/* Point to the end */
 675	add	SP, r6, SP
 676
 677stack_ok:
 678
 679/* DEBUG : check for underflow/overflow of the kernel stack */
 680	pta	no_underflow, tr0
 681	getcon  KCR0, r6
 682	movi	1024, r18
 683	add	r6, r18, r6
 684	bge	SP, r6, tr0 	! ? below 1k from bottom of stack : danger zone
 685
 686/* Just panic to cause a crash. */
 687bad_sp:
 688	ld.b	r63, 0, r6		! load from address 0 -> fault
 689	nop
 690
 691no_underflow:
 692	pta	bad_sp, tr0
 693	getcon	kcr0, r6
 694	movi	THREAD_SIZE, r18
 695	add	r18, r6, r6
 696	bgt	SP, r6, tr0	! sp above the stack
 697
 698	/* Make some room for the BASIC frame. */
 699	movi	-(FRAME_SIZE), r6
 700	add	SP, r6, SP
 701
 702/* Could do this with no stalling if we had another spare register, but the
 703   code below will be OK. */
 704	ld.q	r5, SAVED_R2, r6
 705	ld.q	r5, SAVED_R3, r18
 706	st.q	SP, FRAME_R(2), r6
 707	ld.q	r5, SAVED_R4, r6
 708	st.q	SP, FRAME_R(3), r18
 709	ld.q	r5, SAVED_R5, r18
 710	st.q	SP, FRAME_R(4), r6
 711	ld.q	r5, SAVED_R6, r6
 712	st.q	SP, FRAME_R(5), r18
 713	ld.q	r5, SAVED_R18, r18
 714	st.q	SP, FRAME_R(6), r6
 715	ld.q	r5, SAVED_TR0, r6
 716	st.q	SP, FRAME_R(18), r18
 717	st.q	SP, FRAME_T(0), r6
 718
 719	/* Keep old SP around */
 720	getcon	KCR1, r6
 721
 722	/* Save the rest of the general purpose registers */
 723	st.q	SP, FRAME_R(0), r0
 724	st.q	SP, FRAME_R(1), r1
 725	st.q	SP, FRAME_R(7), r7
 726	st.q	SP, FRAME_R(8), r8
 727	st.q	SP, FRAME_R(9), r9
 728	st.q	SP, FRAME_R(10), r10
 729	st.q	SP, FRAME_R(11), r11
 730	st.q	SP, FRAME_R(12), r12
 731	st.q	SP, FRAME_R(13), r13
 732	st.q	SP, FRAME_R(14), r14
 733
 734	/* SP is somewhere else */
 735	st.q	SP, FRAME_R(15), r6
 736
 737	st.q	SP, FRAME_R(16), r16
 738	st.q	SP, FRAME_R(17), r17
 739	/* r18 is saved earlier. */
 740	st.q	SP, FRAME_R(19), r19
 741	st.q	SP, FRAME_R(20), r20
 742	st.q	SP, FRAME_R(21), r21
 743	st.q	SP, FRAME_R(22), r22
 744	st.q	SP, FRAME_R(23), r23
 745	st.q	SP, FRAME_R(24), r24
 746	st.q	SP, FRAME_R(25), r25
 747	st.q	SP, FRAME_R(26), r26
 748	st.q	SP, FRAME_R(27), r27
 749	st.q	SP, FRAME_R(28), r28
 750	st.q	SP, FRAME_R(29), r29
 751	st.q	SP, FRAME_R(30), r30
 752	st.q	SP, FRAME_R(31), r31
 753	st.q	SP, FRAME_R(32), r32
 754	st.q	SP, FRAME_R(33), r33
 755	st.q	SP, FRAME_R(34), r34
 756	st.q	SP, FRAME_R(35), r35
 757	st.q	SP, FRAME_R(36), r36
 758	st.q	SP, FRAME_R(37), r37
 759	st.q	SP, FRAME_R(38), r38
 760	st.q	SP, FRAME_R(39), r39
 761	st.q	SP, FRAME_R(40), r40
 762	st.q	SP, FRAME_R(41), r41
 763	st.q	SP, FRAME_R(42), r42
 764	st.q	SP, FRAME_R(43), r43
 765	st.q	SP, FRAME_R(44), r44
 766	st.q	SP, FRAME_R(45), r45
 767	st.q	SP, FRAME_R(46), r46
 768	st.q	SP, FRAME_R(47), r47
 769	st.q	SP, FRAME_R(48), r48
 770	st.q	SP, FRAME_R(49), r49
 771	st.q	SP, FRAME_R(50), r50
 772	st.q	SP, FRAME_R(51), r51
 773	st.q	SP, FRAME_R(52), r52
 774	st.q	SP, FRAME_R(53), r53
 775	st.q	SP, FRAME_R(54), r54
 776	st.q	SP, FRAME_R(55), r55
 777	st.q	SP, FRAME_R(56), r56
 778	st.q	SP, FRAME_R(57), r57
 779	st.q	SP, FRAME_R(58), r58
 780	st.q	SP, FRAME_R(59), r59
 781	st.q	SP, FRAME_R(60), r60
 782	st.q	SP, FRAME_R(61), r61
 783	st.q	SP, FRAME_R(62), r62
 784
 785	/*
 786	 * Save the S* registers.
 787	 */
 788	getcon	SSR, r61
 789	st.q	SP, FRAME_S(FSSR), r61
 790	getcon	SPC, r62
 791	st.q	SP, FRAME_S(FSPC), r62
 792	movi	-1, r62			/* Reset syscall_nr */
 793	st.q	SP, FRAME_S(FSYSCALL_ID), r62
 794
 795	/* Save the rest of the target registers */
 796	gettr	tr1, r6
 797	st.q	SP, FRAME_T(1), r6
 798	gettr	tr2, r6
 799	st.q	SP, FRAME_T(2), r6
 800	gettr	tr3, r6
 801	st.q	SP, FRAME_T(3), r6
 802	gettr	tr4, r6
 803	st.q	SP, FRAME_T(4), r6
 804	gettr	tr5, r6
 805	st.q	SP, FRAME_T(5), r6
 806	gettr	tr6, r6
 807	st.q	SP, FRAME_T(6), r6
 808	gettr	tr7, r6
 809	st.q	SP, FRAME_T(7), r6
 810
 811	! setup FP so that unwinder can wind back through nested kernel mode
 812	! exceptions
 813	add	SP, ZERO, r14
 814
 815	/* For syscall and debug race condition, get TRA now */
 816	getcon	TRA, r5
 817
 818	/* We are in a safe position to turn SR.BL off, but set IMASK=0xf
 819	 * Also set FD, to catch FPU usage in the kernel.
 820	 *
 821	 * benedict.gaster@superh.com 29/07/2002
 822	 *
 823	 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
 824	 * same time change BL from 1->0, as any pending interrupt of a level
 825	 * higher than the previous value of IMASK will leak through and be
 826	 * taken unexpectedly.
 827	 *
 828	 * To avoid this we raise the IMASK and then issue another PUTCON to
 829	 * enable interrupts.
 830         */
 831	getcon	SR, r6
 832	movi	SR_IMASK | SR_FD, r7
 833	or	r6, r7, r6
 834	putcon	r6, SR
 835	movi	SR_UNBLOCK_EXC, r7
 836	and	r6, r7, r6
 837	putcon	r6, SR
 838
 839
 840	/* Now call the appropriate 3rd level handler */
 841	or	r3, ZERO, LINK
 842	movi	trap_jtable, r3
 843	shlri	r2, 3, r2		/* EXPEVT>>3 = byte offset of 4-byte jtable entry */
 844	ldx.l	r2, r3, r3
 845	shlri	r2, 2, r2		/* EXPEVT>>5 = trap entry number, passed in r2 */
 846	ptabs	r3, tr0
 847	or	SP, ZERO, r3
 848	blink	tr0, ZERO
 849
 850/*
 851 * Second level handler for VBR-based exceptions. Post-handlers.
 852 *
 853 * Post-handlers for interrupts (ret_from_irq), exceptions
 854 * (ret_from_exception) and common reentrance doors (restore_all
 855 * to get back to the original context, ret_from_syscall loop to
 856 * check kernel exiting).
 857 *
 858 * ret_with_reschedule and work_notifysig are inner labels of
 859 * the ret_from_syscall loop.
 860 *
 861 * In common to all stack-frame sensitive handlers.
 862 *
 863 * Inputs:
 864 * (SP)   struct pt_regs *, original register's frame pointer (basic)
 865 *
 866 */
 867	.global ret_from_irq
 868ret_from_irq:
 869	ld.q	SP, FRAME_S(FSSR), r6
 870	shlri	r6, 30, r6
 871	andi	r6, 1, r6		/* r6 = saved SSR.MD */
 872	pta	resume_kernel, tr0
 873	bne	r6, ZERO, tr0		/* no further checks */
 874	STI()
 875	pta	ret_with_reschedule, tr0
 876	blink	tr0, ZERO		/* Do not check softirqs */
 877
 878	.global ret_from_exception
 879ret_from_exception:
 880	preempt_stop()
 881
 882	ld.q	SP, FRAME_S(FSSR), r6
 883	shlri	r6, 30, r6
 884	andi	r6, 1, r6		/* r6 = saved SSR.MD */
 885	pta	resume_kernel, tr0
 886	bne	r6, ZERO, tr0		/* no further checks */
 887
 888	/* Check softirqs */
 889
 890#ifdef CONFIG_PREEMPT
 891	pta   ret_from_syscall, tr0
 892	blink   tr0, ZERO
 893
 894resume_kernel:
 895	CLI()
 896
 897	pta	restore_all, tr0
 898
 899	getcon	KCR0, r6
 900	ld.l	r6, TI_PRE_COUNT, r7
 901	beq/u	r7, ZERO, tr0		/* preempt_count != 0: no preemption */
 902
 903need_resched:
 904	ld.l	r6, TI_FLAGS, r7
 905	movi	(1 << TIF_NEED_RESCHED), r8
 906	and	r8, r7, r8
 907	bne	r8, ZERO, tr0
 908
 909	getcon	SR, r7
 910	andi	r7, 0xf0, r7
 911	bne	r7, ZERO, tr0		/* IMASK nonzero: interrupts masked */
 912
 913	movi	preempt_schedule_irq, r7
 914	ori	r7, 1, r7
 915	ptabs	r7, tr1
 916	blink	tr1, LINK
 917
 918	pta	need_resched, tr1
 919	blink	tr1, ZERO
 920#endif
 921
 922	.global ret_from_syscall
 923ret_from_syscall:
 924
 925ret_with_reschedule:
	/* Exit path: handle reschedule / signal work, else restore_all. */
 926	getcon	KCR0, r6		! r6 contains current_thread_info
 927	ld.l	r6, TI_FLAGS, r7	! r7 contains current_thread_info->flags
 928
 929	movi	_TIF_NEED_RESCHED, r8
 930	and	r8, r7, r8
 931	pta	work_resched, tr0
 932	bne	r8, ZERO, tr0
 933
 934	pta	restore_all, tr1
 935
 936	movi	_TIF_SIGPENDING, r8
 937	and	r8, r7, r8
 938	pta	work_notifysig, tr0
 939	bne	r8, ZERO, tr0
 940
 941	blink	tr1, ZERO		! no work: restore_all
 942
 943work_resched:
 944	pta	ret_from_syscall, tr0
 945	gettr	tr0, LINK		! schedule() returns to ret_from_syscall
 946	movi	schedule, r6
 947	ptabs	r6, tr0
 948	blink	tr0, ZERO		/* Call schedule(), return on top */
 949
 950work_notifysig:
 951	gettr	tr1, LINK		! return to restore_all (tr1 set by caller)
 952
 953	movi	do_notify_resume, r6
 954	ptabs	r6, tr0
 955	or	SP, ZERO, r2
 956	or	r7, ZERO, r3
 957	blink	tr0, LINK	    /* Call do_notify_resume(regs, current_thread_info->flags), return here */
 958
 959restore_all:
	/* Restore the full context from the BASIC frame and rte back. */
 960	/* Do prefetches */
 961
 962	ld.q	SP, FRAME_T(0), r6
 963	ld.q	SP, FRAME_T(1), r7
 964	ld.q	SP, FRAME_T(2), r8
 965	ld.q	SP, FRAME_T(3), r9
 966	ptabs	r6, tr0
 967	ptabs	r7, tr1
 968	ptabs	r8, tr2
 969	ptabs	r9, tr3
 970	ld.q	SP, FRAME_T(4), r6
 971	ld.q	SP, FRAME_T(5), r7
 972	ld.q	SP, FRAME_T(6), r8
 973	ld.q	SP, FRAME_T(7), r9
 974	ptabs	r6, tr4
 975	ptabs	r7, tr5
 976	ptabs	r8, tr6
 977	ptabs	r9, tr7
 978
 979	ld.q	SP, FRAME_R(0), r0
 980	ld.q	SP, FRAME_R(1), r1
 981	ld.q	SP, FRAME_R(2), r2
 982	ld.q	SP, FRAME_R(3), r3
 983	ld.q	SP, FRAME_R(4), r4
 984	ld.q	SP, FRAME_R(5), r5
 985	ld.q	SP, FRAME_R(6), r6
 986	ld.q	SP, FRAME_R(7), r7
 987	ld.q	SP, FRAME_R(8), r8
 988	ld.q	SP, FRAME_R(9), r9
 989	ld.q	SP, FRAME_R(10), r10
 990	ld.q	SP, FRAME_R(11), r11
 991	ld.q	SP, FRAME_R(12), r12
 992	ld.q	SP, FRAME_R(13), r13
 993	ld.q	SP, FRAME_R(14), r14
 994
 995	ld.q	SP, FRAME_R(16), r16
 996	ld.q	SP, FRAME_R(17), r17
 997	ld.q	SP, FRAME_R(18), r18
 998	ld.q	SP, FRAME_R(19), r19
 999	ld.q	SP, FRAME_R(20), r20
1000	ld.q	SP, FRAME_R(21), r21
1001	ld.q	SP, FRAME_R(22), r22
1002	ld.q	SP, FRAME_R(23), r23
1003	ld.q	SP, FRAME_R(24), r24
1004	ld.q	SP, FRAME_R(25), r25
1005	ld.q	SP, FRAME_R(26), r26
1006	ld.q	SP, FRAME_R(27), r27
1007	ld.q	SP, FRAME_R(28), r28
1008	ld.q	SP, FRAME_R(29), r29
1009	ld.q	SP, FRAME_R(30), r30
1010	ld.q	SP, FRAME_R(31), r31
1011	ld.q	SP, FRAME_R(32), r32
1012	ld.q	SP, FRAME_R(33), r33
1013	ld.q	SP, FRAME_R(34), r34
1014	ld.q	SP, FRAME_R(35), r35
1015	ld.q	SP, FRAME_R(36), r36
1016	ld.q	SP, FRAME_R(37), r37
1017	ld.q	SP, FRAME_R(38), r38
1018	ld.q	SP, FRAME_R(39), r39
1019	ld.q	SP, FRAME_R(40), r40
1020	ld.q	SP, FRAME_R(41), r41
1021	ld.q	SP, FRAME_R(42), r42
1022	ld.q	SP, FRAME_R(43), r43
1023	ld.q	SP, FRAME_R(44), r44
1024	ld.q	SP, FRAME_R(45), r45
1025	ld.q	SP, FRAME_R(46), r46
1026	ld.q	SP, FRAME_R(47), r47
1027	ld.q	SP, FRAME_R(48), r48
1028	ld.q	SP, FRAME_R(49), r49
1029	ld.q	SP, FRAME_R(50), r50
1030	ld.q	SP, FRAME_R(51), r51
1031	ld.q	SP, FRAME_R(52), r52
1032	ld.q	SP, FRAME_R(53), r53
1033	ld.q	SP, FRAME_R(54), r54
1034	ld.q	SP, FRAME_R(55), r55
1035	ld.q	SP, FRAME_R(56), r56
1036	ld.q	SP, FRAME_R(57), r57
1037	ld.q	SP, FRAME_R(58), r58
1038
1039	getcon	SR, r59
1040	movi	SR_BLOCK_EXC, r60
1041	or	r59, r60, r59
1042	putcon	r59, SR			/* SR.BL = 1, keep nesting out */
1043	ld.q	SP, FRAME_S(FSSR), r61
1044	ld.q	SP, FRAME_S(FSPC), r62
1045	movi	SR_ASID_MASK, r60
1046	and	r59, r60, r59
1047	andc	r61, r60, r61		/* Clear out older ASID */
1048	or	r59, r61, r61		/* Retain current ASID */
1049	putcon	r61, SSR
1050	putcon	r62, SPC
1051
1052	/* Ignore FSYSCALL_ID */
1053
1054	ld.q	SP, FRAME_R(59), r59
1055	ld.q	SP, FRAME_R(60), r60
1056	ld.q	SP, FRAME_R(61), r61
1057	ld.q	SP, FRAME_R(62), r62
1058
1059	/* Last touch */
1060	ld.q	SP, FRAME_R(15), SP
1061	rte
1062	nop
1063
1064/*
1065 * Third level handlers for VBR-based exceptions. Adapting args to
1066 * and/or deflecting to fourth level handlers.
1067 *
1068 * Fourth level handlers interface.
1069 * Most are C-coded handlers directly pointed by the trap_jtable.
1070 * (Third = Fourth level)
1071 * Inputs:
1072 * (r2)   fault/interrupt code, entry number (e.g. NMI = 14,
1073 *	  IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1074 * (r3)   struct pt_regs *, original register's frame pointer
1075 * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1076 * (r5)   TRA control register (for syscall/debug benefit only)
1077 * (LINK) return address
1078 * (SP)   = r3
1079 *
1080 * Kernel TLB fault handlers will get a slightly different interface.
1081 * (r2)   struct pt_regs *, original register's frame pointer
1082 * (r3)   writeaccess, whether it's a store fault as opposed to load fault
1083 * (r4)   execaccess, whether it's a ITLB fault as opposed to DTLB fault
1084 * (r5)   Effective Address of fault
1085 * (LINK) return address
1086 * (SP)   = r2
1087 *
1088 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1089 *
1090 */
1091#ifdef CONFIG_MMU
1092tlb_miss_load:
	/* Marshal args and tail-call do_page_fault(regs, write, exec, address);
	 * see the kernel TLB fault handler interface described above. */
1093	or	SP, ZERO, r2
1094	or	ZERO, ZERO, r3		/* Read */
1095	or	ZERO, ZERO, r4		/* Data */
1096	getcon	TEA, r5
1097	pta	call_do_page_fault, tr0
1098	beq	ZERO, ZERO, tr0		/* unconditional branch */
1099
1100tlb_miss_store:
1101	or	SP, ZERO, r2
1102	movi	1, r3			/* Write */
1103	or	ZERO, ZERO, r4		/* Data */
1104	getcon	TEA, r5
1105	pta	call_do_page_fault, tr0
1106	beq	ZERO, ZERO, tr0		/* unconditional branch */
1107
1108itlb_miss_or_IRQ:
1109	pta	its_IRQ, tr0
1110	beqi/u	r4, EVENT_INTERRUPT, tr0


1111	or	SP, ZERO, r2
1112	or	ZERO, ZERO, r3		/* Read */
1113	movi	1, r4			/* Text */
1114	getcon	TEA, r5
1115	/* Fall through */
1116
1117call_do_page_fault:
1118	movi	do_page_fault, r6
1119        ptabs	r6, tr0
1120        blink	tr0, ZERO
1121#endif /* CONFIG_MMU */
1122
1123fpu_error_or_IRQA:
	/* Deflect to do_IRQ if this event is an interrupt, else take the
	 * FPU trap path. */
1124	pta	its_IRQ, tr0
1125	beqi/l	r4, EVENT_INTERRUPT, tr0
1126#ifdef CONFIG_SH_FPU
1127	movi	fpu_state_restore_trap_handler, r6
1128#else
1129	movi	do_exception_error, r6
1130#endif
1131	ptabs	r6, tr0
1132	blink	tr0, ZERO
1133
1134fpu_error_or_IRQB:
	/* Same dispatch as fpu_error_or_IRQA (separate jtable entry). */
1135	pta	its_IRQ, tr0
1136	beqi/l	r4, EVENT_INTERRUPT, tr0
1137#ifdef CONFIG_SH_FPU
1138	movi	fpu_state_restore_trap_handler, r6
1139#else
1140	movi	do_exception_error, r6
1141#endif
1142	ptabs	r6, tr0
1143	blink	tr0, ZERO
1144
1145its_IRQ:
1146	movi	do_IRQ, r6
1147	ptabs	r6, tr0
1148	blink	tr0, ZERO
1149
1150/*
1151 * system_call/unknown_trap third level handler:
1152 *
1153 * Inputs:
1154 * (r2)   fault/interrupt code, entry number (TRAP = 11)
1155 * (r3)   struct pt_regs *, original register's frame pointer
1156 * (r4)   Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1157 * (r5)   TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1158 * (SP)   = r3
1159 * (LINK) return address: ret_from_exception
1160 * (*r3)  Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1161 *
1162 * Outputs:
1163 * (*r3)  Syscall reply (Saved r2)
1164 * (LINK) In case of syscall only it can be scrapped.
1165 *        Common second level post handler will be ret_from_syscall.
1166 *        Common (non-trace) exit point to that is syscall_ret (saving
1167 *        result to r2). Common bad exit point is syscall_bad (returning
1168 *        ENOSYS then saved to r2).
1169 *
1170 */
1171
1172unknown_trap:
1173	/* Unknown Trap or User Trace */
1174	movi	do_unknown_trapa, r6
1175	ptabs	r6, tr0
1176        ld.q    r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
1177        andi    r2, 0x1ff, r2		/* r2 = syscall # */
1178	blink	tr0, LINK		/* call handler, return here */
1179
1180	pta	syscall_ret, tr0
1181	blink	tr0, ZERO
1182
1183        /* New syscall implementation*/
1184system_call:
	/* TRAP third-level handler: validate TRA, then dispatch through
	 * sys_call_table (see syscall_allowed / syscall_notrace). */
1185	pta	unknown_trap, tr0
1186        or      r5, ZERO, r4            /* TRA (=r5) -> r4 */
1187        shlri   r4, 20, r4
1188	bnei	r4, 1, tr0		/* unknown_trap if not 0x1yzzzz */
1189
1190        /* It's a system call */
1191	st.q    r3, FRAME_S(FSYSCALL_ID), r5 	/* ID (0x1yzzzz) -> stack */
1192	andi    r5, 0x1ff, r5			/* syscall # -> r5	  */
1193
1194	STI()
1195
1196	pta	syscall_allowed, tr0
1197	movi	NR_syscalls - 1, r4	/* Last valid */
1198	bgeu/l	r4, r5, tr0
1199
1200syscall_bad:
1201	/* Return ENOSYS ! */
1202	movi	-(ENOSYS), r2		/* Fall-through */
1203
1204	.global syscall_ret
1205syscall_ret:
1206	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */
1207	ld.q	SP, FRAME_S(FSPC), r2
1208	addi	r2, 4, r2		/* Move PC, being pre-execution event */
1209	st.q	SP, FRAME_S(FSPC), r2
1210	pta	ret_from_syscall, tr0
1211	blink	tr0, ZERO
1212
1213
1214/*  A different return path for ret_from_fork, because we now need
1215 *  to call schedule_tail with the later kernels. Because prev is
1216 *  loaded into r2 by switch_to() means we can just call it straight  away
1217 */
1218
1219.global	ret_from_fork
1220ret_from_fork:
1221
1222	movi	schedule_tail,r5
1223	ori	r5, 1, r5		/* bit0=1: SHmedia target */
1224	ptabs	r5, tr0
1225	blink	tr0, LINK		/* schedule_tail(prev /* in r2 */) */
1226
1227	ld.q	SP, FRAME_S(FSPC), r2
1228	addi	r2, 4, r2		/* Move PC, being pre-execution event */
1229	st.q	SP, FRAME_S(FSPC), r2
1230	pta	ret_from_syscall, tr0
1231	blink	tr0, ZERO
1232
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1233syscall_allowed:
	/* Valid syscall number: honour tracing if enabled, then dispatch
	 * through sys_call_table. Exit point is in LINK. */
1234	/* Use LINK to deflect the exit point, default is syscall_ret */
1235	pta	syscall_ret, tr0
1236	gettr	tr0, LINK
1237	pta	syscall_notrace, tr0
1238
1239	getcon	KCR0, r2
1240	ld.l	r2, TI_FLAGS, r4
1241	movi	_TIF_WORK_SYSCALL_MASK, r6
1242	and	r6, r4, r6
1243	beq/l	r6, ZERO, tr0		/* not traced: skip enter/leave hooks */
1244
1245	/* Trace it by calling syscall_trace before and after */
1246	movi	do_syscall_trace_enter, r4
1247	or	SP, ZERO, r2
1248	ptabs	r4, tr0
1249	blink	tr0, LINK
1250
1251	/* Save the retval */
1252	st.q	SP, FRAME_R(2), r2
1253
1254	/* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
1255	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
1256	andi	r5, 0x1ff, r5
1257
1258	pta	syscall_ret_trace, tr0
1259	gettr	tr0, LINK
1260
1261syscall_notrace:
1262	/* Now point to the appropriate 4th level syscall handler */
1263	movi	sys_call_table, r4
1264	shlli	r5, 2, r5		/* syscall# * 4 = byte offset of entry */
1265	ldx.l	r4, r5, r5
1266	ptabs	r5, tr0
1267
1268	/* Prepare original args */
1269	ld.q	SP, FRAME_R(2), r2
1270	ld.q	SP, FRAME_R(3), r3
1271	ld.q	SP, FRAME_R(4), r4
1272	ld.q	SP, FRAME_R(5), r5
1273	ld.q	SP, FRAME_R(6), r6
1274	ld.q	SP, FRAME_R(7), r7
1275
1276	/* And now the trick for those syscalls requiring regs * ! */
1277	or	SP, ZERO, r8
1278
1279	/* Call it */
1280	blink	tr0, ZERO	/* LINK is already properly set */
1281
1282syscall_ret_trace:
	/* Traced-syscall exit: report the result, then resume the normal
	 * syscall return sequence. */
1283	/* We get back here only if under trace */
1284	st.q	SP, FRAME_R(9), r2	/* Save return value */
1285
1286	movi	do_syscall_trace_leave, LINK
1287	or	SP, ZERO, r2
1288	ptabs	LINK, tr0
1289	blink	tr0, LINK
1290
1291	/* This needs to be done after any syscall tracing */
1292	ld.q	SP, FRAME_S(FSPC), r2
1293	addi	r2, 4, r2	/* Move PC, being pre-execution event */
1294	st.q	SP, FRAME_S(FSPC), r2
1295
1296	pta	ret_from_syscall, tr0
1297	blink	tr0, ZERO		/* Resume normal return sequence */
1298
1299/*
1300 * --- Switch to running under a particular ASID and return the previous ASID value
1301 * --- The caller is assumed to have done a cli before calling this.
1302 *
1303 * Input r2 : new ASID
1304 * Output r2 : old ASID
1305 */
1306
1307	.global switch_and_save_asid
1308switch_and_save_asid:
	/* The new SR.ASID is made live by an rte through SSR/SPC. */
1309	getcon	sr, r0
1310	movi	255, r4
1311	shlli 	r4, 16, r4	/* r4 = mask to select ASID */
1312	and	r0, r4, r3	/* r3 = shifted old ASID */
1313	andi	r2, 255, r2	/* mask down new ASID */
1314	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
1315	andc	r0, r4, r0	/* efface old ASID from SR */
1316	or	r0, r2, r0	/* insert the new ASID */
1317	putcon	r0, ssr
1318	movi	1f, r0
1319	putcon	r0, spc
1320	rte
1321	nop
13221:
1323	ptabs	LINK, tr0
1324	shlri	r3, 16, r2	/* r2 = old ASID */
1325	blink tr0, r63
1326
1327	.global	route_to_panic_handler
1328route_to_panic_handler:
1329	/* Switch to real mode, goto panic_handler, don't return.  Useful for
1330	   last-chance debugging, e.g. if no output wants to go to the console.
1331	   */
1332
1333	movi	panic_handler - CONFIG_PAGE_OFFSET, r1
1334	ptabs	r1, tr0
1335	pta	1f, tr1
1336	gettr	tr1, r0
1337	putcon	r0, spc
1338	getcon	sr, r0
1339	movi	1, r1
1340	shlli	r1, 31, r1	/* bit 31 = SR.MMU */
1341	andc	r0, r1, r0	/* clear SR.MMU for the rte */
1342	putcon	r0, ssr
1343	rte
1344	nop
13451:	/* Now in real mode */
1346	blink tr0, r63
1347	nop
1348
1349	.global peek_real_address_q
1350peek_real_address_q:
1351	/* One arg, one result:
1352	   r2 : real mode address to peek
1353	   r2(out) : result quadword
1354
1355	   This is provided as a cheapskate way of manipulating device
1356	   registers for debugging (to avoid the need to ioremap the debug
1357	   module, and to avoid the need to ioremap the watchpoint
1358	   controller in a way that identity maps sufficient bits to avoid the
1359	   SH5-101 cut2 silicon defect).
1360
1361	   This code is not performance critical
1362	*/
1363
1364	add.l	r2, r63, r2	/* sign extend address */
1365	getcon	sr, r0		/* r0 = saved original SR */
1366	movi	1, r1
1367	shlli	r1, 28, r1
1368	or	r0, r1, r1	/* r0 with block bit set */
1369	putcon	r1, sr		/* now in critical section */
1370	movi	1, r36
1371	shlli	r36, 31, r36
1372	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
1373
1374	putcon	r1, ssr
1375	movi	.peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1376	movi	1f, r37		/* virtual mode return addr */
1377	putcon	r36, spc
1378
1379	synco
1380	rte
1381	nop
1382
1383.peek0:	/* come here in real mode, don't touch caches!!
1384           still in critical section (sr.bl==1) */
1385	putcon	r0, ssr
1386	putcon	r37, spc
1387	/* Here's the actual peek.  If the address is bad, all bets are now off
1388	 * what will happen (handlers invoked in real-mode = bad news) */
1389	ld.q	r2, 0, r2
1390	synco
1391	rte	/* Back to virtual mode */
1392	nop
1393
13941:	/* back in virtual mode */
1395	ptabs	LINK, tr0
1396	blink	tr0, r63
1397
1398	.global poke_real_address_q
1399poke_real_address_q:
1400	/* Two args:
1401	   r2 : real mode address to poke
1402	   r3 : quadword value to write.
1403
1404	   This is provided as a cheapskate way of manipulating device
1405	   registers for debugging (to avoid the need to ioremap the debug
1406	   module, and to avoid the need to ioremap the watchpoint
1407	   controller in a way that identity maps sufficient bits to avoid the
1408	   SH5-101 cut2 silicon defect).
1409
1410	   This code is not performance critical
1411	*/
1412
1413	add.l	r2, r63, r2	/* sign extend address */
1414	getcon	sr, r0		/* r0 = saved original SR */
1415	movi	1, r1
1416	shlli	r1, 28, r1
1417	or	r0, r1, r1	/* r0 with block bit set */
1418	putcon	r1, sr		/* now in critical section */
1419	movi	1, r36
1420	shlli	r36, 31, r36
1421	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */
1422
1423	putcon	r1, ssr
1424	movi	.poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1425	movi	1f, r37		/* virtual mode return addr */
1426	putcon	r36, spc
1427
1428	synco
1429	rte
1430	nop
1431
1432.poke0:	/* come here in real mode, don't touch caches!!
1433           still in critical section (sr.bl==1) */
1434	putcon	r0, ssr
1435	putcon	r37, spc
1436	/* Here's the actual poke.  If the address is bad, all bets are now off
1437	 * what will happen (handlers invoked in real-mode = bad news) */
1438	st.q	r2, 0, r3
1439	synco
1440	rte	/* Back to virtual mode */
1441	nop
1442
14431:	/* back in virtual mode */
1444	ptabs	LINK, tr0
1445	blink	tr0, r63
1446
1447#ifdef CONFIG_MMU
1448/*
1449 * --- User Access Handling Section
1450 */
1451
1452/*
1453 * User Access support. It all moved to non inlined Assembler
1454 * functions in here.
1455 *
1456 * __kernel_size_t __copy_user(void *__to, const void *__from,
1457 *			       __kernel_size_t __n)
1458 *
1459 * Inputs:
1460 * (r2)  target address
1461 * (r3)  source address
1462 * (r4)  size in bytes
1463 *
1464 * Outputs:
1465 * (*r2) target data
1466 * (r2)  non-copied bytes
1467 *
1468 * If a fault occurs on the user pointer, bail out early and return the
1469 * number of bytes not copied in r2.
1470 * Strategy : for large blocks, call a real memcpy function which can
1471 * move >1 byte at a time using unaligned ld/st instructions, and can
1472 * manipulate the cache using prefetch + alloco to improve the speed
1473 * further.  If a fault occurs in that function, just revert to the
1474 * byte-by-byte approach used for small blocks; this is rare so the
1475 * performance hit for that case does not matter.
1476 *
1477 * For small blocks it's not worth the overhead of setting up and calling
1478 * the memcpy routine; do the copy a byte at a time.
1479 *
1480 */
/*
 * __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n)
 * In:  r2 = dest, r3 = src, r4 = byte count.
 * Out: r2 = number of bytes NOT copied (0 on success).
 * Counts <= 16 go straight to the byte loop; larger copies call
 * copy_user_memcpy, with faults patched up via __copy_user_fixup below.
 */
1481	.global	__copy_user
1482__copy_user:
1483	pta	__copy_user_byte_by_byte, tr1
1484	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
1485	bge/u	r0, r4, tr1	! small copy (count <= 16): byte-by-byte
1486	pta copy_user_memcpy, tr0
1487	addi	SP, -32, SP
1488	/* Save arguments in case we have to fix-up unhandled page fault */
1489	st.q	SP, 0, r2
1490	st.q	SP, 8, r3
1491	st.q	SP, 16, r4
1492	st.q	SP, 24, r35 ! r35 is callee-save
1493	/* Save LINK in a register to reduce RTS time later (otherwise
1494	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
1495	ori	LINK, 0, r35
1496	blink	tr0, LINK	! call copy_user_memcpy
1497
1498	/* Copy completed normally if we get back here */
1499	ptabs	r35, tr0
1500	ld.q	SP, 24, r35	! restore callee-saved r35
1501	/* don't restore r2-r4, pointless */
1502	/* set result=r2 to zero as the copy must have succeeded. */
1503	or	r63, r63, r2
1504	addi	SP, 32, SP
1505	blink	tr0, r63 ! RTS
1506
/* Fixup target for faults inside copy_user_memcpy (wired up through the
 * __ex_table entries at the end of the file): rebuild LINK and the original
 * r2/r3/r4 arguments from the stack frame, drop the frame, and redo the
 * whole copy one byte at a time. */
1507	.global __copy_user_fixup
1508__copy_user_fixup:
1509	/* Restore stack frame */
1510	ori	r35, 0, LINK
1511	ld.q	SP, 24, r35
1512	ld.q	SP, 16, r4
1513	ld.q	SP,  8, r3
1514	ld.q	SP,  0, r2
1515	addi	SP, 32, SP
1516	/* Fall through to original code, in the 'same' state we entered with */
1517
1518/* The slow byte-by-byte method is used if the fast copy traps due to a bad
1519   user address.  In that rare case, the speed drop can be tolerated. */
1520__copy_user_byte_by_byte:
1521	pta	___copy_user_exit, tr1
1522	pta	___copy_user1, tr0
1523	beq/u	r4, r63, tr1	/* early exit for zero length copy */
1524	sub	r2, r3, r0	/* r0 = dest - src, so stx.b r3,r0 hits dest */
1525	addi	r0, -1, r0	/* bias by -1 to absorb the r3 increment below */
1526
1527___copy_user1:
1528	ld.b	r3, 0, r5		/* Fault address 1 */
1529
1530	/* Could rewrite this to use just 1 add, but the second comes 'free'
1531	   due to load latency */
1532	addi	r3, 1, r3
1533	addi	r4, -1, r4		/* No real fixup required */
1534___copy_user2:
1535	stx.b	r3, r0, r5		/* Fault address 2 */
1536	bne     r4, ZERO, tr0
1537
	/* Faults in the loop land here via __ex_table with r4 = bytes left */
1538___copy_user_exit:
1539	or	r4, ZERO, r2	/* return bytes remaining (0 on success) */
1540	ptabs	LINK, tr0
1541	blink	tr0, ZERO
1542
1543/*
1544 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1545 *
1546 * Inputs:
1547 * (r2)  target address
1548 * (r3)  size in bytes
1549 *
1550 * Outputs:
1551 * (*r2) zero-ed target data
1552 * (r2)  non-zero-ed bytes
1553 */
/*
 * __clear_user: zero r3 bytes at user address r2.
 * Out: r2 = number of bytes NOT zeroed (0 on success).  A fault on the
 * store resumes at ___clear_user_exit via __ex_table.
 */
1554	.global	__clear_user
1555__clear_user:
1556	pta	___clear_user_exit, tr1
1557	pta	___clear_user1, tr0
1558	beq/u	r3, r63, tr1	/* early exit for zero length */
1559
1560___clear_user1:
1561	st.b	r2, 0, ZERO		/* Fault address */
1562	addi	r2, 1, r2
1563	addi	r3, -1, r3		/* No real fixup required */
1564	bne     r3, ZERO, tr0	/* loop while bytes remain */
1565
1566___clear_user_exit:
1567	or	r3, ZERO, r2	/* return bytes left un-zeroed */
1568	ptabs	LINK, tr0
1569	blink	tr0, ZERO
1570
1571#endif /* CONFIG_MMU */
1572
1573/*
1574 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1575 *			   int __count)
1576 *
1577 * Inputs:
1578 * (r2)  target address
1579 * (r3)  source address
1580 * (r4)  maximum size in bytes
1581 *
1582 * Outputs:
1583 * (*r2) copied data
1584 * (r2)  -EFAULT (in case of faulting)
1585 *       copied data (otherwise)
1586 */
/*
 * __strncpy_from_user: copy at most r4 bytes of a NUL-terminated string
 * from user address r3 to kernel address r2.
 * Out: r2 = bytes copied, or -EFAULT if the user-space load faulted
 * (faults resume at ___strncpy_from_user_exit with r6 still -EFAULT).
 */
1587	.global	__strncpy_from_user
1588__strncpy_from_user:
1589	pta	___strncpy_from_user1, tr0
1590	pta	___strncpy_from_user_done, tr1
1591	or	r4, ZERO, r5		/* r5 = original count */
1592	beq/u	r4, r63, tr1		/* early exit if r4==0 */
1593	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
1594	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
1595
1596___strncpy_from_user1:
1597	ld.b	r3, 0, r7		/* Fault address: only in reading */
1598	st.b	r2, 0, r7
1599	addi	r2, 1, r2
1600	addi	r3, 1, r3
1601	beq/u	ZERO, r7, tr1	/* stop once the NUL has been copied */
1602	addi	r4, -1, r4		/* return real number of copied bytes */
1603	bne/l	ZERO, r4, tr0
1604
1605___strncpy_from_user_done:
1606	sub	r5, r4, r6		/* If done, return copied */
1607
	/* Fault path enters here directly, leaving r6 = -EFAULT */
1608___strncpy_from_user_exit:
1609	or	r6, ZERO, r2
1610	ptabs	LINK, tr0
1611	blink	tr0, ZERO
1612
1613/*
1614 * extern long __strnlen_user(const char *__s, long __n)
1615 *
1616 * Inputs:
1617 * (r2)  source address
1618 * (r3)  source size in bytes
1619 *
1620 * Outputs:
1621 * (r2)  -EFAULT (in case of faulting)
1622 *       string length (otherwise)
1623 */
/*
 * __strnlen_user: scan at most r3 bytes at user address r2 for a NUL.
 * Out: r2 = count of bytes examined (r5), or -EFAULT if the load faulted
 * (faults resume at ___strnlen_user_exit with r6 still -EFAULT).
 */
1624	.global	__strnlen_user
1625__strnlen_user:
1626	pta	___strnlen_user_set_reply, tr0
1627	pta	___strnlen_user1, tr1
1628	or	ZERO, ZERO, r5		/* r5 = counter */
1629	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
1630	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
1631	beq	r3, ZERO, tr0	/* limit == 0: reply with counter (0) */
1632
1633___strnlen_user1:
1634	ldx.b	r2, r5, r7		/* Fault address: only in reading */
1635	addi	r3, -1, r3		/* No real fixup */
1636	addi	r5, 1, r5
1637	beq	r3, ZERO, tr0	/* hit the limit: reply with counter */
1638	bne	r7, ZERO, tr1	/* keep scanning until NUL */
1639! The line below used to be active.  It led to a junk byte lying between each pair
1640! of entries in the argv & envp structures in memory.  Whilst the program saw the right data
1641! via the argv and envp arguments to main, it meant the 'flat' representation visible through
1642! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
1643!	addi	r5, 1, r5		/* Include '\0' */
1644
1645___strnlen_user_set_reply:
1646	or	r5, ZERO, r6		/* If done, return counter */
1647
	/* Fault path enters here directly, leaving r6 = -EFAULT */
1648___strnlen_user_exit:
1649	or	r6, ZERO, r2
1650	ptabs	LINK, tr0
1651	blink	tr0, ZERO
1652
1653/*
1654 * extern long __get_user_asm_?(void *val, long addr)
1655 *
1656 * Inputs:
1657 * (r2)  dest address
1658 * (r3)  source address (in User Space)
1659 *
1660 * Outputs:
1661 * (r2)  -EFAULT (faulting)
1662 *       0 	 (not faulting)
1663 */
/* Fetch one byte from user space: r2 = kernel dest, r3 = user src.
 * Returns 0 in r2, or -EFAULT if the load faults (the fault skips to the
 * _exit label via __ex_table, leaving -EFAULT in r2). */
1664	.global	__get_user_asm_b
1665__get_user_asm_b:
1666	or	r2, ZERO, r4	/* r4 = dest; free r2 for the result */
1667	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
1668
1669___get_user_asm_b1:
1670	ld.b	r3, 0, r5		/* r5 = data */
1671	st.b	r4, 0, r5
1672	or	ZERO, ZERO, r2	/* success: result = 0 */
1673
1674___get_user_asm_b_exit:
1675	ptabs	LINK, tr0
1676	blink	tr0, ZERO
1677
1678
/* 16-bit variant of __get_user_asm_b: same contract, ld.w/st.w */
1679	.global	__get_user_asm_w
1680__get_user_asm_w:
1681	or	r2, ZERO, r4	/* r4 = dest; free r2 for the result */
1682	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
1683
1684___get_user_asm_w1:
1685	ld.w	r3, 0, r5		/* r5 = data */
1686	st.w	r4, 0, r5
1687	or	ZERO, ZERO, r2	/* success: result = 0 */
1688
1689___get_user_asm_w_exit:
1690	ptabs	LINK, tr0
1691	blink	tr0, ZERO
1692
1693
/* 32-bit variant of __get_user_asm_b: same contract, ld.l/st.l */
1694	.global	__get_user_asm_l
1695__get_user_asm_l:
1696	or	r2, ZERO, r4	/* r4 = dest; free r2 for the result */
1697	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
1698
1699___get_user_asm_l1:
1700	ld.l	r3, 0, r5		/* r5 = data */
1701	st.l	r4, 0, r5
1702	or	ZERO, ZERO, r2	/* success: result = 0 */
1703
1704___get_user_asm_l_exit:
1705	ptabs	LINK, tr0
1706	blink	tr0, ZERO
1707
1708
/* 64-bit variant of __get_user_asm_b: same contract, ld.q/st.q */
1709	.global	__get_user_asm_q
1710__get_user_asm_q:
1711	or	r2, ZERO, r4	/* r4 = dest; free r2 for the result */
1712	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
1713
1714___get_user_asm_q1:
1715	ld.q	r3, 0, r5		/* r5 = data */
1716	st.q	r4, 0, r5
1717	or	ZERO, ZERO, r2	/* success: result = 0 */
1718
1719___get_user_asm_q_exit:
1720	ptabs	LINK, tr0
1721	blink	tr0, ZERO
1722
1723/*
1724 * extern long __put_user_asm_?(void *pval, long addr)
1725 *
1726 * Inputs:
1727 * (r2)  kernel pointer to value
1728 * (r3)  dest address (in User Space)
1729 *
1730 * Outputs:
1731 * (r2)  -EFAULT (faulting)
1732 *       0 	 (not faulting)
1733 */
/* Store one byte to user space: r2 = kernel ptr to value, r3 = user dest.
 * Returns 0 in r2, or -EFAULT if the user store faults (fault skips to the
 * _exit label via __ex_table, leaving -EFAULT in r2). */
1734	.global	__put_user_asm_b
1735__put_user_asm_b:
1736	ld.b	r2, 0, r4		/* r4 = data */
1737	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
1738
1739___put_user_asm_b1:
1740	st.b	r3, 0, r4	/* Fault address: store to user space */
1741	or	ZERO, ZERO, r2	/* success: result = 0 */
1742
1743___put_user_asm_b_exit:
1744	ptabs	LINK, tr0
1745	blink	tr0, ZERO
1746
1747
/* 16-bit variant of __put_user_asm_b: same contract, ld.w/st.w */
1748	.global	__put_user_asm_w
1749__put_user_asm_w:
1750	ld.w	r2, 0, r4		/* r4 = data */
1751	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
1752
1753___put_user_asm_w1:
1754	st.w	r3, 0, r4	/* Fault address: store to user space */
1755	or	ZERO, ZERO, r2	/* success: result = 0 */
1756
1757___put_user_asm_w_exit:
1758	ptabs	LINK, tr0
1759	blink	tr0, ZERO
1760
1761
/* 32-bit variant of __put_user_asm_b: same contract, ld.l/st.l */
1762	.global	__put_user_asm_l
1763__put_user_asm_l:
1764	ld.l	r2, 0, r4		/* r4 = data */
1765	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
1766
1767___put_user_asm_l1:
1768	st.l	r3, 0, r4	/* Fault address: store to user space */
1769	or	ZERO, ZERO, r2	/* success: result = 0 */
1770
1771___put_user_asm_l_exit:
1772	ptabs	LINK, tr0
1773	blink	tr0, ZERO
1774
1775
/* 64-bit variant of __put_user_asm_b: same contract, ld.q/st.q */
1776	.global	__put_user_asm_q
1777__put_user_asm_q:
1778	ld.q	r2, 0, r4		/* r4 = data */
1779	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */
1780
1781___put_user_asm_q1:
1782	st.q	r3, 0, r4	/* Fault address: store to user space */
1783	or	ZERO, ZERO, r2	/* success: result = 0 */
1784
1785___put_user_asm_q_exit:
1786	ptabs	LINK, tr0
1787	blink	tr0, ZERO
1788
1789panic_stash_regs:
1790	/* The idea is : when we get an unhandled panic, we dump the registers
1791	   to a known memory location, then just sit in a tight loop.
1792	   This allows the human to look at the memory region through the GDB
1793	   session (assuming the debug module's SHwy initiator isn't locked up
1794	   or anything), to hopefully analyze the cause of the panic. */
1795
1796	/* On entry, former r15 (SP) is in DCR
1797	   former r0  is at resvec_saved_area + 0
1798	   former r1  is at resvec_saved_area + 8
1799	   former tr0 is at resvec_saved_area + 32
1800	   DCR is the only register whose value is lost altogether.
1801	*/
1802
	/* Dump layout: r0..r63 at 8-byte slots from +0x000, tr0..tr7 from
	   +0x200, control registers from +0x240. */
1803	movi	0xffffffff80000000, r0 ! phy of dump area
1804	ld.q	SP, 0x000, r1	! former r0
1805	st.q	r0,  0x000, r1
1806	ld.q	SP, 0x008, r1	! former r1
1807	st.q	r0,  0x008, r1
	/* r2-r14 still hold their original values; dump them directly */
1808	st.q	r0,  0x010, r2
1809	st.q	r0,  0x018, r3
1810	st.q	r0,  0x020, r4
1811	st.q	r0,  0x028, r5
1812	st.q	r0,  0x030, r6
1813	st.q	r0,  0x038, r7
1814	st.q	r0,  0x040, r8
1815	st.q	r0,  0x048, r9
1816	st.q	r0,  0x050, r10
1817	st.q	r0,  0x058, r11
1818	st.q	r0,  0x060, r12
1819	st.q	r0,  0x068, r13
1820	st.q	r0,  0x070, r14
1821	getcon	dcr, r14	! former r15 (SP) was parked in DCR
1822	st.q	r0,  0x078, r14
1823	st.q	r0,  0x080, r16
1824	st.q	r0,  0x088, r17
1825	st.q	r0,  0x090, r18
1826	st.q	r0,  0x098, r19
1827	st.q	r0,  0x0a0, r20
1828	st.q	r0,  0x0a8, r21
1829	st.q	r0,  0x0b0, r22
1830	st.q	r0,  0x0b8, r23
1831	st.q	r0,  0x0c0, r24
1832	st.q	r0,  0x0c8, r25
1833	st.q	r0,  0x0d0, r26
1834	st.q	r0,  0x0d8, r27
1835	st.q	r0,  0x0e0, r28
1836	st.q	r0,  0x0e8, r29
1837	st.q	r0,  0x0f0, r30
1838	st.q	r0,  0x0f8, r31
1839	st.q	r0,  0x100, r32
1840	st.q	r0,  0x108, r33
1841	st.q	r0,  0x110, r34
1842	st.q	r0,  0x118, r35
1843	st.q	r0,  0x120, r36
1844	st.q	r0,  0x128, r37
1845	st.q	r0,  0x130, r38
1846	st.q	r0,  0x138, r39
1847	st.q	r0,  0x140, r40
1848	st.q	r0,  0x148, r41
1849	st.q	r0,  0x150, r42
1850	st.q	r0,  0x158, r43
1851	st.q	r0,  0x160, r44
1852	st.q	r0,  0x168, r45
1853	st.q	r0,  0x170, r46
1854	st.q	r0,  0x178, r47
1855	st.q	r0,  0x180, r48
1856	st.q	r0,  0x188, r49
1857	st.q	r0,  0x190, r50
1858	st.q	r0,  0x198, r51
1859	st.q	r0,  0x1a0, r52
1860	st.q	r0,  0x1a8, r53
1861	st.q	r0,  0x1b0, r54
1862	st.q	r0,  0x1b8, r55
1863	st.q	r0,  0x1c0, r56
1864	st.q	r0,  0x1c8, r57
1865	st.q	r0,  0x1d0, r58
1866	st.q	r0,  0x1d8, r59
1867	st.q	r0,  0x1e0, r60
1868	st.q	r0,  0x1e8, r61
1869	st.q	r0,  0x1f0, r62
1870	st.q	r0,  0x1f8, r63	! bogus, but for consistency's sake...
1871
	/* Branch-target registers: tr0 was saved at entry, tr1-tr7 are live */
1872	ld.q	SP, 0x020, r1  ! former tr0
1873	st.q	r0,  0x200, r1
1874	gettr	tr1, r1
1875	st.q	r0,  0x208, r1
1876	gettr	tr2, r1
1877	st.q	r0,  0x210, r1
1878	gettr	tr3, r1
1879	st.q	r0,  0x218, r1
1880	gettr	tr4, r1
1881	st.q	r0,  0x220, r1
1882	gettr	tr5, r1
1883	st.q	r0,  0x228, r1
1884	gettr	tr6, r1
1885	st.q	r0,  0x230, r1
1886	gettr	tr7, r1
1887	st.q	r0,  0x238, r1
1888
	/* Gather all the control registers, then store them in one batch */
1889	getcon	sr,  r1
1890	getcon	ssr,  r2
1891	getcon	pssr,  r3
1892	getcon	spc,  r4
1893	getcon	pspc,  r5
1894	getcon	intevt,  r6
1895	getcon	expevt,  r7
1896	getcon	pexpevt,  r8
1897	getcon	tra,  r9
1898	getcon	tea,  r10
1899	getcon	kcr0, r11
1900	getcon	kcr1, r12
1901	getcon	vbr,  r13
1902	getcon	resvec,  r14
1903
1904	st.q	r0,  0x240, r1
1905	st.q	r0,  0x248, r2
1906	st.q	r0,  0x250, r3
1907	st.q	r0,  0x258, r4
1908	st.q	r0,  0x260, r5
1909	st.q	r0,  0x268, r6
1910	st.q	r0,  0x270, r7
1911	st.q	r0,  0x278, r8
1912	st.q	r0,  0x280, r9
1913	st.q	r0,  0x288, r10
1914	st.q	r0,  0x290, r11
1915	st.q	r0,  0x298, r12
1916	st.q	r0,  0x2a0, r13
1917	st.q	r0,  0x2a8, r14
1918
	/* Arguments for panic_handler(spc, ssr, expevt) */
1919	getcon	SPC,r2
1920	getcon	SSR,r3
1921	getcon	EXPEVT,r4
1922	/* Prepare to jump to C - physical address */
1923	movi	panic_handler-CONFIG_PAGE_OFFSET, r1
1924	ori	r1, 1, r1	! NOTE(review): bit 0 set on target, as for RESVEC's MMUOFF above -- confirm
1925	ptabs   r1, tr0
1926	getcon	DCR, SP	! recover the original stack pointer
1927	blink	tr0, ZERO
1928	nop
1929	nop
1930	nop
1931	nop
1932
1933
1934
1935
1936/*
1937 * --- Signal Handling Section
1938 */
1939
1940/*
1941 * extern long long _sa_default_rt_restorer
1942 * extern long long _sa_default_restorer
1943 *
1944 *		 or, better,
1945 *
1946 * extern void _sa_default_rt_restorer(void)
1947 * extern void _sa_default_restorer(void)
1948 *
1949 * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
1950 * from user space. Copied into user space by signal management.
1951 * Both must be quad aligned and 2 quad long (4 instructions).
1952 *
1953 */
	.balign 8
1955	.global sa_default_rt_restorer
1956sa_default_rt_restorer:
	/* movi+shori build the trap code: 0x10 in the upper half, syscall
	   number in the lower half; trapa then issues sys_rt_sigreturn */
1957	movi	0x10, r9
1958	shori	__NR_rt_sigreturn, r9
1959	trapa	r9
1960	nop
1961
	.balign 8
1963	.global sa_default_restorer
1964sa_default_restorer:
	/* Same shape as sa_default_rt_restorer, issuing sys_sigreturn */
1965	movi	0x10, r9
1966	shori	__NR_sigreturn, r9
1967	trapa	r9
1968	nop
1969
1970/*
1971 * --- __ex_table Section
1972 */
1973
1974/*
1975 * User Access Exception Table.
1976 */
	.section	__ex_table,  "a"
1978
1979	.global asm_uaccess_start	/* Just a marker */
1980asm_uaccess_start:
1981
/* Each entry pairs a faulting-instruction label with its fixup label */
1982#ifdef CONFIG_MMU
1983	.long	___copy_user1, ___copy_user_exit
1984	.long	___copy_user2, ___copy_user_exit
1985	.long	___clear_user1, ___clear_user_exit
1986#endif
1987	.long	___strncpy_from_user1, ___strncpy_from_user_exit
1988	.long	___strnlen_user1, ___strnlen_user_exit
1989	.long	___get_user_asm_b1, ___get_user_asm_b_exit
1990	.long	___get_user_asm_w1, ___get_user_asm_w_exit
1991	.long	___get_user_asm_l1, ___get_user_asm_l_exit
1992	.long	___get_user_asm_q1, ___get_user_asm_q_exit
1993	.long	___put_user_asm_b1, ___put_user_asm_b_exit
1994	.long	___put_user_asm_w1, ___put_user_asm_w_exit
1995	.long	___put_user_asm_l1, ___put_user_asm_l_exit
1996	.long	___put_user_asm_q1, ___put_user_asm_q_exit
1997
1998	.global asm_uaccess_end		/* Just a marker */
1999asm_uaccess_end:
2000
2001
2002
2003
2004/*
2005 * --- .init.text Section
2006 */
2007
2008	__INIT
2009
2010/*
2011 * void trap_init (void)
2012 *
2013 */
/*
 * trap_init: install the exception vector bases (VBR, RESVEC), sanity-check
 * the vector block size, then clear SR.BL to unblock exceptions.
 * Fix: the prologue saves callee-saved r28/r29/r30 on the stack, but the
 * original epilogue popped the frame without reloading them, clobbering the
 * caller's values; the restores are added before the stack pop.
 */
2014	.global	trap_init
2015trap_init:
2016	addi	SP, -24, SP			/* Room to save r28/r29/r30 */
2017	st.q	SP, 0, r28
2018	st.q	SP, 8, r29
2019	st.q	SP, 16, r30
2020
2021	/* Set VBR and RESVEC */
2022	movi	LVBR_block, r19
2023	andi	r19, -4, r19			/* reset MMUOFF + reserved */
2024	/* For RESVEC exceptions we force the MMU off, which means we need the
2025	   physical address. */
2026	movi	LRESVEC_block-CONFIG_PAGE_OFFSET, r20
2027	andi	r20, -4, r20			/* reset reserved */
2028	ori	r20, 1, r20			/* set MMUOFF */
2029	putcon	r19, VBR
2030	putcon	r20, RESVEC
2031
2032	/* Sanity check */
2033	movi	LVBR_block_end, r21
2034	andi	r21, -4, r21
2035	movi	BLOCK_SIZE, r29			/* r29 = expected size */
2036	or	r19, ZERO, r30
2037	add	r19, r29, r19			/* r19 = expected end of block */
2038
2039	/*
2040	 * Ugly, but better loop forever now than crash afterwards.
2041	 * We should print a message, but if we touch LVBR or
2042	 * LRESVEC blocks we should not be surprised if we get stuck
2043	 * in trap_init().
2044	 */
2045	pta	trap_init_loop, tr1
2046	gettr	tr1, r28			/* r28 = trap_init_loop */
2047	sub	r21, r30, r30			/* r30 = actual size */
2048
2049	/*
2050	 * VBR/RESVEC handlers overlap by being bigger than
2051	 * allowed. Very bad. Just loop forever.
2052	 * (r28) panic/loop address
2053	 * (r29) expected size
2054	 * (r30) actual size
2055	 */
2056trap_init_loop:
2057	bne	r19, r21, tr1		/* size mismatch: spin here forever */
2058
2059	/* Now that exception vectors are set up reset SR.BL */
2060	getcon 	SR, r22
2061	movi	SR_UNBLOCK_EXC, r23
2062	and	r22, r23, r22
2063	putcon	r22, SR
2064
	/* Restore the callee-saved registers stashed in the prologue */
	ld.q	SP, 0, r28
	ld.q	SP, 8, r29
	ld.q	SP, 16, r30
2065	addi	SP, 24, SP
2066	ptabs	LINK, tr0
2067	blink	tr0, ZERO
2068