/*
 *  Meta exception handling.
 *
 *  Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/unistd.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#include <asm/bug.h>
#include <asm/core_reg.h>
#include <asm/irqflags.h>
#include <asm/siginfo.h>
#include <asm/traps.h>
#include <asm/hwthread.h>
#include <asm/setup.h>
#include <asm/switch.h>
#include <asm/user_gateway.h>
#include <asm/syscall.h>
#include <asm/syscalls.h>

/* Passing syscall arguments as long long is quicker. */
typedef unsigned int (*LPSYSCALL) (unsigned long long,
				   unsigned long long,
				   unsigned long long);
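/*
 * Illustrative sketch (not from the original file): two 32-bit syscall
 * arguments travel in each 64-bit parameter, so a six-argument call
 * needs only three loads, exactly as switch1_handler() does below:
 *
 *	a5_a6 = *(unsigned long long *)&regs->ctx.DX[1];
 *	a3_a4 = *(unsigned long long *)&regs->ctx.DX[2];
 *	a1_a2 = *(unsigned long long *)&regs->ctx.DX[3];
 *	ret = syscall_entry(a1_a2, a3_a4, a5_a6);
 */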

/*
 * Users of LNKSET should compare the bus error bits obtained from DEFR
 * against TXDEFR_LNKSET_SUCCESS only, as the failure code will vary
 * between different core revisions.
 */
#define TXDEFR_LNKSET_SUCCESS 0x02000000
#define TXDEFR_LNKSET_FAILURE 0x04000000
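/*
 * A minimal sketch of the check described above (illustrative, not part
 * of the original file): mask TXDEFR down to its bus state bits and
 * test only the success code, as head_end() does below.
 *
 *	unsigned int txdefr = __core_reg_get(TXDEFR);
 *
 *	txdefr &= TXDEFR_BUS_STATE_BITS;
 *	if (txdefr & TXDEFR_LNKSET_SUCCESS)
 *		...the LNKSET took effect...
 *	else
 *		...retry; never compare against TXDEFR_LNKSET_FAILURE...
 */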

/*
 * Our global TBI handle.  Initialised from setup.c/setup_arch.
 */
DECLARE_PER_CPU(PTBI, pTBI);

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(unsigned int, trigger_mask);
#else
unsigned int global_trigger_mask;
EXPORT_SYMBOL(global_trigger_mask);
#endif

unsigned long per_cpu__stack_save[NR_CPUS];

static const char * const trap_names[] = {
	[TBIXXF_SIGNUM_IIF] = "Illegal instruction fault",
	[TBIXXF_SIGNUM_PGF] = "Privilege violation",
	[TBIXXF_SIGNUM_DHF] = "Unaligned data access fault",
	[TBIXXF_SIGNUM_IGF] = "Code fetch general read failure",
	[TBIXXF_SIGNUM_DGF] = "Data access general read/write fault",
	[TBIXXF_SIGNUM_IPF] = "Code fetch page fault",
	[TBIXXF_SIGNUM_DPF] = "Data access page fault",
	[TBIXXF_SIGNUM_IHF] = "Instruction breakpoint",
	[TBIXXF_SIGNUM_DWF] = "Read-only data access fault",
};

const char *trap_name(int trapno)
{
	if (trapno >= 0 && trapno < ARRAY_SIZE(trap_names)
			&& trap_names[trapno])
		return trap_names[trapno];
	return "Unknown fault";
}
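/*
 * Usage example (illustrative): trap_name(TBIXXF_SIGNUM_PGF) returns
 * "Privilege violation", while any out-of-range index or hole in
 * trap_names[] falls back to "Unknown fault".
 */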

static DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs,
		    long err, unsigned long addr)
{
	static int die_counter;

	oops_enter();

	spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
	pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str, err & 0xffff,
	       trap_name(err & 0xffff), addr, ++die_counter);

	print_modules();
	show_regs(regs);

	pr_err("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
	       task_pid_nr(current), task_stack_page(current) + THREAD_SIZE);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	spin_unlock_irq(&die_lock);
	oops_exit();
	do_exit(SIGSEGV);
}

#ifdef CONFIG_METAG_DSP
/*
 * The ECH encoding specifies the size of a DSPRAM as,
 *
 *		"slots" / 4
 *
 * A "slot" is the size of two DSPRAM bank entries; an entry from
 * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank
 * entry is 4 bytes.
 */
#define SLOT_SZ	8
static inline unsigned int decode_dspram_size(unsigned int size)
{
	unsigned int _sz = size & 0x7f;

	return _sz * SLOT_SZ * 4;
}
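/*
 * Worked example (illustrative, not from the original sources): a raw
 * size field of 1 decodes to 1 * SLOT_SZ * 4 = 32 bytes, i.e. 4 slots
 * of two 4-byte bank entries each. Bank A's field lives in bits 8-14
 * of D0.8 and bank B's in bits 0-6, hence the masks and the ">> 8"
 * used by dspram_save() and nest_interrupts() below.
 */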

static void dspram_save(struct meta_ext_context *dsp_ctx,
			unsigned int ramA_sz, unsigned int ramB_sz)
{
	unsigned int ram_sz[2];
	int i;

	ram_sz[0] = ramA_sz;
	ram_sz[1] = ramB_sz;

	for (i = 0; i < 2; i++) {
		if (ram_sz[i] != 0) {
			unsigned int sz;

			if (i == 0)
				sz = decode_dspram_size(ram_sz[i] >> 8);
			else
				sz = decode_dspram_size(ram_sz[i]);

			if (dsp_ctx->ram[i] == NULL) {
				dsp_ctx->ram[i] = kmalloc(sz, GFP_KERNEL);

				if (dsp_ctx->ram[i] == NULL)
					panic("couldn't save DSP context");
			} else {
				if (ram_sz[i] > dsp_ctx->ram_sz[i]) {
					kfree(dsp_ctx->ram[i]);

					dsp_ctx->ram[i] = kmalloc(sz,
								  GFP_KERNEL);

					if (dsp_ctx->ram[i] == NULL)
						panic("couldn't save DSP context");
				}
			}

			if (i == 0)
				__TBIDspramSaveA(ram_sz[i], dsp_ctx->ram[i]);
			else
				__TBIDspramSaveB(ram_sz[i], dsp_ctx->ram[i]);

			dsp_ctx->ram_sz[i] = ram_sz[i];
		}
	}
}
#endif /* CONFIG_METAG_DSP */

/*
 * Allow interrupts to be nested and save any "extended" register
 * context state, e.g. DSP regs and RAMs.
 */
static void nest_interrupts(TBIRES State, unsigned long mask)
{
#ifdef CONFIG_METAG_DSP
	struct meta_ext_context *dsp_ctx;
	unsigned int D0_8;

	/*
	 * D0.8 may contain an ECH encoding. The upper 16 bits
	 * tell us what DSP resources the current process is
	 * using. OR the bits into the SaveMask so that
	 * __TBINestInts() knows what resources to save as
	 * part of this context.
	 *
	 * Don't save the context if we're nesting interrupts in the
	 * kernel because the kernel doesn't use DSP hardware.
	 */
	D0_8 = __core_reg_get(D0.8);

	if (D0_8 && (State.Sig.SaveMask & TBICTX_PRIV_BIT)) {
		State.Sig.SaveMask |= (D0_8 >> 16);

		dsp_ctx = current->thread.dsp_context;
		if (dsp_ctx == NULL) {
			dsp_ctx = kzalloc(sizeof(*dsp_ctx), GFP_KERNEL);
			if (dsp_ctx == NULL)
				panic("couldn't save DSP context: ENOMEM");

			current->thread.dsp_context = dsp_ctx;
		}

		current->thread.user_flags |= (D0_8 & 0xffff0000);
		__TBINestInts(State, &dsp_ctx->regs, mask);
		dspram_save(dsp_ctx, D0_8 & 0x7f00, D0_8 & 0x007f);
	} else
		__TBINestInts(State, NULL, mask);
#else
	__TBINestInts(State, NULL, mask);
#endif
}

void head_end(TBIRES State, unsigned long mask)
{
	unsigned int savemask = (unsigned short)State.Sig.SaveMask;
	unsigned int ctx_savemask = (unsigned short)State.Sig.pCtx->SaveMask;

	if (savemask & TBICTX_PRIV_BIT) {
		ctx_savemask |= TBICTX_PRIV_BIT;
		current->thread.user_flags = savemask;
	}

	/* Always undo the sleep bit */
	ctx_savemask &= ~TBICTX_WAIT_BIT;

	/* Always save the catch buffer and RD pipe if they are dirty */
	savemask |= TBICTX_XCBF_BIT;

	/* Only save the catch and RD if we have not already done so.
	 * Note - the RD bits are in the pCtx only, and not in the
	 * State.SaveMask.
	 */
	if ((savemask & TBICTX_CBUF_BIT) ||
	    (ctx_savemask & TBICTX_CBRP_BIT)) {
		/* Have we already saved the buffers though?
		 * - See TestTrack 5071 */
		if (ctx_savemask & TBICTX_XCBF_BIT) {
			/* Strip off the bits so the call to __TBINestInts
			 * won't save the buffers again. */
			savemask &= ~TBICTX_CBUF_BIT;
			ctx_savemask &= ~TBICTX_CBRP_BIT;
		}
	}

#ifdef CONFIG_METAG_META21
	{
		unsigned int depth, txdefr;

		/*
		 * Save TXDEFR state.
		 *
		 * The process may have been interrupted after a LNKSET, but
		 * before it could read the DEFR state, so we mustn't lose that
		 * state or it could end up retrying an atomic operation that
		 * succeeded.
		 *
		 * All interrupts are disabled at this point so we
		 * don't need to perform any locking. We must do this
		 * dance before we use LNKGET or LNKSET.
		 */
		BUG_ON(current->thread.int_depth > HARDIRQ_BITS);

		depth = current->thread.int_depth++;

		txdefr = __core_reg_get(TXDEFR);

		txdefr &= TXDEFR_BUS_STATE_BITS;
		if (txdefr & TXDEFR_LNKSET_SUCCESS)
			current->thread.txdefr_failure &= ~(1 << depth);
		else
			current->thread.txdefr_failure |= (1 << depth);
	}
#endif

	State.Sig.SaveMask = savemask;
	State.Sig.pCtx->SaveMask = ctx_savemask;

	nest_interrupts(State, mask);

#ifdef CONFIG_METAG_POISON_CATCH_BUFFERS
	/* Poison the catch registers.  This shows up any mistakes we have
	 * made in their handling MUCH quicker.
	 */
	__core_reg_set(TXCATCH0, 0x87650021);
	__core_reg_set(TXCATCH1, 0x87654322);
	__core_reg_set(TXCATCH2, 0x87654323);
	__core_reg_set(TXCATCH3, 0x87654324);
#endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */
}
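/*
 * Illustrative summary of the TXDEFR bookkeeping above (not from the
 * original sources): int_depth counts nested entries and bit "depth"
 * of txdefr_failure records whether the interrupted context's last
 * LNKSET had failed.
 *
 *	head_end():     depth = current->thread.int_depth++;  save bit
 *	tail_end_sys(): depth = --current->thread.int_depth;  restore bit
 *
 * tail_end_sys() then writes TXDEFR_LNKSET_SUCCESS or
 * TXDEFR_LNKSET_FAILURE back into TXDEFR accordingly.
 */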

TBIRES tail_end_sys(TBIRES State, int syscall, int *restart)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	unsigned long flags;

	local_irq_disable();

	if (user_mode(regs)) {
		flags = current_thread_info()->flags;
		if (flags & _TIF_WORK_MASK &&
		    do_work_pending(regs, flags, syscall)) {
			*restart = 1;
			return State;
		}

#ifdef CONFIG_METAG_FPU
		if (current->thread.fpu_context &&
		    current->thread.fpu_context->needs_restore) {
			__TBICtxFPURestore(State, current->thread.fpu_context);
			/*
			 * Clearing this bit ensures the FP unit is not made
			 * active again unless it is used.
			 */
			State.Sig.SaveMask &= ~TBICTX_FPAC_BIT;
			current->thread.fpu_context->needs_restore = false;
		}
		State.Sig.TrigMask |= TBI_TRIG_BIT(TBID_SIGNUM_DFR);
#endif
	}

	/* TBI will turn interrupts back on at some point. */
	if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask))
		trace_hardirqs_on();

#ifdef CONFIG_METAG_DSP
	/*
	 * If we previously saved an extended context then restore it
	 * now. Otherwise, clear D0.8 because this process is not
	 * using DSP hardware.
	 */
	if (State.Sig.pCtx->SaveMask & TBICTX_XEXT_BIT) {
		unsigned int D0_8;
		struct meta_ext_context *dsp_ctx = current->thread.dsp_context;

		/* Make sure we're going to return to userland. */
		BUG_ON(current->thread.int_depth != 1);

		if (dsp_ctx->ram_sz[0] > 0)
			__TBIDspramRestoreA(dsp_ctx->ram_sz[0],
					    dsp_ctx->ram[0]);
		if (dsp_ctx->ram_sz[1] > 0)
			__TBIDspramRestoreB(dsp_ctx->ram_sz[1],
					    dsp_ctx->ram[1]);

		State.Sig.SaveMask |= State.Sig.pCtx->SaveMask;
		__TBICtxRestore(State, current->thread.dsp_context);
		D0_8 = __core_reg_get(D0.8);
		D0_8 |= current->thread.user_flags & 0xffff0000;
		D0_8 |= (dsp_ctx->ram_sz[1] | dsp_ctx->ram_sz[0]) & 0xffff;
		__core_reg_set(D0.8, D0_8);
	} else
		__core_reg_set(D0.8, 0);
#endif /* CONFIG_METAG_DSP */

#ifdef CONFIG_METAG_META21
	{
		unsigned int depth, txdefr;

		/*
		 * If there hasn't been a LNKSET since the last LNKGET then the
		 * link flag will be set, causing the next LNKSET to succeed if
		 * the addresses match. The two LNK operations may not be a pair
		 * (e.g. see atomic_read()), so the LNKSET should fail.
		 * We use a conditional-never LNKSET to clear the link flag
		 * without side effects.
		 */
		asm volatile("LNKSETDNV [D0Re0],D0Re0");

		depth = --current->thread.int_depth;

		BUG_ON(user_mode(regs) && depth);

		txdefr = __core_reg_get(TXDEFR);

		txdefr &= ~TXDEFR_BUS_STATE_BITS;

		/* Do we need to restore a failure code into TXDEFR? */
		if (current->thread.txdefr_failure & (1 << depth))
			txdefr |= (TXDEFR_LNKSET_FAILURE | TXDEFR_BUS_TRIG_BIT);
		else
			txdefr |= (TXDEFR_LNKSET_SUCCESS | TXDEFR_BUS_TRIG_BIT);

		__core_reg_set(TXDEFR, txdefr);
	}
#endif
	return State;
}

#ifdef CONFIG_SMP
/*
 * If we took an interrupt in the middle of __kuser_get_tls then we need
 * to rewind the PC to the start of the function in case the process
 * gets migrated to another thread (SMP only) and it reads the wrong tls
 * data.
 */
static inline void _restart_critical_section(TBIRES State)
{
	unsigned long get_tls_start;
	unsigned long get_tls_end;

	get_tls_start = (unsigned long)__kuser_get_tls -
		(unsigned long)&__user_gateway_start;

	get_tls_start += USER_GATEWAY_PAGE;

	get_tls_end = (unsigned long)__kuser_get_tls_end -
		(unsigned long)&__user_gateway_start;

	get_tls_end += USER_GATEWAY_PAGE;

	if ((State.Sig.pCtx->CurrPC >= get_tls_start) &&
	    (State.Sig.pCtx->CurrPC < get_tls_end))
		State.Sig.pCtx->CurrPC = get_tls_start;
}
#else
/*
 * If we took an interrupt in the middle of
 * __kuser_cmpxchg then we need to rewind the PC to the
 * start of the function.
 */
static inline void _restart_critical_section(TBIRES State)
{
	unsigned long cmpxchg_start;
	unsigned long cmpxchg_end;

	cmpxchg_start = (unsigned long)__kuser_cmpxchg -
		(unsigned long)&__user_gateway_start;

	cmpxchg_start += USER_GATEWAY_PAGE;

	cmpxchg_end = (unsigned long)__kuser_cmpxchg_end -
		(unsigned long)&__user_gateway_start;

	cmpxchg_end += USER_GATEWAY_PAGE;

	if ((State.Sig.pCtx->CurrPC >= cmpxchg_start) &&
	    (State.Sig.pCtx->CurrPC < cmpxchg_end))
		State.Sig.pCtx->CurrPC = cmpxchg_start;
}
#endif
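/*
 * Worked example of the address arithmetic above (illustrative): the
 * gateway routines are linked into the kernel image but executed by
 * user space from the gateway page, so a routine's user-visible
 * address is
 *
 *	(kernel symbol) - (unsigned long)&__user_gateway_start
 *			+ USER_GATEWAY_PAGE
 *
 * which is what both variants compute before comparing against CurrPC.
 */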

/* Used by kick_handler() */
void restart_critical_section(TBIRES State)
{
	_restart_critical_section(State);
}

TBIRES trigger_handler(TBIRES State, int SigNum, int Triggers, int Inst,
		       PTBI pTBI)
{
	head_end(State, ~INTS_OFF_MASK);

	/* If we interrupted user code handle any critical sections. */
	if (State.Sig.SaveMask & TBICTX_PRIV_BIT)
		_restart_critical_section(State);

	trace_hardirqs_off();

	do_IRQ(SigNum, (struct pt_regs *)State.Sig.pCtx);

	return tail_end(State);
}

static unsigned int load_fault(PTBICTXEXTCB0 pbuf)
{
	return pbuf->CBFlags & TXCATCH0_READ_BIT;
}

static unsigned long fault_address(PTBICTXEXTCB0 pbuf)
{
	return pbuf->CBAddr;
}

static void unhandled_fault(struct pt_regs *regs, unsigned long addr,
			    int signo, int code, int trapno)
{
	if (user_mode(regs)) {
		siginfo_t info;

		if (show_unhandled_signals && unhandled_signal(current, signo)
		    && printk_ratelimit()) {

			pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n",
				current->pid, regs->ctx.CurrPC, addr,
				trapno, trap_name(trapno));
			print_vma_addr(" in ", regs->ctx.CurrPC);
			print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
			printk("\n");
			show_regs(regs);
		}

		info.si_signo = signo;
		info.si_errno = 0;
		info.si_code = code;
		info.si_addr = (__force void __user *)addr;
		info.si_trapno = trapno;
		force_sig_info(signo, &info, current);
	} else {
		die("Oops", regs, trapno, addr);
	}
}

static int handle_data_fault(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs,
			     unsigned int data_address, int trapno)
{
	int ret;

	ret = do_page_fault(regs, data_address, !load_fault(pcbuf), trapno);

	return ret;
}

static unsigned long get_inst_fault_address(struct pt_regs *regs)
{
	return regs->ctx.CurrPC;
}

TBIRES fault_handler(TBIRES State, int SigNum, int Triggers,
		     int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	PTBICTXEXTCB0 pcbuf = (PTBICTXEXTCB0)&regs->extcb0;
	unsigned long data_address;

	head_end(State, ~INTS_OFF_MASK);

	/* Hardware breakpoint or data watch */
	if ((SigNum == TBIXXF_SIGNUM_IHF) ||
	    ((SigNum == TBIXXF_SIGNUM_DHF) &&
	     (pcbuf[0].CBFlags & (TXCATCH0_WATCH1_BIT |
				  TXCATCH0_WATCH0_BIT)))) {
		State = __TBIUnExpXXX(State, SigNum, Triggers, Inst,
				      pTBI);
		return tail_end(State);
	}

	local_irq_enable();

	data_address = fault_address(pcbuf);

	switch (SigNum) {
	case TBIXXF_SIGNUM_IGF:
		/* 1st-level entry invalid (instruction fetch) */
	case TBIXXF_SIGNUM_IPF: {
		/* 2nd-level entry invalid (instruction fetch) */
		unsigned long addr = get_inst_fault_address(regs);
		do_page_fault(regs, addr, 0, SigNum);
		break;
	}

	case TBIXXF_SIGNUM_DGF:
		/* 1st-level entry invalid (data access) */
	case TBIXXF_SIGNUM_DPF:
		/* 2nd-level entry invalid (data access) */
	case TBIXXF_SIGNUM_DWF:
		/* Write to read only page */
		handle_data_fault(pcbuf, regs, data_address, SigNum);
		break;

	case TBIXXF_SIGNUM_IIF:
		/* Illegal instruction */
		unhandled_fault(regs, regs->ctx.CurrPC, SIGILL, ILL_ILLOPC,
				SigNum);
		break;

	case TBIXXF_SIGNUM_DHF:
		/* Unaligned access */
		unhandled_fault(regs, data_address, SIGBUS, BUS_ADRALN,
				SigNum);
		break;
	case TBIXXF_SIGNUM_PGF:
		/* Privilege violation */
		unhandled_fault(regs, data_address, SIGSEGV, SEGV_ACCERR,
				SigNum);
		break;
	default:
		BUG();
		break;
	}

	return tail_end(State);
}

static bool switch_is_syscall(unsigned int inst)
{
	return inst == __METAG_SW_ENCODING(SYS);
}

static bool switch_is_legacy_syscall(unsigned int inst)
{
	return inst == __METAG_SW_ENCODING(SYS_LEGACY);
}

static inline void step_over_switch(struct pt_regs *regs, unsigned int inst)
{
	regs->ctx.CurrPC += 4;
}

static inline int test_syscall_work(void)
{
	return current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK;
}

TBIRES switch1_handler(TBIRES State, int SigNum, int Triggers,
		       int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	unsigned int sysnumber;
	unsigned long long a1_a2, a3_a4, a5_a6;
	LPSYSCALL syscall_entry;
	int restart;

	head_end(State, ~INTS_OFF_MASK);

	/*
	 * If this is not a syscall SWITCH it could be a breakpoint.
	 */
	if (!switch_is_syscall(Inst)) {
		/*
		 * Alert the user if they're trying to use legacy system
		 * calls. This suggests they need to update their C
		 * library and build against up to date kernel headers.
		 */
		if (switch_is_legacy_syscall(Inst))
			pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n");
		/*
		 * We don't know how to handle the SWITCH and cannot
		 * safely ignore it, so treat all unknown switches
		 * (including breakpoints) as traps.
		 */
		force_sig(SIGTRAP, current);
		return tail_end(State);
	}

	local_irq_enable();

restart_syscall:
	restart = 0;
	sysnumber = regs->ctx.DX[0].U1;

	if (test_syscall_work())
		sysnumber = syscall_trace_enter(regs);

	/* Skip over the SWITCH instruction - or you just get 'stuck' on it! */
	step_over_switch(regs, Inst);

	if (sysnumber >= __NR_syscalls) {
		pr_debug("unknown syscall number: %u\n", sysnumber);
		syscall_entry = (LPSYSCALL) sys_ni_syscall;
	} else {
		syscall_entry = (LPSYSCALL) sys_call_table[sysnumber];
	}

	/* Use 64bit loads for speed. */
	a5_a6 = *(unsigned long long *)&regs->ctx.DX[1];
	a3_a4 = *(unsigned long long *)&regs->ctx.DX[2];
	a1_a2 = *(unsigned long long *)&regs->ctx.DX[3];

	/* Here is the actual call to the syscall handler function. */
	regs->ctx.DX[0].U0 = syscall_entry(a1_a2, a3_a4, a5_a6);

	if (test_syscall_work())
		syscall_trace_leave(regs);

	State = tail_end_sys(State, sysnumber, &restart);
	/* Handlerless restarts shouldn't go via userland */
	if (restart)
		goto restart_syscall;
	return State;
}

TBIRES switchx_handler(TBIRES State, int SigNum, int Triggers,
		       int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;

	/*
	 * This can be caused by any user process simply executing an unusual
	 * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the
	 * thread to stop, so signal a SIGTRAP instead.
	 */
	head_end(State, ~INTS_OFF_MASK);
	if (user_mode(regs))
		force_sig(SIGTRAP, current);
	else
		State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, pTBI);
	return tail_end(State);
}

#ifdef CONFIG_METAG_META21
TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
{
	struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
	unsigned int error_state = Triggers;
	siginfo_t info;

	head_end(State, ~INTS_OFF_MASK);

	local_irq_enable();

	info.si_signo = SIGFPE;

	if (error_state & TXSTAT_FPE_INVALID_BIT)
		info.si_code = FPE_FLTINV;
	else if (error_state & TXSTAT_FPE_DIVBYZERO_BIT)
		info.si_code = FPE_FLTDIV;
	else if (error_state & TXSTAT_FPE_OVERFLOW_BIT)
		info.si_code = FPE_FLTOVF;
	else if (error_state & TXSTAT_FPE_UNDERFLOW_BIT)
		info.si_code = FPE_FLTUND;
	else if (error_state & TXSTAT_FPE_INEXACT_BIT)
		info.si_code = FPE_FLTRES;
	else
		info.si_code = 0;
	info.si_errno = 0;
	info.si_addr = (__force void __user *)regs->ctx.CurrPC;
	force_sig_info(SIGFPE, &info, current);

	return tail_end(State);
}
#endif

#ifdef CONFIG_METAG_SUSPEND_MEM
struct traps_context {
	PTBIAPIFN fnSigs[TBID_SIGNUM_MAX + 1];
};

static struct traps_context *metag_traps_context;

int traps_save_context(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);
	struct traps_context *context;

	context = kzalloc(sizeof(*context), GFP_ATOMIC);
	if (!context)
		return -ENOMEM;

	memcpy(context->fnSigs, (void *)_pTBI->fnSigs, sizeof(context->fnSigs));

	metag_traps_context = context;
	return 0;
}

int traps_restore_context(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);
	struct traps_context *context = metag_traps_context;

	metag_traps_context = NULL;

	memcpy((void *)_pTBI->fnSigs, context->fnSigs, sizeof(context->fnSigs));

	kfree(context);
	return 0;
}
#endif

#ifdef CONFIG_SMP
static inline unsigned int _get_trigger_mask(void)
{
	unsigned long cpu = smp_processor_id();
	return per_cpu(trigger_mask, cpu);
}

unsigned int get_trigger_mask(void)
{
	return _get_trigger_mask();
}
EXPORT_SYMBOL(get_trigger_mask);

static void set_trigger_mask(unsigned int mask)
{
	unsigned long cpu = smp_processor_id();
	per_cpu(trigger_mask, cpu) = mask;
}

void arch_local_irq_enable(void)
{
	preempt_disable();
	arch_local_irq_restore(_get_trigger_mask());
	preempt_enable_no_resched();
}
EXPORT_SYMBOL(arch_local_irq_enable);
#else
static void set_trigger_mask(unsigned int mask)
{
	global_trigger_mask = mask;
}
#endif
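/*
 * Note (generic kernel plumbing, not specific to this file): these are
 * the architecture backends of the common IRQ-flag helpers, e.g.
 * local_irq_enable() ends up in arch_local_irq_enable() above, which
 * reinstates the trigger mask recorded by set_trigger_mask(). A
 * typical caller:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	...critical section...
 *	local_irq_restore(flags);
 */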

void per_cpu_trap_init(unsigned long cpu)
{
	TBIRES int_context;
	unsigned int thread = cpu_2_hwthread_id[cpu];

	set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */
			 TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */
			 TBI_TRIG_BIT(TBID_SIGNUM_SW1));

	/* non-priv - use current stack */
	int_context.Sig.pCtx = NULL;
	/* Start with interrupts off */
	int_context.Sig.TrigMask = INTS_OFF_MASK;
	int_context.Sig.SaveMask = 0;

	/* And call __TBIASyncTrigger() */
	__TBIASyncTrigger(int_context);
}

void __init trap_init(void)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	_pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler;
	_pTBI->fnSigs[TBID_SIGNUM_LWK] = kick_handler;

#ifdef CONFIG_METAG_META21
	_pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR;
	_pTBI->fnSigs[TBID_SIGNUM_FPE] = fpe_handler;
#endif

	per_cpu_trap_init(cpu);
}

void tbi_startup_interrupt(int irq)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	BUG_ON(irq > TBID_SIGNUM_MAX);

	/* For TR1 and TR2, the thread id is encoded in the irq number */
	if (irq >= TBID_SIGNUM_T10 && irq < TBID_SIGNUM_TR3)
		cpu = hwthread_id_2_cpu[(irq - TBID_SIGNUM_T10) % 4];

	set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq));

	_pTBI->fnSigs[irq] = trigger_handler;
}

void tbi_shutdown_interrupt(int irq)
{
	unsigned long cpu = smp_processor_id();
	PTBI _pTBI = per_cpu(pTBI, cpu);

	BUG_ON(irq > TBID_SIGNUM_MAX);

	set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq));

	_pTBI->fnSigs[irq] = __TBIUnExpXXX;
}

int ret_from_fork(TBIRES arg)
{
	struct task_struct *prev = arg.Switch.pPara;
	struct task_struct *tsk = current;
	struct pt_regs *regs = task_pt_regs(tsk);
	int (*fn)(void *);
	TBIRES Next;

	schedule_tail(prev);

	if (tsk->flags & PF_KTHREAD) {
		fn = (void *)regs->ctx.DX[4].U1;
		BUG_ON(!fn);

		fn((void *)regs->ctx.DX[3].U1);
	}

	if (test_syscall_work())
		syscall_trace_leave(regs);

	preempt_disable();

	Next.Sig.TrigMask = get_trigger_mask();
	Next.Sig.SaveMask = 0;
	Next.Sig.pCtx = &regs->ctx;

	set_gateway_tls(current->thread.tls_ptr);

	preempt_enable_no_resched();

	/* And interrupts should come back on when we resume the real usermode
	 * code. Call __TBIASyncResume()
	 */
	__TBIASyncResume(tail_end(Next));
	/* ASyncResume should NEVER return */
	BUG();
	return 0;
}

void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs)
{
	unsigned long addr;
#ifdef CONFIG_FRAME_POINTER
	unsigned long fp, fpnew;
	unsigned long stack;
#endif

	if (regs && user_mode(regs))
		return;

	printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif

	if (!tsk)
		tsk = current;

#ifdef CONFIG_FRAME_POINTER
	if (regs) {
		print_ip_sym(regs->ctx.CurrPC);
		fp = regs->ctx.AX[1].U0;
	} else {
		fp = __core_reg_get(A0FrP);
	}

	/* detect when the frame pointer has been used for other purposes and
	 * doesn't point to the stack (it may point completely elsewhere which
	 * kstack_end may not detect).
	 */
	stack = (unsigned long)task_stack_page(tsk);
	while (fp >= stack && fp + 8 <= stack + THREAD_SIZE) {
		addr = __raw_readl((unsigned long *)(fp + 4)) - 4;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
		else
			break;
		/* stack grows up, so frame pointers must decrease */
		fpnew = __raw_readl((unsigned long *)(fp + 0));
		if (fpnew >= fp)
			break;
		fp = fpnew;
	}
#else
	while (!kstack_end(sp)) {
		addr = (*sp--) - 4;
		if (kernel_text_address(addr))
			print_ip_sym(addr);
	}
#endif

	printk("\n");

	debug_show_held_locks(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	if (!tsk)
		tsk = current;
	if (tsk == current)
		sp = (unsigned long *)current_stack_pointer;
	else
		sp = (unsigned long *)tsk->thread.kernel_context->AX[0].U0;

	show_trace(tsk, sp, NULL);
}