// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/exception.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/inst.h>
#include <asm/kgdb.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>
#include <asm/unwind.h>
#include <asm/uprobes.h>

#include "access-helper.h"

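/*
 * Exception dispatch table, indexed by exception code (CSR.ESTAT.Ecode).
 * Every code without a dedicated handler falls back to handle_reserved;
 * trap_init() below copies these handlers into the vectored exception area.
 */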
void *exception_table[EXCCODE_INT_START] = {
	[0 ... EXCCODE_INT_START - 1] = handle_reserved,

	[EXCCODE_TLBI]		= handle_tlb_load,
	[EXCCODE_TLBL]		= handle_tlb_load,
	[EXCCODE_TLBS]		= handle_tlb_store,
	[EXCCODE_TLBM]		= handle_tlb_modify,
	[EXCCODE_TLBNR]		= handle_tlb_protect,
	[EXCCODE_TLBNX]		= handle_tlb_protect,
	[EXCCODE_TLBPE]		= handle_tlb_protect,
	[EXCCODE_ADE]		= handle_ade,
	[EXCCODE_ALE]		= handle_ale,
	[EXCCODE_BCE]		= handle_bce,
	[EXCCODE_SYS]		= handle_sys,
	[EXCCODE_BP]		= handle_bp,
	[EXCCODE_INE]		= handle_ri,
	[EXCCODE_IPE]		= handle_ri,
	[EXCCODE_FPDIS]		= handle_fpu,
	[EXCCODE_LSXDIS]	= handle_lsx,
	[EXCCODE_LASXDIS]	= handle_lasx,
	[EXCCODE_FPE]		= handle_fpe,
	[EXCCODE_WATCH]		= handle_watch,
	[EXCCODE_BTDIS]		= handle_lbt,
};
EXPORT_SYMBOL_GPL(exception_table);

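/*
 * Walk the call chain with the arch unwinder and print one symbolized
 * return address per line, starting from the given register state
 * (or from the current task if none is supplied).
 */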
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long addr;
	struct unwind_state state;
	struct pt_regs *pregs = (struct pt_regs *)regs;

	if (!task)
		task = current;

	printk("%sCall Trace:", loglvl);
	for (unwind_start(&state, task, pregs);
	      !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}

static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	int i;
	const int field = 2 * sizeof(unsigned long);
	unsigned long stackdata;
	unsigned long *sp = (unsigned long *)regs->regs[3];

	printk("%sStack :", loglvl);
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s       ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.csr_crmd = 0;
	if (sp) {
		regs.csr_era = 0;
		regs.regs[1] = 0;
		regs.regs[3] = (unsigned long)sp;
	} else {
		if (!task || task == current)
			prepare_frametrace(&regs);
		else {
			regs.csr_era = task->thread.reg01;
			regs.regs[1] = 0;
			regs.regs[3] = task->thread.reg03;
			regs.regs[22] = task->thread.reg22;
		}
	}

	show_stacktrace(task, &regs, loglvl, false);
}

static void show_code(unsigned int *pc, bool user)
{
	long i;
	unsigned int insn;

	printk("Code:");

	for (i = -3; i < 6; i++) {
		if (__get_inst(&insn, pc + i, user)) {
			pr_cont(" (Bad address in era)\n");
			break;
		}
		pr_cont("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
	pr_cont("\n");
}

static void print_bool_fragment(const char *key, unsigned long val, bool first)
{
	/* e.g. "+PG", "-DA" */
	pr_cont("%s%c%s", first ? "" : " ", val ? '+' : '-', key);
}

static void print_plv_fragment(const char *key, int val)
{
	/* e.g. "PLV0", "PPLV3" */
	pr_cont("%s%d", key, val);
}

static void print_memory_type_fragment(const char *key, unsigned long val)
{
	const char *humanized_type;

	switch (val) {
	case 0:
		humanized_type = "SUC";
		break;
	case 1:
		humanized_type = "CC";
		break;
	case 2:
		humanized_type = "WUC";
		break;
	default:
		pr_cont(" %s=Reserved(%lu)", key, val);
		return;
	}

	/* e.g. " DATM=WUC" */
	pr_cont(" %s=%s", key, humanized_type);
}

static void print_intr_fragment(const char *key, unsigned long val)
{
	/* e.g. "LIE=0-1,3,5-7" */
	pr_cont("%s=%*pbl", key, EXCCODE_INT_NUM, &val);
}

static void print_crmd(unsigned long x)
{
	printk(" CRMD: %08lx (", x);
	print_plv_fragment("PLV", (int) FIELD_GET(CSR_CRMD_PLV, x));
	print_bool_fragment("IE", FIELD_GET(CSR_CRMD_IE, x), false);
	print_bool_fragment("DA", FIELD_GET(CSR_CRMD_DA, x), false);
	print_bool_fragment("PG", FIELD_GET(CSR_CRMD_PG, x), false);
	print_memory_type_fragment("DACF", FIELD_GET(CSR_CRMD_DACF, x));
	print_memory_type_fragment("DACM", FIELD_GET(CSR_CRMD_DACM, x));
	print_bool_fragment("WE", FIELD_GET(CSR_CRMD_WE, x), false);
	pr_cont(")\n");
}

static void print_prmd(unsigned long x)
{
	printk(" PRMD: %08lx (", x);
	print_plv_fragment("PPLV", (int) FIELD_GET(CSR_PRMD_PPLV, x));
	print_bool_fragment("PIE", FIELD_GET(CSR_PRMD_PIE, x), false);
	print_bool_fragment("PWE", FIELD_GET(CSR_PRMD_PWE, x), false);
	pr_cont(")\n");
}

static void print_euen(unsigned long x)
{
	printk(" EUEN: %08lx (", x);
	print_bool_fragment("FPE", FIELD_GET(CSR_EUEN_FPEN, x), true);
	print_bool_fragment("SXE", FIELD_GET(CSR_EUEN_LSXEN, x), false);
	print_bool_fragment("ASXE", FIELD_GET(CSR_EUEN_LASXEN, x), false);
	print_bool_fragment("BTE", FIELD_GET(CSR_EUEN_LBTEN, x), false);
	pr_cont(")\n");
}

static void print_ecfg(unsigned long x)
{
	printk(" ECFG: %08lx (", x);
	print_intr_fragment("LIE", FIELD_GET(CSR_ECFG_IM, x));
	pr_cont(" VS=%d)\n", (int) FIELD_GET(CSR_ECFG_VS, x));
}

static const char *humanize_exc_name(unsigned int ecode, unsigned int esubcode)
{
	/*
	 * LoongArch users and developers are probably more familiar with
	 * those names found in the ISA manual, so we are going to print out
	 * the latter. This will require some mapping.
	 */
	switch (ecode) {
	case EXCCODE_RSV: return "INT";
	case EXCCODE_TLBL: return "PIL";
	case EXCCODE_TLBS: return "PIS";
	case EXCCODE_TLBI: return "PIF";
	case EXCCODE_TLBM: return "PME";
	case EXCCODE_TLBNR: return "PNR";
	case EXCCODE_TLBNX: return "PNX";
	case EXCCODE_TLBPE: return "PPI";
	case EXCCODE_ADE:
		switch (esubcode) {
		case EXSUBCODE_ADEF: return "ADEF";
		case EXSUBCODE_ADEM: return "ADEM";
		}
		break;
	case EXCCODE_ALE: return "ALE";
	case EXCCODE_BCE: return "BCE";
	case EXCCODE_SYS: return "SYS";
	case EXCCODE_BP: return "BRK";
	case EXCCODE_INE: return "INE";
	case EXCCODE_IPE: return "IPE";
	case EXCCODE_FPDIS: return "FPD";
	case EXCCODE_LSXDIS: return "SXD";
	case EXCCODE_LASXDIS: return "ASXD";
	case EXCCODE_FPE:
		switch (esubcode) {
		case EXCSUBCODE_FPE: return "FPE";
		case EXCSUBCODE_VFPE: return "VFPE";
		}
		break;
	case EXCCODE_WATCH:
		switch (esubcode) {
		case EXCSUBCODE_WPEF: return "WPEF";
		case EXCSUBCODE_WPEM: return "WPEM";
		}
		break;
	case EXCCODE_BTDIS: return "BTD";
	case EXCCODE_BTE: return "BTE";
	case EXCCODE_GSPR: return "GSPR";
	case EXCCODE_HVC: return "HVC";
	case EXCCODE_GCM:
		switch (esubcode) {
		case EXCSUBCODE_GCSC: return "GCSC";
		case EXCSUBCODE_GCHC: return "GCHC";
		}
		break;
	/*
	 * The manual does not mention the EXCCODE_SE case, but print it
	 * out nevertheless.
	 */
	case EXCCODE_SE: return "SE";
	}

	return "???";
}

static void print_estat(unsigned long x)
{
	unsigned int ecode = FIELD_GET(CSR_ESTAT_EXC, x);
	unsigned int esubcode = FIELD_GET(CSR_ESTAT_ESUBCODE, x);

	printk("ESTAT: %08lx [%s] (", x, humanize_exc_name(ecode, esubcode));
	print_intr_fragment("IS", FIELD_GET(CSR_ESTAT_IS, x));
	pr_cont(" ECode=%d EsubCode=%d)\n", (int) ecode, (int) esubcode);
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int exccode = FIELD_GET(CSR_ESTAT_EXC, regs->csr_estat);

	show_regs_print_info(KERN_DEFAULT);

	/* Print saved GPRs except $zero (substituting with PC/ERA) */
#define GPR_FIELD(x) field, regs->regs[x]
	printk("pc %0*lx ra %0*lx tp %0*lx sp %0*lx\n",
	       field, regs->csr_era, GPR_FIELD(1), GPR_FIELD(2), GPR_FIELD(3));
	printk("a0 %0*lx a1 %0*lx a2 %0*lx a3 %0*lx\n",
	       GPR_FIELD(4), GPR_FIELD(5), GPR_FIELD(6), GPR_FIELD(7));
	printk("a4 %0*lx a5 %0*lx a6 %0*lx a7 %0*lx\n",
	       GPR_FIELD(8), GPR_FIELD(9), GPR_FIELD(10), GPR_FIELD(11));
	printk("t0 %0*lx t1 %0*lx t2 %0*lx t3 %0*lx\n",
	       GPR_FIELD(12), GPR_FIELD(13), GPR_FIELD(14), GPR_FIELD(15));
	printk("t4 %0*lx t5 %0*lx t6 %0*lx t7 %0*lx\n",
	       GPR_FIELD(16), GPR_FIELD(17), GPR_FIELD(18), GPR_FIELD(19));
	printk("t8 %0*lx u0 %0*lx s9 %0*lx s0 %0*lx\n",
	       GPR_FIELD(20), GPR_FIELD(21), GPR_FIELD(22), GPR_FIELD(23));
	printk("s1 %0*lx s2 %0*lx s3 %0*lx s4 %0*lx\n",
	       GPR_FIELD(24), GPR_FIELD(25), GPR_FIELD(26), GPR_FIELD(27));
	printk("s5 %0*lx s6 %0*lx s7 %0*lx s8 %0*lx\n",
	       GPR_FIELD(28), GPR_FIELD(29), GPR_FIELD(30), GPR_FIELD(31));

	/* The slot for $zero is reused as the syscall restart flag */
	if (regs->regs[0])
		printk("syscall restart flag: %0*lx\n", GPR_FIELD(0));

	if (user_mode(regs)) {
		printk("   ra: %0*lx\n", GPR_FIELD(1));
		printk("  ERA: %0*lx\n", field, regs->csr_era);
	} else {
		printk("   ra: %0*lx %pS\n", GPR_FIELD(1), (void *) regs->regs[1]);
		printk("  ERA: %0*lx %pS\n", field, regs->csr_era, (void *) regs->csr_era);
	}
#undef GPR_FIELD

	/* Print saved important CSRs */
	print_crmd(regs->csr_crmd);
	print_prmd(regs->csr_prmd);
	print_euen(regs->csr_euen);
	print_ecfg(regs->csr_ecfg);
	print_estat(regs->csr_estat);

	if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
		printk(" BADV: %0*lx\n", field, regs->csr_badvaddr);

	printk(" PRID: %08x (%s, %s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
	       cpu_family_string(), cpu_full_name_string());
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	       current->comm, current->pid, current_thread_info(), current);

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->csr_era, user_mode(regs));
	printk("\n");
}

static DEFINE_RAW_SPINLOCK(die_lock);

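/*
 * Terminal error path for kernel-mode faults: serialize the oops output
 * under die_lock, dump registers, modules and stack, taint the kernel,
 * then crash-kexec, panic or kill the current task as configured.
 */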
void die(const char *str, struct pt_regs *regs)
{
	int ret;
	static int die_counter;

	oops_enter();

	ret = notify_die(DIE_OOPS, str, regs, 0,
			 current->thread.trap_nr, SIGSEGV);

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (ret == NOTIFY_STOP)
		return;

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(SIGSEGV);
}

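/*
 * Program the vector spacing field CSR.ECFG.VS from the per-vector slot
 * size: a slot of `size' bytes holds size/4 instructions, so VS is
 * log2(size/4).  VS values outside 1..7 are rejected here.
 */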
static inline void setup_vint_size(unsigned int size)
{
	unsigned int vs;

	vs = ilog2(size/4);

	if (vs == 0 || vs > 7)
		panic("vint_size %d not supported yet", vs);

	csr_xchg32(vs << CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
}

/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
static void force_fcsr_sig(unsigned long fcsr,
			void __user *fault_addr, struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcsr & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcsr & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcsr & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcsr & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcsr & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault(SIGFPE, si_code, fault_addr);
}

static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcsr_sig(fcsr, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}

/*
 * Delayed fp exceptions when doing a lazy ctx switch
 */
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
{
	int sig;
	void __user *fault_addr;
	irqentry_state_t state = irqentry_enter(regs);

	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	sig = SIGFPE;
	fault_addr = (void __user *) regs->csr_era;

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcsr);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ade(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ade access", regs);
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}

/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */

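/*
 * Address-alignment error (ALE).  Kernels built without
 * CONFIG_ARCH_STRICT_ALIGN run on hardware that performs unaligned
 * accesses itself, so any ALE that still arrives is fatal.  With
 * CONFIG_ARCH_STRICT_ALIGN the access traps here and is emulated in
 * software by emulate_load_store_insn(), subject to the
 * unaligned_enabled sysctl and the task's TIF_FIXADE flag.
 */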
asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_ARCH_STRICT_ALIGN
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
	unsigned int *pc;

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);

	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->csr_badvaddr == regs->csr_era)
		goto sigbus;
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (!unaligned_enabled)
		goto sigbus;
	if (!no_unaligned_warning)
		show_registers(regs);

	pc = (unsigned int *)exception_era(regs);

	emulate_load_store_insn(regs, (void __user *)regs->csr_badvaddr, pc);

	goto out;

sigbus:
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();
#endif
	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_GENERIC_BUG */

static void bug_handler(struct pt_regs *regs)
{
	switch (report_bug(regs->csr_era, regs)) {
	case BUG_TRAP_TYPE_BUG:
	case BUG_TRAP_TYPE_NONE:
		die_if_kernel("Oops - BUG", regs);
		force_sig(SIGTRAP);
		break;

	case BUG_TRAP_TYPE_WARN:
		/* Skip the BUG instruction and continue */
		regs->csr_era += LOONGARCH_INSN_SIZE;
		break;
	}
}

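/*
 * Bound-check error (BCE), raised by the ASRT{LE,GT} assertions and by
 * the bound-checked load/store instruction forms.  The faulting
 * instruction is decoded to recover the checked address and the
 * violated bound, so that the resulting SIGSEGV carries bounds info.
 */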
asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned long era = exception_era(regs);
	u64 badv = 0, lower = 0, upper = ULONG_MAX;
	union loongarch_instruction insn;
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	current->thread.trap_nr = read_csr_excode();

	die_if_kernel("Bounds check error in kernel code", regs);

	/*
	 * Pull out the address that failed bounds checking, and the lower /
	 * upper bound, by minimally looking at the faulting instruction word
	 * and reading from the correct register.
	 */
	if (__get_inst(&insn.word, (u32 *)era, user))
		goto bad_era;

	switch (insn.reg3_format.opcode) {
	case asrtle_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtle */
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case asrtgt_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtgt */
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;

	case ldleb_op:
	case ldleh_op:
	case ldlew_op:
	case ldled_op:
	case stleb_op:
	case stleh_op:
	case stlew_op:
	case stled_op:
	case fldles_op:
	case fldled_op:
	case fstles_op:
	case fstled_op:
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case ldgtb_op:
	case ldgth_op:
	case ldgtw_op:
	case ldgtd_op:
	case stgtb_op:
	case stgth_op:
	case stgtw_op:
	case stgtd_op:
	case fldgts_op:
	case fldgtd_op:
	case fstgts_op:
	case fstgtd_op:
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;
	}

	force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

bad_era:
	/*
	 * Cannot pull out the instruction word, hence cannot provide more
	 * info than a regular SIGSEGV in this case.
	 */
	force_sig(SIGSEGV);
	goto out;
}

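/*
 * Breakpoint exception.  The low 15 bits of the BREAK instruction word
 * carry a software-defined code: kgdb, kprobes and uprobes get a chance
 * to claim their codes first, and the rest map to BUG handling, integer
 * divide-by-zero, overflow, or a plain SIGTRAP.
 */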
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned int opcode, bcode;
	unsigned long era = exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (__get_inst(&opcode, (u32 *)era, user))
		goto out_sigsegv;

	bcode = (opcode & 0x7fff);

	/*
	 * Notify the kgdb, kprobe and uprobe handlers if the instruction
	 * is likely to pertain to them.
	 */
	switch (bcode) {
	case BRK_KDB:
		if (kgdb_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (kprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (kprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_BP:
		if (uprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_XOLBP:
		if (uprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	default:
		current->thread.trap_nr = read_csr_excode();
		if (notify_die(DIE_TRAP, "Break", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	}

	switch (bcode) {
	case BRK_BUG:
		bug_handler(regs);
		break;
	case BRK_DIVZERO:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
		break;
	case BRK_OVERFLOW:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
		break;
	}

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_HAVE_HW_BREAKPOINT
	pr_warn("Hardware watch point handler not implemented!\n");
#else
	if (kgdb_breakpoint_handler(regs))
		goto out;

	if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
		int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
		unsigned long pc = instruction_pointer(regs);
		union loongarch_instruction *ip = (union loongarch_instruction *)pc;

		if (llbit) {
			/*
			 * When an ll-sc combo is encountered, it is regarded as a
			 * single instruction, so don't clear llbit or reset
			 * CSR.FWPS.Skip until the ll-sc sequence has completed.
			 */
			csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
			csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL);
			goto out;
		}

		if (pc == current->thread.single_step) {
			/*
			 * Certain instructions, such as fld.d/fst.d, are occasionally
			 * not skipped even when CSR.FWPS.Skip is set. So single-stepping
			 * needs to check whether csr_era still equals the value recorded
			 * when the step was last armed.
			 */
			if (!is_self_loop_ins(ip, regs)) {
				/*
				 * Check whether the target pc of the given instruction
				 * equals the current pc. If it does (a self-loop), we must
				 * not set the CSR.FWPS.Skip bit, as that would break the
				 * original instruction stream.
				 */
				csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
				goto out;
			}
		}
	} else {
		breakpoint_handler(regs);
		watchpoint_handler(regs);
	}

	force_sig(SIGTRAP);
out:
#endif
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
	int status = SIGILL;
	unsigned int __maybe_unused opcode;
	unsigned int __user *era = (unsigned int __user *)exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(get_user(opcode, era) < 0)) {
		status = SIGSEGV;
		current->thread.error_code = 1;
	}

	force_sig(status);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

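/*
 * Lazy FPU/SIMD context management: the *DIS exceptions below fire when
 * a task first touches a disabled unit, and these helpers either set up
 * a fresh context or re-own the one the task last saved.
 */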
static void init_restore_fp(void)
{
	if (!used_math()) {
		/* First time FP context user. */
		init_fpu();
	} else {
		/* This task has formerly used the FP context */
		if (!is_fpu_owner())
			own_fpu_inatomic(1);
	}

	BUG_ON(!is_fp_enabled());
}

static void init_restore_lsx(void)
{
	enable_lsx();

	if (!thread_lsx_context_live()) {
		/* First time LSX context user */
		init_restore_fp();
		init_lsx_upper();
		set_thread_flag(TIF_LSX_CTX_LIVE);
	} else {
		if (!is_simd_owner()) {
			if (is_fpu_owner()) {
				restore_lsx_upper(current);
			} else {
				__own_fpu();
				restore_lsx(current);
			}
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
}

static void init_restore_lasx(void)
{
	enable_lasx();

	if (!thread_lasx_context_live()) {
		/* First time LASX context user */
		init_restore_lsx();
		init_lasx_upper();
		set_thread_flag(TIF_LASX_CTX_LIVE);
	} else {
		if (is_fpu_owner() || is_simd_owner()) {
			init_restore_lsx();
			restore_lasx_upper(current);
		} else {
			__own_fpu();
			enable_lsx();
			restore_lasx(current);
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
	BUG_ON(!is_lasx_enabled());
}

asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	die_if_kernel("do_fpu invoked from kernel context!", regs);
	BUG_ON(is_lsx_enabled());
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_fp();
	preempt_enable();

	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lsx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lsx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lsx invoked from kernel context!", regs);
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_lsx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lasx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lasx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lasx invoked from kernel context!", regs);

	preempt_disable();
	init_restore_lasx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

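/*
 * LBT (Loongson Binary Translation) context follows the same lazy
 * first-use model as the FPU/SIMD helpers above.
 */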
static void init_restore_lbt(void)
{
	if (!thread_lbt_context_live()) {
		/* First time LBT context user */
		init_lbt();
		set_thread_flag(TIF_LBT_CTX_LIVE);
	} else {
		if (!is_lbt_owner())
			own_lbt_inatomic(1);
	}

	BUG_ON(!is_lbt_enabled());
}

asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	/*
	 * BTD (Binary Translation Disable exception) can be triggered
	 * during FP save/restore if TM (Top Mode) is on, which may
	 * cause irq_enable during 'switch_to'. To avoid this situation
	 * (including the case where the user turns on TM via 'MOVGR2GCSR',
	 * which will not trigger the BTD), we need to check PRMD first.
	 */
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (!cpu_has_lbt) {
		force_sig(SIGILL);
		goto out;
	}
	BUG_ON(is_lbt_enabled());

	preempt_disable();
	init_restore_lbt();
	preempt_enable();

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_reserved(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	/*
	 * Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a fatal error after another hardware/software error.
	 */
	pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
		read_csr_excode(), current->pid, current->comm);
	die_if_kernel("do_reserved exception", regs);
	force_sig(SIGUNUSED);

	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void cache_parity_error(void)
{
	/* For the moment, report the problem and hang. */
	pr_err("Cache error exception:\n");
	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
	pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
	panic("Can't handle the cache error!");
}

asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

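/*
 * Vectored interrupt entry: if we are not already on this CPU's IRQ
 * stack, switch to it by hand, saving the task's sp at the top of the
 * IRQ stack so the unwinder can cross back over.
 */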
asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
{
	register int cpu;
	register unsigned long stack;
	irqentry_state_t state = irqentry_enter(regs);

	cpu = smp_processor_id();

	if (on_irq_stack(cpu, sp))
		handle_loongarch_irq(regs);
	else {
		stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;

		/* Save task's sp on IRQ stack for unwinding */
		*(unsigned long *)stack = sp;

		__asm__ __volatile__(
		"move	$s0, $sp		\n" /* Preserve sp */
		"move	$sp, %[stk]		\n" /* Switch stack */
		"move	$a0, %[regs]		\n"
		"bl	handle_loongarch_irq	\n"
		"move	$sp, $s0		\n" /* Restore sp */
		: /* No outputs */
		: [stk] "r" (stack), [regs] "r" (regs)
		: "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
		  "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
		  "memory");
	}

	irqentry_exit(regs, state);
}

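/*
 * All vectored handlers live in one 64KB-aligned block of 128 slots of
 * VECSIZE bytes each: exceptions in the low slots, interrupts from
 * EXCCODE_INT_START upwards, and the TLB-refill entry at slot 80.
 * EENTRY, MERRENTRY and TLBRENTRY all point into this block.
 */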
unsigned long eentry;
unsigned long tlbrentry;

long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);

static void configure_exception_vector(void)
{
	eentry    = (unsigned long)exception_handlers;
	tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;

	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
}

void per_cpu_trap_init(int cpu)
{
	unsigned int i;

	setup_vint_size(VECSIZE);

	configure_exception_vector();

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Initialise exception handlers */
	if (cpu == 0)
		for (i = 0; i < 64; i++)
			set_handler(i * VECSIZE, handle_reserved, VECSIZE);

	tlb_init(cpu);
	cpu_cache_init();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(eentry + offset), addr, size);
	local_flush_icache_range(eentry + offset, eentry + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install the uncached CPU exception handler.
 * This is suitable only for the cache error exception, which is the only
 * exception handler that is run uncached.
 */
void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
{
	unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_eentry + offset), addr, size);
}

void __init trap_init(void)
{
	long i;

	/* Set interrupt vector handler */
	for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
		set_handler(i * VECSIZE, handle_vint, VECSIZE);

	/* Set exception vector handler */
	for (i = EXCCODE_ADE; i <= EXCCODE_BTDIS; i++)
		set_handler(i * VECSIZE, exception_table[i], VECSIZE);

	cache_error_setup();

	local_flush_icache_range(eentry, eentry + 0x400);
}