// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/exception.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/inst.h>
#include <asm/kgdb.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>
#include <asm/unwind.h>
#include <asm/uprobes.h>

#include "access-helper.h"

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long addr;
	struct unwind_state state;
	struct pt_regs *pregs = (struct pt_regs *)regs;

	if (!task)
		task = current;

	printk("%sCall Trace:", loglvl);
	for (unwind_start(&state, task, pregs);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}

static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	int i;
	const int field = 2 * sizeof(unsigned long);
	unsigned long stackdata;
	unsigned long *sp = (unsigned long *)regs->regs[3];

	printk("%sStack :", loglvl);
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s       ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.csr_crmd = 0;
	if (sp) {
		regs.csr_era = 0;
		regs.regs[1] = 0;
		regs.regs[3] = (unsigned long)sp;
	} else {
		if (!task || task == current)
			prepare_frametrace(&regs);
		else {
			regs.csr_era = task->thread.reg01;
			regs.regs[1] = 0;
			regs.regs[3] = task->thread.reg03;
			regs.regs[22] = task->thread.reg22;
		}
	}

	show_stacktrace(task, &regs, loglvl, false);
}

static void show_code(unsigned int *pc, bool user)
{
	long i;
	unsigned int insn;

	printk("Code:");

	for (i = -3; i < 6; i++) {
		if (__get_inst(&insn, pc + i, user)) {
			pr_cont(" (Bad address in era)\n");
			break;
		}
		pr_cont("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
	pr_cont("\n");
}
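
/*
 * For illustration (with made-up instruction words), the window printed
 * above wraps the faulting instruction at era in angle brackets, with
 * three words before it and five after:
 *
 *   Code: 02ff8063 29c06076 28c08064 <002a0000> 4c000020 ...
 */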

static void print_bool_fragment(const char *key, unsigned long val, bool first)
{
	/* e.g. "+PG", "-DA" */
	pr_cont("%s%c%s", first ? "" : " ", val ? '+' : '-', key);
}

static void print_plv_fragment(const char *key, int val)
{
	/* e.g. "PLV0", "PPLV3" */
	pr_cont("%s%d", key, val);
}

static void print_memory_type_fragment(const char *key, unsigned long val)
{
	const char *humanized_type;

	switch (val) {
	case 0:
		humanized_type = "SUC";
		break;
	case 1:
		humanized_type = "CC";
		break;
	case 2:
		humanized_type = "WUC";
		break;
	default:
		pr_cont(" %s=Reserved(%lu)", key, val);
		return;
	}

	/* e.g. " DATM=WUC" */
	pr_cont(" %s=%s", key, humanized_type);
}

static void print_intr_fragment(const char *key, unsigned long val)
{
	/* e.g. "LIE=0-1,3,5-7" */
	pr_cont("%s=%*pbl", key, EXCCODE_INT_NUM, &val);
}

static void print_crmd(unsigned long x)
{
	printk(" CRMD: %08lx (", x);
	print_plv_fragment("PLV", (int) FIELD_GET(CSR_CRMD_PLV, x));
	print_bool_fragment("IE", FIELD_GET(CSR_CRMD_IE, x), false);
	print_bool_fragment("DA", FIELD_GET(CSR_CRMD_DA, x), false);
	print_bool_fragment("PG", FIELD_GET(CSR_CRMD_PG, x), false);
	print_memory_type_fragment("DACF", FIELD_GET(CSR_CRMD_DACF, x));
	print_memory_type_fragment("DACM", FIELD_GET(CSR_CRMD_DACM, x));
	print_bool_fragment("WE", FIELD_GET(CSR_CRMD_WE, x), false);
	pr_cont(")\n");
}
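
/*
 * As an illustrative example, a typical kernel-mode CRMD value of 0xb0
 * (PLV0, paging enabled, both direct-access fields cached coherent)
 * would be rendered by print_crmd() above as:
 *
 *    CRMD: 000000b0 (PLV0 -IE -DA +PG DACF=CC DACM=CC -WE)
 */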

static void print_prmd(unsigned long x)
{
	printk(" PRMD: %08lx (", x);
	print_plv_fragment("PPLV", (int) FIELD_GET(CSR_PRMD_PPLV, x));
	print_bool_fragment("PIE", FIELD_GET(CSR_PRMD_PIE, x), false);
	print_bool_fragment("PWE", FIELD_GET(CSR_PRMD_PWE, x), false);
	pr_cont(")\n");
}

static void print_euen(unsigned long x)
{
	printk(" EUEN: %08lx (", x);
	print_bool_fragment("FPE", FIELD_GET(CSR_EUEN_FPEN, x), true);
	print_bool_fragment("SXE", FIELD_GET(CSR_EUEN_LSXEN, x), false);
	print_bool_fragment("ASXE", FIELD_GET(CSR_EUEN_LASXEN, x), false);
	print_bool_fragment("BTE", FIELD_GET(CSR_EUEN_LBTEN, x), false);
	pr_cont(")\n");
}

static void print_ecfg(unsigned long x)
{
	printk(" ECFG: %08lx (", x);
	print_intr_fragment("LIE", FIELD_GET(CSR_ECFG_IM, x));
	pr_cont(" VS=%d)\n", (int) FIELD_GET(CSR_ECFG_VS, x));
}

static const char *humanize_exc_name(unsigned int ecode, unsigned int esubcode)
{
	/*
	 * LoongArch users and developers are probably more familiar with
	 * those names found in the ISA manual, so we are going to print out
	 * the latter. This will require some mapping.
	 */
	switch (ecode) {
	case EXCCODE_RSV: return "INT";
	case EXCCODE_TLBL: return "PIL";
	case EXCCODE_TLBS: return "PIS";
	case EXCCODE_TLBI: return "PIF";
	case EXCCODE_TLBM: return "PME";
	case EXCCODE_TLBNR: return "PNR";
	case EXCCODE_TLBNX: return "PNX";
	case EXCCODE_TLBPE: return "PPI";
	case EXCCODE_ADE:
		switch (esubcode) {
		case EXSUBCODE_ADEF: return "ADEF";
		case EXSUBCODE_ADEM: return "ADEM";
		}
		break;
	case EXCCODE_ALE: return "ALE";
	case EXCCODE_BCE: return "BCE";
	case EXCCODE_SYS: return "SYS";
	case EXCCODE_BP: return "BRK";
	case EXCCODE_INE: return "INE";
	case EXCCODE_IPE: return "IPE";
	case EXCCODE_FPDIS: return "FPD";
	case EXCCODE_LSXDIS: return "SXD";
	case EXCCODE_LASXDIS: return "ASXD";
	case EXCCODE_FPE:
		switch (esubcode) {
		case EXCSUBCODE_FPE: return "FPE";
		case EXCSUBCODE_VFPE: return "VFPE";
		}
		break;
	case EXCCODE_WATCH:
		switch (esubcode) {
		case EXCSUBCODE_WPEF: return "WPEF";
		case EXCSUBCODE_WPEM: return "WPEM";
		}
		break;
	case EXCCODE_BTDIS: return "BTD";
	case EXCCODE_BTE: return "BTE";
	case EXCCODE_GSPR: return "GSPR";
	case EXCCODE_HVC: return "HVC";
	case EXCCODE_GCM:
		switch (esubcode) {
		case EXCSUBCODE_GCSC: return "GCSC";
		case EXCSUBCODE_GCHC: return "GCHC";
		}
		break;
	/*
	 * The manual does not mention the EXCCODE_SE case, but print it
	 * out nevertheless.
	 */
	case EXCCODE_SE: return "SE";
	}

	return "???";
}

static void print_estat(unsigned long x)
{
	unsigned int ecode = FIELD_GET(CSR_ESTAT_EXC, x);
	unsigned int esubcode = FIELD_GET(CSR_ESTAT_ESUBCODE, x);

	printk("ESTAT: %08lx [%s] (", x, humanize_exc_name(ecode, esubcode));
	print_intr_fragment("IS", FIELD_GET(CSR_ESTAT_IS, x));
	pr_cont(" ECode=%d EsubCode=%d)\n", (int) ecode, (int) esubcode);
}
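
/*
 * Illustrative example: an ESTAT value recording a breakpoint exception
 * (ecode 12, "BRK") with no interrupts pending would be rendered as:
 *
 *   ESTAT: 000c0000 [BRK] (IS= ECode=12 EsubCode=0)
 */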

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int exccode = FIELD_GET(CSR_ESTAT_EXC, regs->csr_estat);

	show_regs_print_info(KERN_DEFAULT);

	/* Print saved GPRs except $zero (substituting with PC/ERA) */
#define GPR_FIELD(x) field, regs->regs[x]
	printk("pc %0*lx ra %0*lx tp %0*lx sp %0*lx\n",
	       field, regs->csr_era, GPR_FIELD(1), GPR_FIELD(2), GPR_FIELD(3));
	printk("a0 %0*lx a1 %0*lx a2 %0*lx a3 %0*lx\n",
	       GPR_FIELD(4), GPR_FIELD(5), GPR_FIELD(6), GPR_FIELD(7));
	printk("a4 %0*lx a5 %0*lx a6 %0*lx a7 %0*lx\n",
	       GPR_FIELD(8), GPR_FIELD(9), GPR_FIELD(10), GPR_FIELD(11));
	printk("t0 %0*lx t1 %0*lx t2 %0*lx t3 %0*lx\n",
	       GPR_FIELD(12), GPR_FIELD(13), GPR_FIELD(14), GPR_FIELD(15));
	printk("t4 %0*lx t5 %0*lx t6 %0*lx t7 %0*lx\n",
	       GPR_FIELD(16), GPR_FIELD(17), GPR_FIELD(18), GPR_FIELD(19));
	printk("t8 %0*lx u0 %0*lx s9 %0*lx s0 %0*lx\n",
	       GPR_FIELD(20), GPR_FIELD(21), GPR_FIELD(22), GPR_FIELD(23));
	printk("s1 %0*lx s2 %0*lx s3 %0*lx s4 %0*lx\n",
	       GPR_FIELD(24), GPR_FIELD(25), GPR_FIELD(26), GPR_FIELD(27));
	printk("s5 %0*lx s6 %0*lx s7 %0*lx s8 %0*lx\n",
	       GPR_FIELD(28), GPR_FIELD(29), GPR_FIELD(30), GPR_FIELD(31));

	/* The slot for $zero is reused as the syscall restart flag */
	if (regs->regs[0])
		printk("syscall restart flag: %0*lx\n", GPR_FIELD(0));

	if (user_mode(regs)) {
		printk("   ra: %0*lx\n", GPR_FIELD(1));
		printk("  ERA: %0*lx\n", field, regs->csr_era);
	} else {
		printk("   ra: %0*lx %pS\n", GPR_FIELD(1), (void *) regs->regs[1]);
		printk("  ERA: %0*lx %pS\n", field, regs->csr_era, (void *) regs->csr_era);
	}
#undef GPR_FIELD

	/* Print saved important CSRs */
	print_crmd(regs->csr_crmd);
	print_prmd(regs->csr_prmd);
	print_euen(regs->csr_euen);
	print_ecfg(regs->csr_ecfg);
	print_estat(regs->csr_estat);

	if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
		printk(" BADV: %0*lx\n", field, regs->csr_badvaddr);

	printk(" PRID: %08x (%s, %s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
	       cpu_family_string(), cpu_full_name_string());
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	       current->comm, current->pid, current_thread_info(), current);

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->csr_era, user_mode(regs));
	printk("\n");
}

static DEFINE_RAW_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs)
{
	int ret;
	static int die_counter;

	oops_enter();

	ret = notify_die(DIE_OOPS, str, regs, 0,
			 current->thread.trap_nr, SIGSEGV);

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (ret == NOTIFY_STOP)
		return;

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(SIGSEGV);
}

static inline void setup_vint_size(unsigned int size)
{
	unsigned int vs;

	vs = ilog2(size / 4);

	if (vs == 0 || vs > 7)
		panic("vint_size %d is not supported yet", vs);

	csr_xchg32(vs << CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
}
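
/*
 * The spacing between vectored entries is 4 * 2^VS bytes, so the code
 * above encodes it as vs = ilog2(size / 4). For example, assuming a
 * VECSIZE of 512 bytes, vs = ilog2(512 / 4) = 7, the largest value the
 * ECFG.VS field accepts.
 */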

/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
static void force_fcsr_sig(unsigned long fcsr,
			void __user *fault_addr, struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcsr & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcsr & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcsr & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcsr & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcsr & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault(SIGFPE, si_code, fault_addr);
}

static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcsr_sig(fcsr, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}

/*
 * Delayed fp exceptions when doing a lazy ctx switch
 */
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
{
	int sig;
	void __user *fault_addr;
	irqentry_state_t state = irqentry_enter(regs);

	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	sig = SIGFPE;
	fault_addr = (void __user *) regs->csr_era;

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcsr);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ade(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ade access", regs);
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}

/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */

asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_ARCH_STRICT_ALIGN
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
	unsigned int *pc;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);

	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->csr_badvaddr == regs->csr_era)
		goto sigbus;
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (!unaligned_enabled)
		goto sigbus;
	if (!no_unaligned_warning)
		show_registers(regs);

	pc = (unsigned int *)exception_era(regs);

	emulate_load_store_insn(regs, (void __user *)regs->csr_badvaddr, pc);

	goto out;

sigbus:
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
#endif
	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_GENERIC_BUG */
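
/*
 * Returning 1 unconditionally is fine here: it merely tells the generic
 * report_bug() code not to reject the address outright; report_bug()
 * still matches the address against the __bug_table before treating it
 * as a real BUG()/WARN() site.
 */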

static void bug_handler(struct pt_regs *regs)
{
	switch (report_bug(regs->csr_era, regs)) {
	case BUG_TRAP_TYPE_BUG:
	case BUG_TRAP_TYPE_NONE:
		die_if_kernel("Oops - BUG", regs);
		force_sig(SIGTRAP);
		break;

	case BUG_TRAP_TYPE_WARN:
		/* Skip the BUG instruction and continue */
		regs->csr_era += LOONGARCH_INSN_SIZE;
		break;
	}
}

asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned long era = exception_era(regs);
	u64 badv = 0, lower = 0, upper = ULONG_MAX;
	union loongarch_instruction insn;
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	current->thread.trap_nr = read_csr_excode();

	die_if_kernel("Bounds check error in kernel code", regs);

	/*
	 * Pull out the address that failed bounds checking, and the lower /
	 * upper bound, by minimally looking at the faulting instruction word
	 * and reading from the correct register.
	 */
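	/*
	 * As a reminder of the (assumed) assertion semantics decoded below:
	 * asrtle traps when rj > rk, so rk serves as the upper bound, while
	 * asrtgt traps when rj <= rk, so rk serves as the lower bound; the
	 * bounded load/store variants (the le/gt-suffixed ld/st/fld/fst ops)
	 * follow the same split.
	 */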
	if (__get_inst(&insn.word, (u32 *)era, user))
		goto bad_era;

	switch (insn.reg3_format.opcode) {
	case asrtle_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtle */
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case asrtgt_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtgt */
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;

	case ldleb_op:
	case ldleh_op:
	case ldlew_op:
	case ldled_op:
	case stleb_op:
	case stleh_op:
	case stlew_op:
	case stled_op:
	case fldles_op:
	case fldled_op:
	case fstles_op:
	case fstled_op:
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case ldgtb_op:
	case ldgth_op:
	case ldgtw_op:
	case ldgtd_op:
	case stgtb_op:
	case stgth_op:
	case stgtw_op:
	case stgtd_op:
	case fldgts_op:
	case fldgtd_op:
	case fstgts_op:
	case fstgtd_op:
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;
	}

	force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

bad_era:
	/*
	 * Cannot pull out the instruction word, hence cannot provide more
	 * info than a regular SIGSEGV in this case.
	 */
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned int opcode, bcode;
	unsigned long era = exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (__get_inst(&opcode, (u32 *)era, user))
		goto out_sigsegv;

	bcode = (opcode & 0x7fff);
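	/*
	 * The break instruction encodes a 15-bit code in its low bits,
	 * hence the 0x7fff mask above; the BRK_* constants matched below
	 * are conventional values of that code.
	 */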

	/*
	 * Notify the kprobe handlers if the instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_KDB:
		if (kgdb_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (kprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (kprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_BP:
		if (uprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_XOLBP:
		if (uprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	default:
		current->thread.trap_nr = read_csr_excode();
		if (notify_die(DIE_TRAP, "Break", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	}

	switch (bcode) {
	case BRK_BUG:
		bug_handler(regs);
		break;
	case BRK_DIVZERO:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
		break;
	case BRK_OVERFLOW:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
		break;
	}

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_HAVE_HW_BREAKPOINT
	pr_warn("Hardware watch point handler not implemented!\n");
#else
	if (kgdb_breakpoint_handler(regs))
		goto out;

	if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
		int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
		unsigned long pc = instruction_pointer(regs);
		union loongarch_instruction *ip = (union loongarch_instruction *)pc;

		if (llbit) {
			/*
			 * When an ll-sc combo is encountered, it is regarded as a
			 * single instruction, so don't clear llbit or reset
			 * CSR.FWPS.Skip until the ll-sc execution is completed.
			 */
			csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
			csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL);
			goto out;
		}

		if (pc == current->thread.single_step) {
			/*
			 * Certain instructions are occasionally not skipped even
			 * when CSR.FWPS.Skip is set, e.g. fld.d/fst.d. So the
			 * single-step logic needs to check whether csr_era still
			 * equals the address recorded when the single step was
			 * last set up.
			 */
			if (!is_self_loop_ins(ip, regs)) {
				/*
				 * Check whether the target pc of the given instruction
				 * equals the current pc; if so, we should not set the
				 * CSR.FWPS.Skip bit, as that would break the original
				 * instruction stream.
				 */
				csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
				goto out;
			}
		}
	} else {
		breakpoint_handler(regs);
		watchpoint_handler(regs);
	}

	force_sig(SIGTRAP);
out:
#endif
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
	int status = SIGILL;
	unsigned int __maybe_unused opcode;
	unsigned int __user *era = (unsigned int __user *)exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(get_user(opcode, era) < 0)) {
		status = SIGSEGV;
		current->thread.error_code = 1;
	}

	force_sig(status);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

static void init_restore_fp(void)
{
	if (!used_math()) {
		/* First time FP context user. */
		init_fpu();
	} else {
		/* This task has formerly used the FP context */
		if (!is_fpu_owner())
			own_fpu_inatomic(1);
	}

	BUG_ON(!is_fp_enabled());
}

static void init_restore_lsx(void)
{
	enable_lsx();

	if (!thread_lsx_context_live()) {
		/* First time LSX context user */
		init_restore_fp();
		init_lsx_upper();
		set_thread_flag(TIF_LSX_CTX_LIVE);
	} else {
		if (!is_simd_owner()) {
			if (is_fpu_owner()) {
				restore_lsx_upper(current);
			} else {
				__own_fpu();
				restore_lsx(current);
			}
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
}

static void init_restore_lasx(void)
{
	enable_lasx();

	if (!thread_lasx_context_live()) {
		/* First time LASX context user */
		init_restore_lsx();
		init_lasx_upper();
		set_thread_flag(TIF_LASX_CTX_LIVE);
	} else {
		if (is_fpu_owner() || is_simd_owner()) {
			init_restore_lsx();
			restore_lasx_upper(current);
		} else {
			__own_fpu();
			enable_lsx();
			restore_lasx(current);
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
	BUG_ON(!is_lasx_enabled());
}

asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	die_if_kernel("do_fpu invoked from kernel context!", regs);
	BUG_ON(is_lsx_enabled());
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_fp();
	preempt_enable();

	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lsx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lsx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lsx invoked from kernel context!", regs);
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_lsx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lasx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lasx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lasx invoked from kernel context!", regs);

	preempt_disable();
	init_restore_lasx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

static void init_restore_lbt(void)
{
	if (!thread_lbt_context_live()) {
		/* First time LBT context user */
		init_lbt();
		set_thread_flag(TIF_LBT_CTX_LIVE);
	} else {
		if (!is_lbt_owner())
			own_lbt_inatomic(1);
	}

	BUG_ON(!is_lbt_enabled());
}

asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	/*
	 * BTD (Binary Translation Disable exception) can be triggered
	 * during FP save/restore if TM (Top Mode) is on, which may
	 * cause interrupts to be enabled during 'switch_to'. To avoid
	 * this situation (including the case where the user turns on TM
	 * with 'MOVGR2GCSR', which will not trigger the BTE), we need
	 * to check PRMD first.
	 */
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (!cpu_has_lbt) {
		force_sig(SIGILL);
		goto out;
	}
	BUG_ON(is_lbt_enabled());

	preempt_disable();
	init_restore_lbt();
	preempt_enable();

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_reserved(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	/*
	 * Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a fatal error after another hardware/software error.
	 */
	pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
		read_csr_excode(), current->pid, current->comm);
	die_if_kernel("do_reserved exception", regs);
	force_sig(SIGUNUSED);

	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void cache_parity_error(void)
{
	/* For the moment, report the problem and hang. */
	pr_err("Cache error exception:\n");
	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
	pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
	panic("Can't handle the cache error!");
}

asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
{
	register int cpu;
	register unsigned long stack;
	irqentry_state_t state = irqentry_enter(regs);

	cpu = smp_processor_id();

	if (on_irq_stack(cpu, sp))
		handle_loongarch_irq(regs);
	else {
		stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;

		/* Save task's sp on IRQ stack for unwinding */
		*(unsigned long *)stack = sp;

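		/*
		 * Hand-rolled stack switch: the task sp is stashed in $s0
		 * (callee-saved, so it survives the call), the IRQ stack is
		 * installed, and handle_loongarch_irq() is called with regs
		 * in $a0. The caller-saved GPRs are listed as clobbers since
		 * the callee is free to use them.
		 */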
		__asm__ __volatile__(
		"move	$s0, $sp		\n" /* Preserve sp */
		"move	$sp, %[stk]		\n" /* Switch stack */
		"move	$a0, %[regs]		\n"
		"bl	handle_loongarch_irq	\n"
		"move	$sp, $s0		\n" /* Restore sp */
		: /* No outputs */
		: [stk] "r" (stack), [regs] "r" (regs)
		: "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
		  "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
		  "memory");
	}

	irqentry_exit(regs, state);
}

unsigned long eentry;
unsigned long tlbrentry;

long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);
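
/*
 * 128 slots of VECSIZE bytes each leave room for the 64 exception
 * vectors, the vectored interrupts above them, and spare slots such as
 * slot 80, which configure_exception_vector() below uses for the TLB
 * refill entry; the generous 64KB alignment comfortably satisfies the
 * alignment the entry-base CSRs expect.
 */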

static void configure_exception_vector(void)
{
	eentry    = (unsigned long)exception_handlers;
	tlbrentry = (unsigned long)exception_handlers + 80 * VECSIZE;

	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
}
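
/*
 * In the layout set up above, the machine-error entry shares the normal
 * exception base, while the TLB refill handler gets its own slot at
 * 80 * VECSIZE, past the 64 exception vectors and the interrupt vectors
 * that follow them.
 */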

void per_cpu_trap_init(int cpu)
{
	unsigned int i;

	setup_vint_size(VECSIZE);

	configure_exception_vector();

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Initialise exception handlers */
	if (cpu == 0)
		for (i = 0; i < 64; i++)
			set_handler(i * VECSIZE, handle_reserved, VECSIZE);

	tlb_init(cpu);
	cpu_cache_init();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(eentry + offset), addr, size);
	local_flush_icache_range(eentry + offset, eentry + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
{
	unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_eentry + offset), addr, size);
}

void __init trap_init(void)
{
	long i;

	/* Set interrupt vector handler */
	for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
		set_handler(i * VECSIZE, handle_vint, VECSIZE);

	set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
	set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
	set_handler(EXCCODE_BCE * VECSIZE, handle_bce, VECSIZE);
	set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
	set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
	set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
	set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
	set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
	set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
	set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
	set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);

	cache_error_setup();

	local_flush_icache_range(eentry, eentry + 0x400);
}