/* LoongArch trap and exception handling (Linux v6.8) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/exception.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/inst.h>
#include <asm/kgdb.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>
#include <asm/unwind.h>
#include <asm/uprobes.h>

#include "access-helper.h"

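/*
 * Walk the frames with the arch unwinder and print one symbolized
 * return address per line at the requested log level.
 */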
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long addr;
	struct unwind_state state;
	struct pt_regs *pregs = (struct pt_regs *)regs;

	if (!task)
		task = current;

	printk("%sCall Trace:", loglvl);
	for (unwind_start(&state, task, pregs);
	      !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}

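/*
 * Dump raw stack words starting at $sp (regs->regs[3]), stopping at the
 * next page boundary, after 40 words, or at the first unreadable word,
 * then print the symbolic backtrace.
 */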
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	int i;
	const int field = 2 * sizeof(unsigned long);
	unsigned long stackdata;
	unsigned long *sp = (unsigned long *)regs->regs[3];

	printk("%sStack :", loglvl);
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s       ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}

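/*
 * Generic show_stack() entry point: synthesize a minimal pt_regs from
 * the given stack pointer or from the target task's saved context, then
 * defer to show_stacktrace() (always treating the stack as kernel memory).
 */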
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.csr_crmd = 0;
	if (sp) {
		regs.csr_era = 0;
		regs.regs[1] = 0;
		regs.regs[3] = (unsigned long)sp;
	} else {
		if (!task || task == current)
			prepare_frametrace(&regs);
		else {
			regs.csr_era = task->thread.reg01;
			regs.regs[1] = 0;
			regs.regs[3] = task->thread.reg03;
			regs.regs[22] = task->thread.reg22;
		}
	}

	show_stacktrace(task, &regs, loglvl, false);
}

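/*
 * Dump nine instruction words around the faulting PC, wrapping the
 * word at ERA itself in angle brackets.
 */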
static void show_code(unsigned int *pc, bool user)
{
	long i;
	unsigned int insn;

	printk("Code:");

	for (i = -3; i < 6; i++) {
		if (__get_inst(&insn, pc + i, user)) {
			pr_cont(" (Bad address in era)\n");
			break;
		}
		pr_cont("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
	pr_cont("\n");
}

static void print_bool_fragment(const char *key, unsigned long val, bool first)
{
	/* e.g. "+PG", "-DA" */
	pr_cont("%s%c%s", first ? "" : " ", val ? '+' : '-', key);
}

static void print_plv_fragment(const char *key, int val)
{
	/* e.g. "PLV0", "PPLV3" */
	pr_cont("%s%d", key, val);
}

static void print_memory_type_fragment(const char *key, unsigned long val)
{
	const char *humanized_type;

	switch (val) {
	case 0:
		humanized_type = "SUC";
		break;
	case 1:
		humanized_type = "CC";
		break;
	case 2:
		humanized_type = "WUC";
		break;
	default:
		pr_cont(" %s=Reserved(%lu)", key, val);
		return;
	}

	/* e.g. " DATM=WUC" */
	pr_cont(" %s=%s", key, humanized_type);
}

static void print_intr_fragment(const char *key, unsigned long val)
{
	/* e.g. "LIE=0-1,3,5-7" */
	pr_cont("%s=%*pbl", key, EXCCODE_INT_NUM, &val);
}

static void print_crmd(unsigned long x)
{
	printk(" CRMD: %08lx (", x);
	print_plv_fragment("PLV", (int) FIELD_GET(CSR_CRMD_PLV, x));
	print_bool_fragment("IE", FIELD_GET(CSR_CRMD_IE, x), false);
	print_bool_fragment("DA", FIELD_GET(CSR_CRMD_DA, x), false);
	print_bool_fragment("PG", FIELD_GET(CSR_CRMD_PG, x), false);
	print_memory_type_fragment("DACF", FIELD_GET(CSR_CRMD_DACF, x));
	print_memory_type_fragment("DACM", FIELD_GET(CSR_CRMD_DACM, x));
	print_bool_fragment("WE", FIELD_GET(CSR_CRMD_WE, x), false);
	pr_cont(")\n");
}

static void print_prmd(unsigned long x)
{
	printk(" PRMD: %08lx (", x);
	print_plv_fragment("PPLV", (int) FIELD_GET(CSR_PRMD_PPLV, x));
	print_bool_fragment("PIE", FIELD_GET(CSR_PRMD_PIE, x), false);
	print_bool_fragment("PWE", FIELD_GET(CSR_PRMD_PWE, x), false);
	pr_cont(")\n");
}

static void print_euen(unsigned long x)
{
	printk(" EUEN: %08lx (", x);
	print_bool_fragment("FPE", FIELD_GET(CSR_EUEN_FPEN, x), true);
	print_bool_fragment("SXE", FIELD_GET(CSR_EUEN_LSXEN, x), false);
	print_bool_fragment("ASXE", FIELD_GET(CSR_EUEN_LASXEN, x), false);
	print_bool_fragment("BTE", FIELD_GET(CSR_EUEN_LBTEN, x), false);
	pr_cont(")\n");
}

static void print_ecfg(unsigned long x)
{
	printk(" ECFG: %08lx (", x);
	print_intr_fragment("LIE", FIELD_GET(CSR_ECFG_IM, x));
	pr_cont(" VS=%d)\n", (int) FIELD_GET(CSR_ECFG_VS, x));
}

static const char *humanize_exc_name(unsigned int ecode, unsigned int esubcode)
{
	/*
	 * LoongArch users and developers are probably more familiar with
	 * the exception names found in the ISA manual, so print those
	 * instead. This requires some mapping.
	 */
	switch (ecode) {
	case EXCCODE_RSV: return "INT";
	case EXCCODE_TLBL: return "PIL";
	case EXCCODE_TLBS: return "PIS";
	case EXCCODE_TLBI: return "PIF";
	case EXCCODE_TLBM: return "PME";
	case EXCCODE_TLBNR: return "PNR";
	case EXCCODE_TLBNX: return "PNX";
	case EXCCODE_TLBPE: return "PPI";
	case EXCCODE_ADE:
		switch (esubcode) {
		case EXSUBCODE_ADEF: return "ADEF";
		case EXSUBCODE_ADEM: return "ADEM";
		}
		break;
	case EXCCODE_ALE: return "ALE";
	case EXCCODE_BCE: return "BCE";
	case EXCCODE_SYS: return "SYS";
	case EXCCODE_BP: return "BRK";
	case EXCCODE_INE: return "INE";
	case EXCCODE_IPE: return "IPE";
	case EXCCODE_FPDIS: return "FPD";
	case EXCCODE_LSXDIS: return "SXD";
	case EXCCODE_LASXDIS: return "ASXD";
	case EXCCODE_FPE:
		switch (esubcode) {
		case EXCSUBCODE_FPE: return "FPE";
		case EXCSUBCODE_VFPE: return "VFPE";
		}
		break;
	case EXCCODE_WATCH:
		switch (esubcode) {
		case EXCSUBCODE_WPEF: return "WPEF";
		case EXCSUBCODE_WPEM: return "WPEM";
		}
		break;
	case EXCCODE_BTDIS: return "BTD";
	case EXCCODE_BTE: return "BTE";
	case EXCCODE_GSPR: return "GSPR";
	case EXCCODE_HVC: return "HVC";
	case EXCCODE_GCM:
		switch (esubcode) {
		case EXCSUBCODE_GCSC: return "GCSC";
		case EXCSUBCODE_GCHC: return "GCHC";
		}
		break;
	/*
	 * The manual does not mention the EXCCODE_SE case, but print it
	 * out nevertheless.
	 */
	case EXCCODE_SE: return "SE";
	}

	return "???";
}

static void print_estat(unsigned long x)
{
	unsigned int ecode = FIELD_GET(CSR_ESTAT_EXC, x);
	unsigned int esubcode = FIELD_GET(CSR_ESTAT_ESUBCODE, x);

	printk("ESTAT: %08lx [%s] (", x, humanize_exc_name(ecode, esubcode));
	print_intr_fragment("IS", FIELD_GET(CSR_ESTAT_IS, x));
	pr_cont(" ECode=%d EsubCode=%d)\n", (int) ecode, (int) esubcode);
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int exccode = FIELD_GET(CSR_ESTAT_EXC, regs->csr_estat);

	show_regs_print_info(KERN_DEFAULT);

	/* Print saved GPRs except $zero (substituting with PC/ERA) */
#define GPR_FIELD(x) field, regs->regs[x]
	printk("pc %0*lx ra %0*lx tp %0*lx sp %0*lx\n",
	       field, regs->csr_era, GPR_FIELD(1), GPR_FIELD(2), GPR_FIELD(3));
	printk("a0 %0*lx a1 %0*lx a2 %0*lx a3 %0*lx\n",
	       GPR_FIELD(4), GPR_FIELD(5), GPR_FIELD(6), GPR_FIELD(7));
	printk("a4 %0*lx a5 %0*lx a6 %0*lx a7 %0*lx\n",
	       GPR_FIELD(8), GPR_FIELD(9), GPR_FIELD(10), GPR_FIELD(11));
	printk("t0 %0*lx t1 %0*lx t2 %0*lx t3 %0*lx\n",
	       GPR_FIELD(12), GPR_FIELD(13), GPR_FIELD(14), GPR_FIELD(15));
	printk("t4 %0*lx t5 %0*lx t6 %0*lx t7 %0*lx\n",
	       GPR_FIELD(16), GPR_FIELD(17), GPR_FIELD(18), GPR_FIELD(19));
	printk("t8 %0*lx u0 %0*lx s9 %0*lx s0 %0*lx\n",
	       GPR_FIELD(20), GPR_FIELD(21), GPR_FIELD(22), GPR_FIELD(23));
	printk("s1 %0*lx s2 %0*lx s3 %0*lx s4 %0*lx\n",
	       GPR_FIELD(24), GPR_FIELD(25), GPR_FIELD(26), GPR_FIELD(27));
	printk("s5 %0*lx s6 %0*lx s7 %0*lx s8 %0*lx\n",
	       GPR_FIELD(28), GPR_FIELD(29), GPR_FIELD(30), GPR_FIELD(31));

	/* The slot for $zero is reused as the syscall restart flag */
	if (regs->regs[0])
		printk("syscall restart flag: %0*lx\n", GPR_FIELD(0));

	if (user_mode(regs)) {
		printk("   ra: %0*lx\n", GPR_FIELD(1));
		printk("  ERA: %0*lx\n", field, regs->csr_era);
	} else {
		printk("   ra: %0*lx %pS\n", GPR_FIELD(1), (void *) regs->regs[1]);
		printk("  ERA: %0*lx %pS\n", field, regs->csr_era, (void *) regs->csr_era);
	}
#undef GPR_FIELD

	/* Print saved important CSRs */
	print_crmd(regs->csr_crmd);
	print_prmd(regs->csr_prmd);
	print_euen(regs->csr_euen);
	print_ecfg(regs->csr_ecfg);
	print_estat(regs->csr_estat);

	if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
		printk(" BADV: %0*lx\n", field, regs->csr_badvaddr);

	printk(" PRID: %08x (%s, %s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
	       cpu_family_string(), cpu_full_name_string());
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	       current->comm, current->pid, current_thread_info(), current);

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->csr_era, user_mode(regs));
	printk("\n");
}

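/*
 * die() serializes oops output under die_lock, taints the kernel, and
 * then either returns (when a notifier handled the event), crash-kexecs,
 * panics, or kills the current task.
 */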
static DEFINE_RAW_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs)
{
	int ret;
	static int die_counter;

	oops_enter();

	ret = notify_die(DIE_OOPS, str, regs, 0,
			 current->thread.trap_nr, SIGSEGV);

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (ret == NOTIFY_STOP)
		return;

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(SIGSEGV);
}

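/*
 * Program ECFG.VS so that hardware spaces vectored interrupt entries
 * 'size' bytes apart (VS = log2(size / 4), which must be in 1..7).
 */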
static inline void setup_vint_size(unsigned int size)
{
	unsigned int vs;

	vs = ilog2(size/4);

	if (vs == 0 || vs > 7)
		panic("vint_size %d not supported yet", vs);

	csr_xchg32(vs << CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
}

/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
static void force_fcsr_sig(unsigned long fcsr,
			void __user *fault_addr, struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcsr & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcsr & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcsr & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcsr & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcsr & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault(SIGFPE, si_code, fault_addr);
}

static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcsr_sig(fcsr, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}

/*
 * Delayed FP exceptions when doing a lazy context switch.
 */
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
{
	int sig;
	void __user *fault_addr;
	irqentry_state_t state = irqentry_enter(regs);

	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	sig = SIGFPE;
	fault_addr = (void __user *) regs->csr_era;

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcsr);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ade(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ade access", regs);
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}

/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */

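/*
 * Unaligned-access exception. Without CONFIG_ARCH_STRICT_ALIGN this is
 * fatal for the kernel and SIGBUS for user space; with it, the kernel
 * first tries to emulate the access via emulate_load_store_insn().
 */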
asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_ARCH_STRICT_ALIGN
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
	unsigned int *pc;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);

	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->csr_badvaddr == regs->csr_era)
		goto sigbus;
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (!unaligned_enabled)
		goto sigbus;
	if (!no_unaligned_warning)
		show_registers(regs);

	pc = (unsigned int *)exception_era(regs);

	emulate_load_store_insn(regs, (void __user *)regs->csr_badvaddr, pc);

	goto out;

sigbus:
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
#endif
	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_GENERIC_BUG */

static void bug_handler(struct pt_regs *regs)
{
	switch (report_bug(regs->csr_era, regs)) {
	case BUG_TRAP_TYPE_BUG:
	case BUG_TRAP_TYPE_NONE:
		die_if_kernel("Oops - BUG", regs);
		force_sig(SIGTRAP);
		break;

	case BUG_TRAP_TYPE_WARN:
		/* Skip the BUG instruction and continue */
		regs->csr_era += LOONGARCH_INSN_SIZE;
		break;
	}
}

asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned long era = exception_era(regs);
	u64 badv = 0, lower = 0, upper = ULONG_MAX;
	union loongarch_instruction insn;
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	current->thread.trap_nr = read_csr_excode();

	die_if_kernel("Bounds check error in kernel code", regs);

	/*
	 * Pull out the address that failed bounds checking, and the lower /
	 * upper bound, by minimally looking at the faulting instruction word
	 * and reading from the correct register.
	 */
	if (__get_inst(&insn.word, (u32 *)era, user))
		goto bad_era;

	switch (insn.reg3_format.opcode) {
	case asrtle_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtle */
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case asrtgt_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtgt */
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;

	case ldleb_op:
	case ldleh_op:
	case ldlew_op:
	case ldled_op:
	case stleb_op:
	case stleh_op:
	case stlew_op:
	case stled_op:
	case fldles_op:
	case fldled_op:
	case fstles_op:
	case fstled_op:
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case ldgtb_op:
	case ldgth_op:
	case ldgtw_op:
	case ldgtd_op:
	case stgtb_op:
	case stgth_op:
	case stgtw_op:
	case stgtd_op:
	case fldgts_op:
	case fldgtd_op:
	case fstgts_op:
	case fstgtd_op:
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;
	}

	force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

bad_era:
	/*
	 * Cannot pull out the instruction word, hence cannot provide more
	 * info than a regular SIGSEGV in this case.
	 */
	force_sig(SIGSEGV);
	goto out;
}

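/*
 * Breakpoint exception: decode the low 15 bits of the break instruction
 * and dispatch to the kgdb/kprobes/uprobes handlers first, then to the
 * BUG / divide-by-zero / overflow cases, defaulting to SIGTRAP.
 */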
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned int opcode, bcode;
	unsigned long era = exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (__get_inst(&opcode, (u32 *)era, user))
		goto out_sigsegv;

	bcode = (opcode & 0x7fff);

	/*
	 * Notify the kgdb/kprobes/uprobes handlers if the instruction is
	 * likely to pertain to them.
	 */
	switch (bcode) {
	case BRK_KDB:
		if (kgdb_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (kprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (kprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_BP:
		if (uprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_XOLBP:
		if (uprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	default:
		current->thread.trap_nr = read_csr_excode();
		if (notify_die(DIE_TRAP, "Break", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	}

	switch (bcode) {
	case BRK_BUG:
		bug_handler(regs);
		break;
	case BRK_DIVZERO:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
		break;
	case BRK_OVERFLOW:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
		break;
	}

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_HAVE_HW_BREAKPOINT
	pr_warn("Hardware watch point handler not implemented!\n");
#else
	if (kgdb_breakpoint_handler(regs))
		goto out;

	if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
		int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
		unsigned long pc = instruction_pointer(regs);
		union loongarch_instruction *ip = (union loongarch_instruction *)pc;

		if (llbit) {
			/*
			 * When an ll-sc combo is encountered, it is regarded as a single
			 * instruction. So don't clear llbit and reset CSR.FWPS.Skip until
			 * the ll-sc execution is completed.
			 */
			csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
			csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL);
			goto out;
		}

		if (pc == current->thread.single_step) {
			/*
			 * Certain insns are occasionally not skipped when CSR.FWPS.Skip is
			 * set, such as fld.d/fst.d. So singlestep needs to compare whether
			 * csr_era is equal to the value of singlestep set last time.
			 */
			if (!is_self_loop_ins(ip, regs)) {
				/*
				 * Check whether the target pc of the given instruction equals
				 * the current pc. If it does, we should not set the
				 * CSR.FWPS.Skip bit, to avoid breaking the original
				 * instruction stream.
				 */
				csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
				goto out;
			}
		}
	} else {
		breakpoint_handler(regs);
		watchpoint_handler(regs);
	}

	force_sig(SIGTRAP);
out:
#endif
	irqentry_exit(regs, state);
}

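/*
 * Reserved (unimplemented) instruction: fatal in kernel mode; user
 * tasks get SIGILL, or SIGSEGV if the faulting word cannot be read.
 */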
asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
	int status = SIGILL;
	unsigned int __maybe_unused opcode;
	unsigned int __user *era = (unsigned int __user *)exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(get_user(opcode, era) < 0)) {
		status = SIGSEGV;
		current->thread.error_code = 1;
	}

	force_sig(status);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

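/*
 * Lazy FPU/SIMD ownership helpers: each init_restore_*() below makes
 * sure the current task owns the relevant context, initializing it on
 * first use or restoring it from the thread structure otherwise.
 */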
static void init_restore_fp(void)
{
	if (!used_math()) {
		/* First time FP context user. */
		init_fpu();
	} else {
		/* This task has formerly used the FP context */
		if (!is_fpu_owner())
			own_fpu_inatomic(1);
	}

	BUG_ON(!is_fp_enabled());
}

static void init_restore_lsx(void)
{
	enable_lsx();

	if (!thread_lsx_context_live()) {
		/* First time LSX context user */
		init_restore_fp();
		init_lsx_upper();
		set_thread_flag(TIF_LSX_CTX_LIVE);
	} else {
		if (!is_simd_owner()) {
			if (is_fpu_owner()) {
				restore_lsx_upper(current);
			} else {
				__own_fpu();
				restore_lsx(current);
			}
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
}

static void init_restore_lasx(void)
{
	enable_lasx();

	if (!thread_lasx_context_live()) {
		/* First time LASX context user */
		init_restore_lsx();
		init_lasx_upper();
		set_thread_flag(TIF_LASX_CTX_LIVE);
	} else {
		if (is_fpu_owner() || is_simd_owner()) {
			init_restore_lsx();
			restore_lasx_upper(current);
		} else {
			__own_fpu();
			enable_lsx();
			restore_lasx(current);
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
	BUG_ON(!is_lasx_enabled());
}

asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	die_if_kernel("do_fpu invoked from kernel context!", regs);
	BUG_ON(is_lsx_enabled());
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_fp();
	preempt_enable();

	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lsx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lsx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lsx invoked from kernel context!", regs);
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_lsx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lasx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lasx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lasx invoked from kernel context!", regs);

	preempt_disable();
	init_restore_lasx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

static void init_restore_lbt(void)
{
	if (!thread_lbt_context_live()) {
		/* First time LBT context user */
		init_lbt();
		set_thread_flag(TIF_LBT_CTX_LIVE);
	} else {
		if (!is_lbt_owner())
			own_lbt_inatomic(1);
	}

	BUG_ON(!is_lbt_enabled());
}

asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	/*
	 * BTD (Binary Translation Disable exception) can be triggered
	 * during FP save/restore if TM (Top Mode) is on, which may
	 * cause irq_enable during 'switch_to'. To avoid this situation
	 * (including the user using 'MOVGR2GCSR' to turn on TM, which
	 * will not trigger the BTE), we need to check PRMD first.
	 */
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (!cpu_has_lbt) {
		force_sig(SIGILL);
		goto out;
	}
	BUG_ON(is_lbt_enabled());

	preempt_disable();
	init_restore_lbt();
	preempt_enable();

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_reserved(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	/*
	 * Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a fatal error after another hardware/software error.
	 */
	pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
		read_csr_excode(), current->pid, current->comm);
	die_if_kernel("do_reserved exception", regs);
	force_sig(SIGUNUSED);

	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void cache_parity_error(void)
{
	/* For the moment, report the problem and hang. */
	pr_err("Cache error exception:\n");
	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
	pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
	panic("Can't handle the cache error!");
}

asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

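/*
 * Vectored interrupt entry. If we are already on this CPU's IRQ stack,
 * dispatch directly; otherwise switch $sp to the IRQ stack (saving the
 * task's sp at its top for the unwinder) around handle_loongarch_irq().
 */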
asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
{
	register int cpu;
	register unsigned long stack;
	irqentry_state_t state = irqentry_enter(regs);

	cpu = smp_processor_id();

	if (on_irq_stack(cpu, sp))
		handle_loongarch_irq(regs);
	else {
		stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;

		/* Save task's sp on IRQ stack for unwinding */
		*(unsigned long *)stack = sp;

		__asm__ __volatile__(
		"move	$s0, $sp		\n" /* Preserve sp */
		"move	$sp, %[stk]		\n" /* Switch stack */
		"move	$a0, %[regs]		\n"
		"bl	handle_loongarch_irq	\n"
		"move	$sp, $s0		\n" /* Restore sp */
		: /* No outputs */
		: [stk] "r" (stack), [regs] "r" (regs)
		: "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
		  "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
		  "memory");
	}

	irqentry_exit(regs, state);
}

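/*
 * Exception vector area: a 64KB-aligned block holding 128 VECSIZE slots.
 * The general exception entry (EENTRY), machine error entry (MERRENTRY)
 * and TLB refill entry (TLBRENTRY, at slot 80) all point into it.
 */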
unsigned long eentry;
unsigned long tlbrentry;

long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);

static void configure_exception_vector(void)
{
	eentry    = (unsigned long)exception_handlers;
	tlbrentry = (unsigned long)exception_handlers + 80 * VECSIZE;

	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
}

void per_cpu_trap_init(int cpu)
{
	unsigned int i;

	setup_vint_size(VECSIZE);

	configure_exception_vector();

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Initialise exception handlers */
	if (cpu == 0)
		for (i = 0; i < 64; i++)
			set_handler(i * VECSIZE, handle_reserved, VECSIZE);

	tlb_init(cpu);
	cpu_cache_init();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(eentry + offset), addr, size);
	local_flush_icache_range(eentry + offset, eentry + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install the uncached CPU exception handler.
 * This is suitable only for the cache error exception, which is the
 * only exception handler that runs uncached.
 */
void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
{
	unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_eentry + offset), addr, size);
}

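/*
 * Boot-time trap setup: point every interrupt vector at handle_vint,
 * install the per-exception handlers, and flush the icache over the
 * vector area.
 */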
void __init trap_init(void)
{
	long i;

	/* Set interrupt vector handler */
	for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
		set_handler(i * VECSIZE, handle_vint, VECSIZE);

	set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
	set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
	set_handler(EXCCODE_BCE * VECSIZE, handle_bce, VECSIZE);
	set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
	set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
	set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
	set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
	set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
	set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
	set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
	set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);

	cache_error_setup();

	local_flush_icache_range(eentry, eentry + 0x400);
}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Author: Huacai Chen <chenhuacai@loongson.cn>
   4 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
   5 */
   6#include <linux/bitfield.h>
   7#include <linux/bitops.h>
   8#include <linux/bug.h>
   9#include <linux/compiler.h>
  10#include <linux/context_tracking.h>
  11#include <linux/entry-common.h>
  12#include <linux/init.h>
  13#include <linux/kernel.h>
  14#include <linux/kexec.h>
  15#include <linux/module.h>
  16#include <linux/extable.h>
  17#include <linux/mm.h>
  18#include <linux/sched/mm.h>
  19#include <linux/sched/debug.h>
  20#include <linux/smp.h>
  21#include <linux/spinlock.h>
  22#include <linux/kallsyms.h>
  23#include <linux/memblock.h>
  24#include <linux/interrupt.h>
  25#include <linux/ptrace.h>
  26#include <linux/kgdb.h>
  27#include <linux/kdebug.h>
  28#include <linux/notifier.h>
  29#include <linux/irq.h>
  30#include <linux/perf_event.h>
  31
  32#include <asm/addrspace.h>
  33#include <asm/bootinfo.h>
  34#include <asm/branch.h>
  35#include <asm/break.h>
  36#include <asm/cpu.h>
  37#include <asm/exception.h>
  38#include <asm/fpu.h>
  39#include <asm/lbt.h>
  40#include <asm/inst.h>
  41#include <asm/kgdb.h>
  42#include <asm/loongarch.h>
  43#include <asm/mmu_context.h>
  44#include <asm/pgtable.h>
  45#include <asm/ptrace.h>
  46#include <asm/sections.h>
  47#include <asm/siginfo.h>
  48#include <asm/stacktrace.h>
  49#include <asm/tlb.h>
  50#include <asm/types.h>
  51#include <asm/unwind.h>
  52#include <asm/uprobes.h>
  53
  54#include "access-helper.h"
  55
  56void *exception_table[EXCCODE_INT_START] = {
  57	[0 ... EXCCODE_INT_START - 1] = handle_reserved,
  58
  59	[EXCCODE_TLBI]		= handle_tlb_load,
  60	[EXCCODE_TLBL]		= handle_tlb_load,
  61	[EXCCODE_TLBS]		= handle_tlb_store,
  62	[EXCCODE_TLBM]		= handle_tlb_modify,
  63	[EXCCODE_TLBNR]		= handle_tlb_protect,
  64	[EXCCODE_TLBNX]		= handle_tlb_protect,
  65	[EXCCODE_TLBPE]		= handle_tlb_protect,
  66	[EXCCODE_ADE]		= handle_ade,
  67	[EXCCODE_ALE]		= handle_ale,
  68	[EXCCODE_BCE]		= handle_bce,
  69	[EXCCODE_SYS]		= handle_sys,
  70	[EXCCODE_BP]		= handle_bp,
  71	[EXCCODE_INE]		= handle_ri,
  72	[EXCCODE_IPE]		= handle_ri,
  73	[EXCCODE_FPDIS]		= handle_fpu,
  74	[EXCCODE_LSXDIS]	= handle_lsx,
  75	[EXCCODE_LASXDIS]	= handle_lasx,
  76	[EXCCODE_FPE]		= handle_fpe,
  77	[EXCCODE_WATCH]		= handle_watch,
  78	[EXCCODE_BTDIS]		= handle_lbt,
  79};
  80EXPORT_SYMBOL_GPL(exception_table);
  81
  82static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
  83			   const char *loglvl, bool user)
  84{
  85	unsigned long addr;
  86	struct unwind_state state;
  87	struct pt_regs *pregs = (struct pt_regs *)regs;
  88
  89	if (!task)
  90		task = current;
  91
  92	printk("%sCall Trace:", loglvl);
  93	for (unwind_start(&state, task, pregs);
  94	      !unwind_done(&state); unwind_next_frame(&state)) {
  95		addr = unwind_get_return_address(&state);
  96		print_ip_sym(loglvl, addr);
  97	}
  98	printk("%s\n", loglvl);
  99}
 100
 101static void show_stacktrace(struct task_struct *task,
 102	const struct pt_regs *regs, const char *loglvl, bool user)
 103{
 104	int i;
 105	const int field = 2 * sizeof(unsigned long);
 106	unsigned long stackdata;
 107	unsigned long *sp = (unsigned long *)regs->regs[3];
 108
 109	printk("%sStack :", loglvl);
 110	i = 0;
 111	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
 112		if (i && ((i % (64 / field)) == 0)) {
 113			pr_cont("\n");
 114			printk("%s       ", loglvl);
 115		}
 116		if (i > 39) {
 117			pr_cont(" ...");
 118			break;
 119		}
 120
 121		if (__get_addr(&stackdata, sp++, user)) {
 122			pr_cont(" (Bad stack address)");
 123			break;
 124		}
 125
 126		pr_cont(" %0*lx", field, stackdata);
 127		i++;
 128	}
 129	pr_cont("\n");
 130	show_backtrace(task, regs, loglvl, user);
 131}
 132
 133void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
 134{
 135	struct pt_regs regs;
 136
 137	regs.csr_crmd = 0;
 138	if (sp) {
 139		regs.csr_era = 0;
 140		regs.regs[1] = 0;
 141		regs.regs[3] = (unsigned long)sp;
 142	} else {
 143		if (!task || task == current)
 144			prepare_frametrace(&regs);
 145		else {
 146			regs.csr_era = task->thread.reg01;
 147			regs.regs[1] = 0;
 148			regs.regs[3] = task->thread.reg03;
 149			regs.regs[22] = task->thread.reg22;
 150		}
 151	}
 152
 153	show_stacktrace(task, &regs, loglvl, false);
 154}
 155
 156static void show_code(unsigned int *pc, bool user)
 157{
 158	long i;
 159	unsigned int insn;
 160
 161	printk("Code:");
 162
 163	for(i = -3 ; i < 6 ; i++) {
 164		if (__get_inst(&insn, pc + i, user)) {
 165			pr_cont(" (Bad address in era)\n");
 166			break;
 167		}
 168		pr_cont("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
 169	}
 170	pr_cont("\n");
 171}
 172
 173static void print_bool_fragment(const char *key, unsigned long val, bool first)
 174{
 175	/* e.g. "+PG", "-DA" */
 176	pr_cont("%s%c%s", first ? "" : " ", val ? '+' : '-', key);
 177}
 178
 179static void print_plv_fragment(const char *key, int val)
 180{
 181	/* e.g. "PLV0", "PPLV3" */
 182	pr_cont("%s%d", key, val);
 183}
 184
 185static void print_memory_type_fragment(const char *key, unsigned long val)
 186{
 187	const char *humanized_type;
 188
 189	switch (val) {
 190	case 0:
 191		humanized_type = "SUC";
 192		break;
 193	case 1:
 194		humanized_type = "CC";
 195		break;
 196	case 2:
 197		humanized_type = "WUC";
 198		break;
 199	default:
 200		pr_cont(" %s=Reserved(%lu)", key, val);
 201		return;
 202	}
 203
 204	/* e.g. " DATM=WUC" */
 205	pr_cont(" %s=%s", key, humanized_type);
 206}
 207
 208static void print_intr_fragment(const char *key, unsigned long val)
 209{
 210	/* e.g. "LIE=0-1,3,5-7" */
 211	pr_cont("%s=%*pbl", key, EXCCODE_INT_NUM, &val);
 212}
 213
 214static void print_crmd(unsigned long x)
 215{
 216	printk(" CRMD: %08lx (", x);
 217	print_plv_fragment("PLV", (int) FIELD_GET(CSR_CRMD_PLV, x));
 218	print_bool_fragment("IE", FIELD_GET(CSR_CRMD_IE, x), false);
 219	print_bool_fragment("DA", FIELD_GET(CSR_CRMD_DA, x), false);
 220	print_bool_fragment("PG", FIELD_GET(CSR_CRMD_PG, x), false);
 221	print_memory_type_fragment("DACF", FIELD_GET(CSR_CRMD_DACF, x));
 222	print_memory_type_fragment("DACM", FIELD_GET(CSR_CRMD_DACM, x));
 223	print_bool_fragment("WE", FIELD_GET(CSR_CRMD_WE, x), false);
 224	pr_cont(")\n");
 225}
 226
 227static void print_prmd(unsigned long x)
 228{
 229	printk(" PRMD: %08lx (", x);
 230	print_plv_fragment("PPLV", (int) FIELD_GET(CSR_PRMD_PPLV, x));
 231	print_bool_fragment("PIE", FIELD_GET(CSR_PRMD_PIE, x), false);
 232	print_bool_fragment("PWE", FIELD_GET(CSR_PRMD_PWE, x), false);
 233	pr_cont(")\n");
 234}
 235
 236static void print_euen(unsigned long x)
 237{
 238	printk(" EUEN: %08lx (", x);
 239	print_bool_fragment("FPE", FIELD_GET(CSR_EUEN_FPEN, x), true);
 240	print_bool_fragment("SXE", FIELD_GET(CSR_EUEN_LSXEN, x), false);
 241	print_bool_fragment("ASXE", FIELD_GET(CSR_EUEN_LASXEN, x), false);
 242	print_bool_fragment("BTE", FIELD_GET(CSR_EUEN_LBTEN, x), false);
 243	pr_cont(")\n");
 244}
 245
 246static void print_ecfg(unsigned long x)
 247{
 248	printk(" ECFG: %08lx (", x);
 249	print_intr_fragment("LIE", FIELD_GET(CSR_ECFG_IM, x));
 250	pr_cont(" VS=%d)\n", (int) FIELD_GET(CSR_ECFG_VS, x));
 251}
 252
 253static const char *humanize_exc_name(unsigned int ecode, unsigned int esubcode)
 254{
 255	/*
 256	 * LoongArch users and developers are probably more familiar with
 257	 * those names found in the ISA manual, so we are going to print out
 258	 * the latter. This will require some mapping.
 259	 */
 260	switch (ecode) {
 261	case EXCCODE_RSV: return "INT";
 262	case EXCCODE_TLBL: return "PIL";
 263	case EXCCODE_TLBS: return "PIS";
 264	case EXCCODE_TLBI: return "PIF";
 265	case EXCCODE_TLBM: return "PME";
 266	case EXCCODE_TLBNR: return "PNR";
 267	case EXCCODE_TLBNX: return "PNX";
 268	case EXCCODE_TLBPE: return "PPI";
 269	case EXCCODE_ADE:
 270		switch (esubcode) {
 271		case EXSUBCODE_ADEF: return "ADEF";
 272		case EXSUBCODE_ADEM: return "ADEM";
 273		}
 274		break;
 275	case EXCCODE_ALE: return "ALE";
 276	case EXCCODE_BCE: return "BCE";
 277	case EXCCODE_SYS: return "SYS";
 278	case EXCCODE_BP: return "BRK";
 279	case EXCCODE_INE: return "INE";
 280	case EXCCODE_IPE: return "IPE";
 281	case EXCCODE_FPDIS: return "FPD";
 282	case EXCCODE_LSXDIS: return "SXD";
 283	case EXCCODE_LASXDIS: return "ASXD";
 284	case EXCCODE_FPE:
 285		switch (esubcode) {
 286		case EXCSUBCODE_FPE: return "FPE";
 287		case EXCSUBCODE_VFPE: return "VFPE";
 288		}
 289		break;
 290	case EXCCODE_WATCH:
 291		switch (esubcode) {
 292		case EXCSUBCODE_WPEF: return "WPEF";
 293		case EXCSUBCODE_WPEM: return "WPEM";
 294		}
 295		break;
 296	case EXCCODE_BTDIS: return "BTD";
 297	case EXCCODE_BTE: return "BTE";
 298	case EXCCODE_GSPR: return "GSPR";
 299	case EXCCODE_HVC: return "HVC";
 300	case EXCCODE_GCM:
 301		switch (esubcode) {
 302		case EXCSUBCODE_GCSC: return "GCSC";
 303		case EXCSUBCODE_GCHC: return "GCHC";
 304		}
 305		break;
 306	/*
 307	 * The manual did not mention the EXCCODE_SE case, but print out it
 308	 * nevertheless.
 309	 */
 310	case EXCCODE_SE: return "SE";
 311	}
 312
 313	return "???";
 314}
 315
 316static void print_estat(unsigned long x)
 317{
 318	unsigned int ecode = FIELD_GET(CSR_ESTAT_EXC, x);
 319	unsigned int esubcode = FIELD_GET(CSR_ESTAT_ESUBCODE, x);
 320
 321	printk("ESTAT: %08lx [%s] (", x, humanize_exc_name(ecode, esubcode));
 322	print_intr_fragment("IS", FIELD_GET(CSR_ESTAT_IS, x));
 323	pr_cont(" ECode=%d EsubCode=%d)\n", (int) ecode, (int) esubcode);
 324}
 325
 326static void __show_regs(const struct pt_regs *regs)
 327{
 328	const int field = 2 * sizeof(unsigned long);
 329	unsigned int exccode = FIELD_GET(CSR_ESTAT_EXC, regs->csr_estat);
 330
 331	show_regs_print_info(KERN_DEFAULT);
 332
 333	/* Print saved GPRs except $zero (substituting with PC/ERA) */
 334#define GPR_FIELD(x) field, regs->regs[x]
 335	printk("pc %0*lx ra %0*lx tp %0*lx sp %0*lx\n",
 336	       field, regs->csr_era, GPR_FIELD(1), GPR_FIELD(2), GPR_FIELD(3));
 337	printk("a0 %0*lx a1 %0*lx a2 %0*lx a3 %0*lx\n",
 338	       GPR_FIELD(4), GPR_FIELD(5), GPR_FIELD(6), GPR_FIELD(7));
 339	printk("a4 %0*lx a5 %0*lx a6 %0*lx a7 %0*lx\n",
 340	       GPR_FIELD(8), GPR_FIELD(9), GPR_FIELD(10), GPR_FIELD(11));
 341	printk("t0 %0*lx t1 %0*lx t2 %0*lx t3 %0*lx\n",
 342	       GPR_FIELD(12), GPR_FIELD(13), GPR_FIELD(14), GPR_FIELD(15));
 343	printk("t4 %0*lx t5 %0*lx t6 %0*lx t7 %0*lx\n",
 344	       GPR_FIELD(16), GPR_FIELD(17), GPR_FIELD(18), GPR_FIELD(19));
 345	printk("t8 %0*lx u0 %0*lx s9 %0*lx s0 %0*lx\n",
 346	       GPR_FIELD(20), GPR_FIELD(21), GPR_FIELD(22), GPR_FIELD(23));
 347	printk("s1 %0*lx s2 %0*lx s3 %0*lx s4 %0*lx\n",
 348	       GPR_FIELD(24), GPR_FIELD(25), GPR_FIELD(26), GPR_FIELD(27));
 349	printk("s5 %0*lx s6 %0*lx s7 %0*lx s8 %0*lx\n",
 350	       GPR_FIELD(28), GPR_FIELD(29), GPR_FIELD(30), GPR_FIELD(31));
 351
 352	/* The slot for $zero is reused as the syscall restart flag */
 353	if (regs->regs[0])
 354		printk("syscall restart flag: %0*lx\n", GPR_FIELD(0));
 355
 356	if (user_mode(regs)) {
 357		printk("   ra: %0*lx\n", GPR_FIELD(1));
 358		printk("  ERA: %0*lx\n", field, regs->csr_era);
 359	} else {
 360		printk("   ra: %0*lx %pS\n", GPR_FIELD(1), (void *) regs->regs[1]);
 361		printk("  ERA: %0*lx %pS\n", field, regs->csr_era, (void *) regs->csr_era);
 362	}
 363#undef GPR_FIELD
 364
 365	/* Print saved important CSRs */
 366	print_crmd(regs->csr_crmd);
 367	print_prmd(regs->csr_prmd);
 368	print_euen(regs->csr_euen);
 369	print_ecfg(regs->csr_ecfg);
 370	print_estat(regs->csr_estat);
 371
 372	if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
 373		printk(" BADV: %0*lx\n", field, regs->csr_badvaddr);
 374
 375	printk(" PRID: %08x (%s, %s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
 376	       cpu_family_string(), cpu_full_name_string());
 377}
 378
 379void show_regs(struct pt_regs *regs)
 380{
 381	__show_regs((struct pt_regs *)regs);
 382	dump_stack();
 383}
 384
 385void show_registers(struct pt_regs *regs)
 386{
 387	__show_regs(regs);
 388	print_modules();
 389	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
 390	       current->comm, current->pid, current_thread_info(), current);
 391
 392	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
 393	show_code((void *)regs->csr_era, user_mode(regs));
 394	printk("\n");
 395}
 396
 397static DEFINE_RAW_SPINLOCK(die_lock);
 398
 399void die(const char *str, struct pt_regs *regs)
 400{
 401	int ret;
 402	static int die_counter;
 403
 404	oops_enter();
 405
 406	ret = notify_die(DIE_OOPS, str, regs, 0,
 407			 current->thread.trap_nr, SIGSEGV);
 408
 409	console_verbose();
 410	raw_spin_lock_irq(&die_lock);
 411	bust_spinlocks(1);
 412
 413	printk("%s[#%d]:\n", str, ++die_counter);
 414	show_registers(regs);
 415	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 416	raw_spin_unlock_irq(&die_lock);
 417
 418	oops_exit();
 419
 420	if (ret == NOTIFY_STOP)
 421		return;
 422
 423	if (regs && kexec_should_crash(current))
 424		crash_kexec(regs);
 425
 426	if (in_interrupt())
 427		panic("Fatal exception in interrupt");
 428
 429	if (panic_on_oops)
 430		panic("Fatal exception");
 431
 432	make_task_dead(SIGSEGV);
 433}
 434
 435static inline void setup_vint_size(unsigned int size)
 436{
 437	unsigned int vs;
 438
 439	vs = ilog2(size/4);
 440
 441	if (vs == 0 || vs > 7)
 442		panic("vint_size %d Not support yet", vs);
 443
 444	csr_xchg32(vs<<CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
 445}
 446
 447/*
 448 * Send SIGFPE according to FCSR Cause bits, which must have already
 449 * been masked against Enable bits.  This is impotant as Inexact can
 450 * happen together with Overflow or Underflow, and `ptrace' can set
 451 * any bits.
 452 */
 453static void force_fcsr_sig(unsigned long fcsr,
 454			void __user *fault_addr, struct task_struct *tsk)
 455{
 456	int si_code = FPE_FLTUNK;
 457
 458	if (fcsr & FPU_CSR_INV_X)
 459		si_code = FPE_FLTINV;
 460	else if (fcsr & FPU_CSR_DIV_X)
 461		si_code = FPE_FLTDIV;
 462	else if (fcsr & FPU_CSR_OVF_X)
 463		si_code = FPE_FLTOVF;
 464	else if (fcsr & FPU_CSR_UDF_X)
 465		si_code = FPE_FLTUND;
 466	else if (fcsr & FPU_CSR_INE_X)
 467		si_code = FPE_FLTRES;
 468
 469	force_sig_fault(SIGFPE, si_code, fault_addr);
 470}
 471
 472static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
 473{
 474	int si_code;
 475
 476	switch (sig) {
 477	case 0:
 478		return 0;
 479
 480	case SIGFPE:
 481		force_fcsr_sig(fcsr, fault_addr, current);
 482		return 1;
 483
 484	case SIGBUS:
 485		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
 486		return 1;
 487
 488	case SIGSEGV:
 489		mmap_read_lock(current->mm);
 490		if (vma_lookup(current->mm, (unsigned long)fault_addr))
 491			si_code = SEGV_ACCERR;
 492		else
 493			si_code = SEGV_MAPERR;
 494		mmap_read_unlock(current->mm);
 495		force_sig_fault(SIGSEGV, si_code, fault_addr);
 496		return 1;
 497
 498	default:
 499		force_sig(sig);
 500		return 1;
 501	}
 502}
 503
 504/*
 505 * Delayed fp exceptions when doing a lazy ctx switch
 506 */
 507asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
 508{
 509	int sig;
 510	void __user *fault_addr;
 511	irqentry_state_t state = irqentry_enter(regs);
 512
 513	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
 514		       SIGFPE) == NOTIFY_STOP)
 515		goto out;
 516
 517	/* Clear FCSR.Cause before enabling interrupts */
 518	write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
 519	local_irq_enable();
 520
 521	die_if_kernel("FP exception in kernel code", regs);
 522
 523	sig = SIGFPE;
 524	fault_addr = (void __user *) regs->csr_era;
 525
 526	/* Send a signal if required.  */
 527	process_fpemu_return(sig, fault_addr, fcsr);
 528
 529out:
 530	local_irq_disable();
 531	irqentry_exit(regs, state);
 532}
 533
 534asmlinkage void noinstr do_ade(struct pt_regs *regs)
 535{
 536	irqentry_state_t state = irqentry_enter(regs);
 537
 538	die_if_kernel("Kernel ade access", regs);
 539	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);
 540
 541	irqentry_exit(regs, state);
 542}
 543
 544/* sysctl hooks */
 545int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
 546int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */
 547
 548asmlinkage void noinstr do_ale(struct pt_regs *regs)
 549{
 550	irqentry_state_t state = irqentry_enter(regs);
 551
 552#ifndef CONFIG_ARCH_STRICT_ALIGN
 553	die_if_kernel("Kernel ale access", regs);
 554	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
 555#else
 556	unsigned int *pc;
 557
 558	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
 559
 560	/*
 561	 * Did we catch a fault trying to load an instruction?
 562	 */
 563	if (regs->csr_badvaddr == regs->csr_era)
 564		goto sigbus;
 565	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
 566		goto sigbus;
 567	if (!unaligned_enabled)
 568		goto sigbus;
 569	if (!no_unaligned_warning)
 570		show_registers(regs);
 571
 572	pc = (unsigned int *)exception_era(regs);
 573
 574	emulate_load_store_insn(regs, (void __user *)regs->csr_badvaddr, pc);
 575
 576	goto out;
 577
 578sigbus:
 579	die_if_kernel("Kernel ale access", regs);
 580	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
 581out:
 582#endif
 583	irqentry_exit(regs, state);
 584}
 585
 586#ifdef CONFIG_GENERIC_BUG
 587int is_valid_bugaddr(unsigned long addr)
 588{
 589	return 1;
 590}
 591#endif /* CONFIG_GENERIC_BUG */
 592
 593static void bug_handler(struct pt_regs *regs)
 594{
 595	switch (report_bug(regs->csr_era, regs)) {
 596	case BUG_TRAP_TYPE_BUG:
 597	case BUG_TRAP_TYPE_NONE:
 598		die_if_kernel("Oops - BUG", regs);
 599		force_sig(SIGTRAP);
 600		break;
 601
 602	case BUG_TRAP_TYPE_WARN:
 603		/* Skip the BUG instruction and continue */
 604		regs->csr_era += LOONGARCH_INSN_SIZE;
 605		break;
 606	}
 607}
 608
 609asmlinkage void noinstr do_bce(struct pt_regs *regs)
 610{
 611	bool user = user_mode(regs);
 612	unsigned long era = exception_era(regs);
 613	u64 badv = 0, lower = 0, upper = ULONG_MAX;
 614	union loongarch_instruction insn;
 615	irqentry_state_t state = irqentry_enter(regs);
 616
 617	if (regs->csr_prmd & CSR_PRMD_PIE)
 618		local_irq_enable();
 619
 620	current->thread.trap_nr = read_csr_excode();
 621
 622	die_if_kernel("Bounds check error in kernel code", regs);
 623
 624	/*
 625	 * Pull out the address that failed bounds checking, and the lower /
 626	 * upper bound, by minimally looking at the faulting instruction word
 627	 * and reading from the correct register.
 628	 */
 629	if (__get_inst(&insn.word, (u32 *)era, user))
 630		goto bad_era;
 631
 632	switch (insn.reg3_format.opcode) {
 633	case asrtle_op:
 634		if (insn.reg3_format.rd != 0)
 635			break;	/* not asrtle */
 636		badv = regs->regs[insn.reg3_format.rj];
 637		upper = regs->regs[insn.reg3_format.rk];
 638		break;
 639
 640	case asrtgt_op:
 641		if (insn.reg3_format.rd != 0)
 642			break;	/* not asrtgt */
 643		badv = regs->regs[insn.reg3_format.rj];
 644		lower = regs->regs[insn.reg3_format.rk];
 645		break;
 646
 647	case ldleb_op:
 648	case ldleh_op:
 649	case ldlew_op:
 650	case ldled_op:
 651	case stleb_op:
 652	case stleh_op:
 653	case stlew_op:
 654	case stled_op:
 655	case fldles_op:
 656	case fldled_op:
 657	case fstles_op:
 658	case fstled_op:
 659		badv = regs->regs[insn.reg3_format.rj];
 660		upper = regs->regs[insn.reg3_format.rk];
 661		break;
 662
 663	case ldgtb_op:
 664	case ldgth_op:
 665	case ldgtw_op:
 666	case ldgtd_op:
 667	case stgtb_op:
 668	case stgth_op:
 669	case stgtw_op:
 670	case stgtd_op:
 671	case fldgts_op:
 672	case fldgtd_op:
 673	case fstgts_op:
 674	case fstgtd_op:
 675		badv = regs->regs[insn.reg3_format.rj];
 676		lower = regs->regs[insn.reg3_format.rk];
 677		break;
 678	}
 679
 680	force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
 681
 682out:
 683	if (regs->csr_prmd & CSR_PRMD_PIE)
 684		local_irq_disable();
 685
 686	irqentry_exit(regs, state);
 687	return;
 688
 689bad_era:
 690	/*
 691	 * Cannot pull out the instruction word, hence cannot provide more
 692	 * info than a regular SIGSEGV in this case.
 693	 */
 694	force_sig(SIGSEGV);
 695	goto out;
 696}
 697
 698asmlinkage void noinstr do_bp(struct pt_regs *regs)
 699{
 700	bool user = user_mode(regs);
 701	unsigned int opcode, bcode;
 702	unsigned long era = exception_era(regs);
 703	irqentry_state_t state = irqentry_enter(regs);
 704
 705	if (regs->csr_prmd & CSR_PRMD_PIE)
 706		local_irq_enable();
 707
 708	if (__get_inst(&opcode, (u32 *)era, user))
 709		goto out_sigsegv;
 710
 711	bcode = (opcode & 0x7fff);
 712
 713	/*
 714	 * notify the kprobe handlers, if instruction is likely to
 715	 * pertain to them.
 716	 */
 717	switch (bcode) {
 718	case BRK_KDB:
 719		if (kgdb_breakpoint_handler(regs))
 720			goto out;
 721		else
 722			break;
 723	case BRK_KPROBE_BP:
 724		if (kprobe_breakpoint_handler(regs))
 725			goto out;
 726		else
 727			break;
 728	case BRK_KPROBE_SSTEPBP:
 729		if (kprobe_singlestep_handler(regs))
 730			goto out;
 731		else
 732			break;
 733	case BRK_UPROBE_BP:
 734		if (uprobe_breakpoint_handler(regs))
 735			goto out;
 736		else
 737			break;
 738	case BRK_UPROBE_XOLBP:
 739		if (uprobe_singlestep_handler(regs))
 740			goto out;
 741		else
 742			break;
 743	default:
 744		current->thread.trap_nr = read_csr_excode();
 745		if (notify_die(DIE_TRAP, "Break", regs, bcode,
 746			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
 747			goto out;
 748		else
 749			break;
 750	}
 751
 752	switch (bcode) {
 753	case BRK_BUG:
 754		bug_handler(regs);
 755		break;
 756	case BRK_DIVZERO:
 757		die_if_kernel("Break instruction in kernel code", regs);
 758		force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
 759		break;
 760	case BRK_OVERFLOW:
 761		die_if_kernel("Break instruction in kernel code", regs);
 762		force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
 763		break;
 764	default:
 765		die_if_kernel("Break instruction in kernel code", regs);
 766		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
 767		break;
 768	}
 769
 770out:
 771	if (regs->csr_prmd & CSR_PRMD_PIE)
 772		local_irq_disable();
 773
 774	irqentry_exit(regs, state);
 775	return;
 776
 777out_sigsegv:
 778	force_sig(SIGSEGV);
 779	goto out;
 780}
 781
 782asmlinkage void noinstr do_watch(struct pt_regs *regs)
 783{
 784	irqentry_state_t state = irqentry_enter(regs);
 785
 786#ifndef CONFIG_HAVE_HW_BREAKPOINT
 787	pr_warn("Hardware watch point handler not implemented!\n");
 788#else
 789	if (kgdb_breakpoint_handler(regs))
 790		goto out;
 791
 792	if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
 793		int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
 794		unsigned long pc = instruction_pointer(regs);
 795		union loongarch_instruction *ip = (union loongarch_instruction *)pc;
 796
 797		if (llbit) {
  798			/*
  799			 * An ll-sc combo is regarded as a single instruction, so
  800			 * keep llbit alive (KLO) and set CSR.FWPS.Skip; neither is
  801			 * reset until the ll-sc sequence has completed.
  802			 */
 803			csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
 804			csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL);
 805			goto out;
 806		}
 807
 808		if (pc == current->thread.single_step) {
  809			/*
  810			 * Certain insns, such as fld.d/fst.d, are occasionally not
  811			 * skipped even when CSR.FWPS.Skip is set, so single-stepping
  812			 * must compare csr_era against the address armed last time.
  813			 */
 814			if (!is_self_loop_ins(ip, regs)) {
  815				/*
  816				 * Check whether the instruction's target pc equals the
  817				 * current pc; if so, don't set CSR.FWPS.Skip, as that
  818				 * would break the original instruction stream.
  819				 */
 820				csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
 821				goto out;
 822			}
 823		}
 824	} else {
 825		breakpoint_handler(regs);
 826		watchpoint_handler(regs);
 827	}
 828
 829	force_sig(SIGTRAP);
 830out:
 831#endif
 832	irqentry_exit(regs, state);
 833}
 834
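/*
 * Reserved-instruction exception: let notify_die() consumers have a
 * look first, then deliver SIGILL (or SIGSEGV if the faulting word
 * cannot be read back from userspace).
 */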
 835asmlinkage void noinstr do_ri(struct pt_regs *regs)
 836{
 837	int status = SIGILL;
 838	unsigned int __maybe_unused opcode;
 839	unsigned int __user *era = (unsigned int __user *)exception_era(regs);
 840	irqentry_state_t state = irqentry_enter(regs);
 841
 842	local_irq_enable();
 843	current->thread.trap_nr = read_csr_excode();
 844
 845	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
 846		       SIGILL) == NOTIFY_STOP)
 847		goto out;
 848
 849	die_if_kernel("Reserved instruction in kernel code", regs);
 850
 851	if (unlikely(get_user(opcode, era) < 0)) {
 852		status = SIGSEGV;
 853		current->thread.error_code = 1;
 854	}
 855
 856	force_sig(status);
 857
 858out:
 859	local_irq_disable();
 860	irqentry_exit(regs, state);
 861}
 862
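/*
 * The FP/LSX/LASX exceptions below implement lazy context handling:
 * register state is faulted in on first use rather than restored
 * eagerly on every context switch.
 */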
 863static void init_restore_fp(void)
 864{
 865	if (!used_math()) {
 866		/* First time FP context user. */
 867		init_fpu();
 868	} else {
 869		/* This task has formerly used the FP context */
 870		if (!is_fpu_owner())
 871			own_fpu_inatomic(1);
 872	}
 873
 874	BUG_ON(!is_fp_enabled());
 875}
 876
 877static void init_restore_lsx(void)
 878{
 879	enable_lsx();
 880
 881	if (!thread_lsx_context_live()) {
 882		/* First time LSX context user */
 883		init_restore_fp();
 884		init_lsx_upper();
 885		set_thread_flag(TIF_LSX_CTX_LIVE);
 886	} else {
 887		if (!is_simd_owner()) {
 888			if (is_fpu_owner()) {
 889				restore_lsx_upper(current);
 890			} else {
 891				__own_fpu();
 892				restore_lsx(current);
 893			}
 894		}
 895	}
 896
 897	set_thread_flag(TIF_USEDSIMD);
 898
 899	BUG_ON(!is_fp_enabled());
 900	BUG_ON(!is_lsx_enabled());
 901}
 902
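/*
 * LASX registers extend the LSX registers, which in turn share their
 * low bits with the scalar FP registers; hence the separate upper-half
 * restore paths above and below.
 */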
 903static void init_restore_lasx(void)
 904{
 905	enable_lasx();
 906
 907	if (!thread_lasx_context_live()) {
 908		/* First time LASX context user */
 909		init_restore_lsx();
 910		init_lasx_upper();
 911		set_thread_flag(TIF_LASX_CTX_LIVE);
 912	} else {
 913		if (is_fpu_owner() || is_simd_owner()) {
 914			init_restore_lsx();
 915			restore_lasx_upper(current);
 916		} else {
 917			__own_fpu();
 918			enable_lsx();
 919			restore_lasx(current);
 920		}
 921	}
 922
 923	set_thread_flag(TIF_USEDSIMD);
 924
 925	BUG_ON(!is_fp_enabled());
 926	BUG_ON(!is_lsx_enabled());
 927	BUG_ON(!is_lasx_enabled());
 928}
 929
 930asmlinkage void noinstr do_fpu(struct pt_regs *regs)
 931{
 932	irqentry_state_t state = irqentry_enter(regs);
 933
 934	local_irq_enable();
 935	die_if_kernel("do_fpu invoked from kernel context!", regs);
 936	BUG_ON(is_lsx_enabled());
 937	BUG_ON(is_lasx_enabled());
 938
 939	preempt_disable();
 940	init_restore_fp();
 941	preempt_enable();
 942
 943	local_irq_disable();
 944	irqentry_exit(regs, state);
 945}
 946
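/*
 * Guard against taking a SIMD-disabled exception on a CPU that does
 * not actually implement the extension: SIGILL instead of enabling it.
 */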
 947asmlinkage void noinstr do_lsx(struct pt_regs *regs)
 948{
 949	irqentry_state_t state = irqentry_enter(regs);
 950
 951	local_irq_enable();
 952	if (!cpu_has_lsx) {
 953		force_sig(SIGILL);
 954		goto out;
 955	}
 956
 957	die_if_kernel("do_lsx invoked from kernel context!", regs);
 958	BUG_ON(is_lasx_enabled());
 959
 960	preempt_disable();
 961	init_restore_lsx();
 962	preempt_enable();
 963
 964out:
 965	local_irq_disable();
 966	irqentry_exit(regs, state);
 967}
 968
 969asmlinkage void noinstr do_lasx(struct pt_regs *regs)
 970{
 971	irqentry_state_t state = irqentry_enter(regs);
 972
 973	local_irq_enable();
 974	if (!cpu_has_lasx) {
 975		force_sig(SIGILL);
 976		goto out;
 977	}
 978
 979	die_if_kernel("do_lasx invoked from kernel context!", regs);
 980
 981	preempt_disable();
 982	init_restore_lasx();
 983	preempt_enable();
 984
 985out:
 986	local_irq_disable();
 987	irqentry_exit(regs, state);
 988}
 989
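/*
 * LBT (Loongson Binary Translation) state is likewise faulted in
 * lazily on first use, like the FP/SIMD contexts above.
 */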
 990static void init_restore_lbt(void)
 991{
 992	if (!thread_lbt_context_live()) {
 993		/* First time LBT context user */
 994		init_lbt();
 995		set_thread_flag(TIF_LBT_CTX_LIVE);
 996	} else {
 997		if (!is_lbt_owner())
 998			own_lbt_inatomic(1);
 999	}
1000
1001	BUG_ON(!is_lbt_enabled());
1002}
1003
1004asmlinkage void noinstr do_lbt(struct pt_regs *regs)
1005{
1006	irqentry_state_t state = irqentry_enter(regs);
1007
 1008	/*
 1009	 * BTD (Binary Translation Disable exception) can be triggered
 1010	 * during FP save/restore if TM (Top Mode) is on, which may
 1011	 * cause irq_enable during 'switch_to'. To avoid that (including
 1012	 * the case where userspace turns TM on via 'MOVGR2GCSR', which
 1013	 * does not trigger the BTD), we need to check PRMD first.
 1014	 */
1015	if (regs->csr_prmd & CSR_PRMD_PIE)
1016		local_irq_enable();
1017
1018	if (!cpu_has_lbt) {
1019		force_sig(SIGILL);
1020		goto out;
1021	}
1022	BUG_ON(is_lbt_enabled());
1023
1024	preempt_disable();
1025	init_restore_lbt();
1026	preempt_enable();
1027
1028out:
1029	if (regs->csr_prmd & CSR_PRMD_PIE)
1030		local_irq_disable();
1031
1032	irqentry_exit(regs, state);
1033}
1034
1035asmlinkage void noinstr do_reserved(struct pt_regs *regs)
1036{
1037	irqentry_state_t state = irqentry_enter(regs);
1038
1039	local_irq_enable();
 1040	/*
 1041	 * Game over - no way to handle this if it ever occurs. Most probably
 1042	 * caused by a fatal error after another hardware/software error.
 1043	 */
1044	pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
1045		read_csr_excode(), current->pid, current->comm);
1046	die_if_kernel("do_reserved exception", regs);
1047	force_sig(SIGUNUSED);
1048
1049	local_irq_disable();
1050
1051	irqentry_exit(regs, state);
1052}
1053
1054asmlinkage void cache_parity_error(void)
1055{
1056	/* For the moment, report the problem and hang. */
1057	pr_err("Cache error exception:\n");
1058	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
1059	pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
1060	panic("Can't handle the cache error!");
1061}
1062
1063asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
1064{
1065	struct pt_regs *old_regs;
1066
1067	irq_enter_rcu();
1068	old_regs = set_irq_regs(regs);
1069	handle_arch_irq(regs);
1070	set_irq_regs(old_regs);
1071	irq_exit_rcu();
1072}
1073
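/*
 * Vectored interrupt entry: if not already on this CPU's IRQ stack,
 * switch to it by hand before calling the common handler, so that deep
 * interrupt nesting cannot overflow the task stack.
 */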
1074asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
1075{
1076	register int cpu;
1077	register unsigned long stack;
1078	irqentry_state_t state = irqentry_enter(regs);
1079
1080	cpu = smp_processor_id();
1081
1082	if (on_irq_stack(cpu, sp))
1083		handle_loongarch_irq(regs);
1084	else {
1085		stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;
1086
1087		/* Save task's sp on IRQ stack for unwinding */
1088		*(unsigned long *)stack = sp;
1089
1090		__asm__ __volatile__(
1091		"move	$s0, $sp		\n" /* Preserve sp */
1092		"move	$sp, %[stk]		\n" /* Switch stack */
1093		"move	$a0, %[regs]		\n"
1094		"bl	handle_loongarch_irq	\n"
1095		"move	$sp, $s0		\n" /* Restore sp */
1096		: /* No outputs */
1097		: [stk] "r" (stack), [regs] "r" (regs)
1098		: "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
1099		  "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
1100		  "memory");
1101	}
1102
1103	irqentry_exit(regs, state);
1104}
1105
1106unsigned long eentry;
1107unsigned long tlbrentry;
1108
1109long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);
1110
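/*
 * All vectors live in one 64KB-aligned block: eentry doubles as the
 * machine error entry, and the TLB refill entry sits at slot 80 of
 * the same block (80 * VECSIZE bytes in).
 */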
1111static void configure_exception_vector(void)
1112{
1113	eentry    = (unsigned long)exception_handlers;
1114	tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;
1115
1116	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
1117	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
1118	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
1119}
1120
1121void per_cpu_trap_init(int cpu)
1122{
1123	unsigned int i;
1124
1125	setup_vint_size(VECSIZE);
1126
1127	configure_exception_vector();
1128
1129	if (!cpu_data[cpu].asid_cache)
1130		cpu_data[cpu].asid_cache = asid_first_version(cpu);
1131
1132	mmgrab(&init_mm);
1133	current->active_mm = &init_mm;
1134	BUG_ON(current->mm);
1135	enter_lazy_tlb(&init_mm, current);
1136
 1137	/* Default all exception vectors to handle_reserved(); trap_init() installs the real ones */
1138	if (cpu == 0)
1139		for (i = 0; i < 64; i++)
1140			set_handler(i * VECSIZE, handle_reserved, VECSIZE);
1141
1142	tlb_init(cpu);
1143	cpu_cache_init();
1144}
1145
1146/* Install CPU exception handler */
1147void set_handler(unsigned long offset, void *addr, unsigned long size)
1148{
1149	memcpy((void *)(eentry + offset), addr, size);
1150	local_flush_icache_range(eentry + offset, eentry + offset + size);
1151}
1152
1153static const char panic_null_cerr[] =
1154	"Trying to set NULL cache error exception handler\n";
1155
 1156/*
 1157 * Install the uncached CPU exception handler.
 1158 * This is suitable only for the cache error exception, which is the
 1159 * only exception that runs uncached.
 1160 */
1161void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
1162{
1163	unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));
1164
1165	if (!addr)
1166		panic(panic_null_cerr);
1167
1168	memcpy((void *)(uncached_eentry + offset), addr, size);
1169}
1170
1171void __init trap_init(void)
1172{
1173	long i;
1174
1175	/* Set interrupt vector handler */
1176	for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
1177		set_handler(i * VECSIZE, handle_vint, VECSIZE);
1178
1179	/* Set exception vector handler */
1180	for (i = EXCCODE_ADE; i <= EXCCODE_BTDIS; i++)
 1181		set_handler(i * VECSIZE, exception_table[i], VECSIZE);
1182
1183	cache_error_setup();
1184
1185	local_flush_icache_range(eentry, eentry + 0x400);
1186}