// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/exception.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/inst.h>
#include <asm/kgdb.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/stacktrace.h>
#include <asm/tlb.h>
#include <asm/types.h>
#include <asm/unwind.h>
#include <asm/uprobes.h>

#include "access-helper.h"

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long addr;
	struct unwind_state state;
	struct pt_regs *pregs = (struct pt_regs *)regs;

	if (!task)
		task = current;

	printk("%sCall Trace:", loglvl);
	for (unwind_start(&state, task, pregs);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}

static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	int i;
	const int field = 2 * sizeof(unsigned long);
	unsigned long stackdata;
	unsigned long *sp = (unsigned long *)regs->regs[3];

	printk("%sStack :", loglvl);
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s       ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}
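
/*
 * Editorial note (not in the original file): with 64-bit longs, "field"
 * above is 16 hex digits, so 64 / field prints four stack words per line;
 * the walk stops at the next page boundary (when sp & (PAGE_SIZE - 1)
 * becomes zero) or after 40 words, whichever comes first.
 */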

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.csr_crmd = 0;
	if (sp) {
		regs.csr_era = 0;
		regs.regs[1] = 0;
		regs.regs[3] = (unsigned long)sp;
	} else {
		if (!task || task == current)
			prepare_frametrace(&regs);
		else {
			regs.csr_era = task->thread.reg01;
			regs.regs[1] = 0;
			regs.regs[3] = task->thread.reg03;
			regs.regs[22] = task->thread.reg22;
		}
	}

	show_stacktrace(task, &regs, loglvl, false);
}

static void show_code(unsigned int *pc, bool user)
{
	long i;
	unsigned int insn;

	printk("Code:");

	for (i = -3; i < 6; i++) {
		if (__get_inst(&insn, pc + i, user)) {
			pr_cont(" (Bad address in era)\n");
			break;
		}
		pr_cont("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
	}
	pr_cont("\n");
}

static void print_bool_fragment(const char *key, unsigned long val, bool first)
{
	/* e.g. "+PG", "-DA" */
	pr_cont("%s%c%s", first ? "" : " ", val ? '+' : '-', key);
}

static void print_plv_fragment(const char *key, int val)
{
	/* e.g. "PLV0", "PPLV3" */
	pr_cont("%s%d", key, val);
}

static void print_memory_type_fragment(const char *key, unsigned long val)
{
	const char *humanized_type;

	switch (val) {
	case 0:
		humanized_type = "SUC";
		break;
	case 1:
		humanized_type = "CC";
		break;
	case 2:
		humanized_type = "WUC";
		break;
	default:
		pr_cont(" %s=Reserved(%lu)", key, val);
		return;
	}

	/* e.g. " DATM=WUC" */
	pr_cont(" %s=%s", key, humanized_type);
}

static void print_intr_fragment(const char *key, unsigned long val)
{
	/* e.g. "LIE=0-1,3,5-7" */
	pr_cont("%s=%*pbl", key, EXCCODE_INT_NUM, &val);
}
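
/*
 * Illustrative sketch (not in the original file): the "%*pbl" printk
 * specifier renders a bitmap as a range list, taking the number of bits
 * from the field-width argument. With val = 0xeb (bits 0, 1, 3 and 5-7
 * set), the call
 *
 *	print_intr_fragment("LIE", 0xeb);
 *
 * emits "LIE=0-1,3,5-7", matching the example in the comment above.
 */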

static void print_crmd(unsigned long x)
{
	printk(" CRMD: %08lx (", x);
	print_plv_fragment("PLV", (int) FIELD_GET(CSR_CRMD_PLV, x));
	print_bool_fragment("IE", FIELD_GET(CSR_CRMD_IE, x), false);
	print_bool_fragment("DA", FIELD_GET(CSR_CRMD_DA, x), false);
	print_bool_fragment("PG", FIELD_GET(CSR_CRMD_PG, x), false);
	print_memory_type_fragment("DACF", FIELD_GET(CSR_CRMD_DACF, x));
	print_memory_type_fragment("DACM", FIELD_GET(CSR_CRMD_DACM, x));
	print_bool_fragment("WE", FIELD_GET(CSR_CRMD_WE, x), false);
	pr_cont(")\n");
}

static void print_prmd(unsigned long x)
{
	printk(" PRMD: %08lx (", x);
	print_plv_fragment("PPLV", (int) FIELD_GET(CSR_PRMD_PPLV, x));
	print_bool_fragment("PIE", FIELD_GET(CSR_PRMD_PIE, x), false);
	print_bool_fragment("PWE", FIELD_GET(CSR_PRMD_PWE, x), false);
	pr_cont(")\n");
}

static void print_euen(unsigned long x)
{
	printk(" EUEN: %08lx (", x);
	print_bool_fragment("FPE", FIELD_GET(CSR_EUEN_FPEN, x), true);
	print_bool_fragment("SXE", FIELD_GET(CSR_EUEN_LSXEN, x), false);
	print_bool_fragment("ASXE", FIELD_GET(CSR_EUEN_LASXEN, x), false);
	print_bool_fragment("BTE", FIELD_GET(CSR_EUEN_LBTEN, x), false);
	pr_cont(")\n");
}

static void print_ecfg(unsigned long x)
{
	printk(" ECFG: %08lx (", x);
	print_intr_fragment("LIE", FIELD_GET(CSR_ECFG_IM, x));
	pr_cont(" VS=%d)\n", (int) FIELD_GET(CSR_ECFG_VS, x));
}

static const char *humanize_exc_name(unsigned int ecode, unsigned int esubcode)
{
	/*
	 * LoongArch users and developers are probably more familiar with
	 * those names found in the ISA manual, so we are going to print out
	 * the latter. This will require some mapping.
	 */
	switch (ecode) {
	case EXCCODE_RSV: return "INT";
	case EXCCODE_TLBL: return "PIL";
	case EXCCODE_TLBS: return "PIS";
	case EXCCODE_TLBI: return "PIF";
	case EXCCODE_TLBM: return "PME";
	case EXCCODE_TLBNR: return "PNR";
	case EXCCODE_TLBNX: return "PNX";
	case EXCCODE_TLBPE: return "PPI";
	case EXCCODE_ADE:
		switch (esubcode) {
		case EXSUBCODE_ADEF: return "ADEF";
		case EXSUBCODE_ADEM: return "ADEM";
		}
		break;
	case EXCCODE_ALE: return "ALE";
	case EXCCODE_BCE: return "BCE";
	case EXCCODE_SYS: return "SYS";
	case EXCCODE_BP: return "BRK";
	case EXCCODE_INE: return "INE";
	case EXCCODE_IPE: return "IPE";
	case EXCCODE_FPDIS: return "FPD";
	case EXCCODE_LSXDIS: return "SXD";
	case EXCCODE_LASXDIS: return "ASXD";
	case EXCCODE_FPE:
		switch (esubcode) {
		case EXCSUBCODE_FPE: return "FPE";
		case EXCSUBCODE_VFPE: return "VFPE";
		}
		break;
	case EXCCODE_WATCH:
		switch (esubcode) {
		case EXCSUBCODE_WPEF: return "WPEF";
		case EXCSUBCODE_WPEM: return "WPEM";
		}
		break;
	case EXCCODE_BTDIS: return "BTD";
	case EXCCODE_BTE: return "BTE";
	case EXCCODE_GSPR: return "GSPR";
	case EXCCODE_HVC: return "HVC";
	case EXCCODE_GCM:
		switch (esubcode) {
		case EXCSUBCODE_GCSC: return "GCSC";
		case EXCSUBCODE_GCHC: return "GCHC";
		}
		break;
	/*
	 * The manual does not mention the EXCCODE_SE case, but print it out
	 * nevertheless.
	 */
	case EXCCODE_SE: return "SE";
	}

	return "???";
}
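
/*
 * Usage sketch (illustrative, not in the original file): an address error
 * on an instruction fetch arrives with ecode == EXCCODE_ADE and
 * esubcode == EXSUBCODE_ADEF, so
 *
 *	humanize_exc_name(EXCCODE_ADE, EXSUBCODE_ADEF);
 *
 * returns "ADEF", the mnemonic used in the LoongArch ISA manual.
 */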

static void print_estat(unsigned long x)
{
	unsigned int ecode = FIELD_GET(CSR_ESTAT_EXC, x);
	unsigned int esubcode = FIELD_GET(CSR_ESTAT_ESUBCODE, x);

	printk("ESTAT: %08lx [%s] (", x, humanize_exc_name(ecode, esubcode));
	print_intr_fragment("IS", FIELD_GET(CSR_ESTAT_IS, x));
	pr_cont(" ECode=%d EsubCode=%d)\n", (int) ecode, (int) esubcode);
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int exccode = FIELD_GET(CSR_ESTAT_EXC, regs->csr_estat);

	show_regs_print_info(KERN_DEFAULT);

	/* Print saved GPRs except $zero (substituting with PC/ERA) */
#define GPR_FIELD(x) field, regs->regs[x]
	printk("pc %0*lx ra %0*lx tp %0*lx sp %0*lx\n",
	       field, regs->csr_era, GPR_FIELD(1), GPR_FIELD(2), GPR_FIELD(3));
	printk("a0 %0*lx a1 %0*lx a2 %0*lx a3 %0*lx\n",
	       GPR_FIELD(4), GPR_FIELD(5), GPR_FIELD(6), GPR_FIELD(7));
	printk("a4 %0*lx a5 %0*lx a6 %0*lx a7 %0*lx\n",
	       GPR_FIELD(8), GPR_FIELD(9), GPR_FIELD(10), GPR_FIELD(11));
	printk("t0 %0*lx t1 %0*lx t2 %0*lx t3 %0*lx\n",
	       GPR_FIELD(12), GPR_FIELD(13), GPR_FIELD(14), GPR_FIELD(15));
	printk("t4 %0*lx t5 %0*lx t6 %0*lx t7 %0*lx\n",
	       GPR_FIELD(16), GPR_FIELD(17), GPR_FIELD(18), GPR_FIELD(19));
	printk("t8 %0*lx u0 %0*lx s9 %0*lx s0 %0*lx\n",
	       GPR_FIELD(20), GPR_FIELD(21), GPR_FIELD(22), GPR_FIELD(23));
	printk("s1 %0*lx s2 %0*lx s3 %0*lx s4 %0*lx\n",
	       GPR_FIELD(24), GPR_FIELD(25), GPR_FIELD(26), GPR_FIELD(27));
	printk("s5 %0*lx s6 %0*lx s7 %0*lx s8 %0*lx\n",
	       GPR_FIELD(28), GPR_FIELD(29), GPR_FIELD(30), GPR_FIELD(31));

	/* The slot for $zero is reused as the syscall restart flag */
	if (regs->regs[0])
		printk("syscall restart flag: %0*lx\n", GPR_FIELD(0));

	if (user_mode(regs)) {
		printk("   ra: %0*lx\n", GPR_FIELD(1));
		printk("  ERA: %0*lx\n", field, regs->csr_era);
	} else {
		printk("   ra: %0*lx %pS\n", GPR_FIELD(1), (void *) regs->regs[1]);
		printk("  ERA: %0*lx %pS\n", field, regs->csr_era, (void *) regs->csr_era);
	}
#undef GPR_FIELD

	/* Print saved important CSRs */
	print_crmd(regs->csr_crmd);
	print_prmd(regs->csr_prmd);
	print_euen(regs->csr_euen);
	print_ecfg(regs->csr_ecfg);
	print_estat(regs->csr_estat);

	if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
		printk(" BADV: %0*lx\n", field, regs->csr_badvaddr);

	printk(" PRID: %08x (%s, %s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
	       cpu_family_string(), cpu_full_name_string());
}

void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
	       current->comm, current->pid, current_thread_info(), current);

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->csr_era, user_mode(regs));
	printk("\n");
}

static DEFINE_RAW_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs)
{
	int ret;
	static int die_counter;

	oops_enter();

	ret = notify_die(DIE_OOPS, str, regs, 0,
			 current->thread.trap_nr, SIGSEGV);

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (ret == NOTIFY_STOP)
		return;

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(SIGSEGV);
}

static inline void setup_vint_size(unsigned int size)
{
	unsigned int vs;

	vs = ilog2(size / 4);

	if (vs == 0 || vs > 7)
		panic("vint_size %d is not supported yet", vs);

	csr_xchg32(vs << CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
}
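
/*
 * Encoding sketch (editorial, not in the original file): each vectored
 * entry spans 2^VS instructions, i.e. (4 << VS) bytes, so VS is recovered
 * from the byte size as ilog2(size / 4). For example, a 512-byte slot
 * yields VS = ilog2(512 / 4) = 7, the largest value accepted above.
 */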

/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */
static void force_fcsr_sig(unsigned long fcsr,
			void __user *fault_addr, struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcsr & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcsr & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcsr & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcsr & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcsr & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault(SIGFPE, si_code, fault_addr);
}
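
/*
 * Caller sketch (illustrative, not in the original file; assumes that
 * mask_fcsr_x() returns the Cause bits whose matching Enable bits are
 * set, the same helper do_fpe() below uses when clearing them):
 *
 *	fcsr = read_fcsr(LOONGARCH_FCSR0);	// hypothetical caller
 *	force_fcsr_sig(fcsr & mask_fcsr_x(fcsr), fault_addr, current);
 *
 * Pre-masking keeps a disabled cause (e.g. Inexact raised together with
 * Overflow or Underflow) from determining the si_code.
 */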

static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcsr_sig(fcsr, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}

/*
 * Delayed fp exceptions when doing a lazy ctx switch
 */
asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
{
	int sig;
	void __user *fault_addr;
	irqentry_state_t state = irqentry_enter(regs);

	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	sig = SIGFPE;
	fault_addr = (void __user *) regs->csr_era;

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcsr);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ade(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	die_if_kernel("Kernel ade access", regs);
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);

	irqentry_exit(regs, state);
}

/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */

asmlinkage void noinstr do_ale(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_ARCH_STRICT_ALIGN
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
	unsigned int *pc;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);

	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->csr_badvaddr == regs->csr_era)
		goto sigbus;
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (!unaligned_enabled)
		goto sigbus;
	if (!no_unaligned_warning)
		show_registers(regs);

	pc = (unsigned int *)exception_era(regs);

	emulate_load_store_insn(regs, (void __user *)regs->csr_badvaddr, pc);

	goto out;

sigbus:
	die_if_kernel("Kernel ale access", regs);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
#endif
	irqentry_exit(regs, state);
}

#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_GENERIC_BUG */

static void bug_handler(struct pt_regs *regs)
{
	switch (report_bug(regs->csr_era, regs)) {
	case BUG_TRAP_TYPE_BUG:
	case BUG_TRAP_TYPE_NONE:
		die_if_kernel("Oops - BUG", regs);
		force_sig(SIGTRAP);
		break;

	case BUG_TRAP_TYPE_WARN:
		/* Skip the BUG instruction and continue */
		regs->csr_era += LOONGARCH_INSN_SIZE;
		break;
	}
}

asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned long era = exception_era(regs);
	u64 badv = 0, lower = 0, upper = ULONG_MAX;
	union loongarch_instruction insn;
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	current->thread.trap_nr = read_csr_excode();

	die_if_kernel("Bounds check error in kernel code", regs);

	/*
	 * Pull out the address that failed bounds checking, and the lower /
	 * upper bound, by minimally looking at the faulting instruction word
	 * and reading from the correct register.
	 */
	if (__get_inst(&insn.word, (u32 *)era, user))
		goto bad_era;

	switch (insn.reg3_format.opcode) {
	case asrtle_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtle */
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case asrtgt_op:
		if (insn.reg3_format.rd != 0)
			break;	/* not asrtgt */
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;

	case ldleb_op:
	case ldleh_op:
	case ldlew_op:
	case ldled_op:
	case stleb_op:
	case stleh_op:
	case stlew_op:
	case stled_op:
	case fldles_op:
	case fldled_op:
	case fstles_op:
	case fstled_op:
		badv = regs->regs[insn.reg3_format.rj];
		upper = regs->regs[insn.reg3_format.rk];
		break;

	case ldgtb_op:
	case ldgth_op:
	case ldgtw_op:
	case ldgtd_op:
	case stgtb_op:
	case stgth_op:
	case stgtw_op:
	case stgtd_op:
	case fldgts_op:
	case fldgtd_op:
	case fstgts_op:
	case fstgtd_op:
		badv = regs->regs[insn.reg3_format.rj];
		lower = regs->regs[insn.reg3_format.rk];
		break;
	}

	force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

bad_era:
	/*
	 * Cannot pull out the instruction word, hence cannot provide more
	 * info than a regular SIGSEGV in this case.
	 */
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
	bool user = user_mode(regs);
	unsigned int opcode, bcode;
	unsigned long era = exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (__get_inst(&opcode, (u32 *)era, user))
		goto out_sigsegv;

	bcode = (opcode & 0x7fff);

	/*
	 * Notify the kprobe handlers, if the instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_KDB:
		if (kgdb_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (kprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (kprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_BP:
		if (uprobe_breakpoint_handler(regs))
			goto out;
		else
			break;
	case BRK_UPROBE_XOLBP:
		if (uprobe_singlestep_handler(regs))
			goto out;
		else
			break;
	default:
		current->thread.trap_nr = read_csr_excode();
		if (notify_die(DIE_TRAP, "Break", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	}

	switch (bcode) {
	case BRK_BUG:
		bug_handler(regs);
		break;
	case BRK_DIVZERO:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
		break;
	case BRK_OVERFLOW:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
		break;
	default:
		die_if_kernel("Break instruction in kernel code", regs);
		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
		break;
	}

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}
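
/*
 * Encoding note (editorial, not in the original file): a LoongArch
 * "break imm15" instruction carries its code in the low 15 bits of the
 * instruction word, which is why do_bp() recovers it with
 * "opcode & 0x7fff". A hypothetical use of a custom code,
 *
 *	__asm__ __volatile__("break 100");
 *
 * would therefore reach the default case above (assuming 100 is not one
 * of the reserved BRK_* values) and be delivered as SIGTRAP/TRAP_BRKPT.
 */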

asmlinkage void noinstr do_watch(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

#ifndef CONFIG_HAVE_HW_BREAKPOINT
	pr_warn("Hardware watch point handler not implemented!\n");
#else
	if (kgdb_breakpoint_handler(regs))
		goto out;

	if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
		int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
		unsigned long pc = instruction_pointer(regs);
		union loongarch_instruction *ip = (union loongarch_instruction *)pc;

		if (llbit) {
			/*
			 * When an ll-sc combo is encountered, it is regarded
			 * as a single instruction, so don't clear llbit and
			 * reset CSR.FWPS.Skip until the ll-sc execution has
			 * completed.
			 */
			csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
			csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL);
			goto out;
		}

		if (pc == current->thread.single_step) {
			/*
			 * Certain instructions, such as fld.d/fst.d, are
			 * occasionally not skipped even when CSR.FWPS.Skip is
			 * set, so the single-step logic has to compare csr_era
			 * against the value recorded by the last single-step.
			 */
			if (!is_self_loop_ins(ip, regs)) {
				/*
				 * Check whether the target pc of the given
				 * instruction equals the current pc; if so, we
				 * should not set the CSR.FWPS.Skip bit, lest we
				 * break the original instruction stream.
				 */
				csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
				goto out;
			}
		}
	} else {
		breakpoint_handler(regs);
		watchpoint_handler(regs);
	}

	force_sig(SIGTRAP);
out:
#endif
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
	int status = SIGILL;
	unsigned int __maybe_unused opcode;
	unsigned int __user *era = (unsigned int __user *)exception_era(regs);
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	current->thread.trap_nr = read_csr_excode();

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(get_user(opcode, era) < 0)) {
		status = SIGSEGV;
		current->thread.error_code = 1;
	}

	force_sig(status);

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

static void init_restore_fp(void)
{
	if (!used_math()) {
		/* First time FP context user. */
		init_fpu();
	} else {
		/* This task has formerly used the FP context */
		if (!is_fpu_owner())
			own_fpu_inatomic(1);
	}

	BUG_ON(!is_fp_enabled());
}

static void init_restore_lsx(void)
{
	enable_lsx();

	if (!thread_lsx_context_live()) {
		/* First time LSX context user */
		init_restore_fp();
		init_lsx_upper();
		set_thread_flag(TIF_LSX_CTX_LIVE);
	} else {
		if (!is_simd_owner()) {
			if (is_fpu_owner()) {
				restore_lsx_upper(current);
			} else {
				__own_fpu();
				restore_lsx(current);
			}
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
}

static void init_restore_lasx(void)
{
	enable_lasx();

	if (!thread_lasx_context_live()) {
		/* First time LASX context user */
		init_restore_lsx();
		init_lasx_upper();
		set_thread_flag(TIF_LASX_CTX_LIVE);
	} else {
		if (is_fpu_owner() || is_simd_owner()) {
			init_restore_lsx();
			restore_lasx_upper(current);
		} else {
			__own_fpu();
			enable_lsx();
			restore_lasx(current);
		}
	}

	set_thread_flag(TIF_USEDSIMD);

	BUG_ON(!is_fp_enabled());
	BUG_ON(!is_lsx_enabled());
	BUG_ON(!is_lasx_enabled());
}

asmlinkage void noinstr do_fpu(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	die_if_kernel("do_fpu invoked from kernel context!", regs);
	BUG_ON(is_lsx_enabled());
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_fp();
	preempt_enable();

	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lsx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lsx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lsx invoked from kernel context!", regs);
	BUG_ON(is_lasx_enabled());

	preempt_disable();
	init_restore_lsx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_lasx(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	if (!cpu_has_lasx) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_lasx invoked from kernel context!", regs);

	preempt_disable();
	init_restore_lasx();
	preempt_enable();

out:
	local_irq_disable();
	irqentry_exit(regs, state);
}

static void init_restore_lbt(void)
{
	if (!thread_lbt_context_live()) {
		/* First time LBT context user */
		init_lbt();
		set_thread_flag(TIF_LBT_CTX_LIVE);
	} else {
		if (!is_lbt_owner())
			own_lbt_inatomic(1);
	}

	BUG_ON(!is_lbt_enabled());
}

asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	/*
	 * BTD (Binary Translation Disable exception) can be triggered
	 * during FP save/restore if TM (Top Mode) is on, which may
	 * cause irq_enable during 'switch_to'. To avoid this situation
	 * (including the user using 'MOVGR2GCSR' to turn on TM, which
	 * will not trigger the BTE), we need to check PRMD first.
	 */
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_enable();

	if (!cpu_has_lbt) {
		force_sig(SIGILL);
		goto out;
	}
	BUG_ON(is_lbt_enabled());

	preempt_disable();
	init_restore_lbt();
	preempt_enable();

out:
	if (regs->csr_prmd & CSR_PRMD_PIE)
		local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void noinstr do_reserved(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	local_irq_enable();
	/*
	 * Game over - no way to handle this if it ever occurs. Most probably
	 * caused by a fatal error after another hardware/software error.
	 */
	pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
		read_csr_excode(), current->pid, current->comm);
	die_if_kernel("do_reserved exception", regs);
	force_sig(SIGUNUSED);

	local_irq_disable();

	irqentry_exit(regs, state);
}

asmlinkage void cache_parity_error(void)
{
	/* For the moment, report the problem and hang. */
	pr_err("Cache error exception:\n");
	pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
	pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
	panic("Can't handle the cache error!");
}

asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter_rcu();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit_rcu();
}

asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
{
	register int cpu;
	register unsigned long stack;
	irqentry_state_t state = irqentry_enter(regs);

	cpu = smp_processor_id();

	if (on_irq_stack(cpu, sp))
		handle_loongarch_irq(regs);
	else {
		stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;

		/* Save task's sp on IRQ stack for unwinding */
		*(unsigned long *)stack = sp;

		__asm__ __volatile__(
		"move	$s0, $sp		\n" /* Preserve sp */
		"move	$sp, %[stk]		\n" /* Switch stack */
		"move	$a0, %[regs]		\n"
		"bl	handle_loongarch_irq	\n"
		"move	$sp, $s0		\n" /* Restore sp */
		: /* No outputs */
		: [stk] "r" (stack), [regs] "r" (regs)
		: "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
		  "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
		  "memory");
	}

	irqentry_exit(regs, state);
}
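
/*
 * Design note (editorial, not in the original file): the inline asm above
 * parks the task's sp in $s0 because $s0 is callee-saved in the LoongArch
 * ABI and therefore survives the call to handle_loongarch_irq(), while
 * every caller-saved GPR ($a0-$a7, $t0-$t8) is listed as clobbered so the
 * compiler keeps no live values in them across the call.
 */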

unsigned long eentry;
unsigned long tlbrentry;

long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);

static void configure_exception_vector(void)
{
	eentry    = (unsigned long)exception_handlers;
	tlbrentry = (unsigned long)exception_handlers + 80 * VECSIZE;

	csr_write64(eentry, LOONGARCH_CSR_EENTRY);
	csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
	csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
}
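
/*
 * Layout sketch (editorial, not in the original file): exception_handlers
 * is a 64KB-aligned block with room for 128 VECSIZE slots. Its base
 * doubles as the regular exception entry (EENTRY) and the machine error
 * entry (MERRENTRY), while the TLB refill entry (TLBRENTRY) is placed 80
 * slots in, at byte offset 80 * VECSIZE.
 */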

void per_cpu_trap_init(int cpu)
{
	unsigned int i;

	setup_vint_size(VECSIZE);

	configure_exception_vector();

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Initialise exception handlers */
	if (cpu == 0)
		for (i = 0; i < 64; i++)
			set_handler(i * VECSIZE, handle_reserved, VECSIZE);

	tlb_init(cpu);
	cpu_cache_init();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
	memcpy((void *)(eentry + offset), addr, size);
	local_flush_icache_range(eentry + offset, eentry + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
{
	unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_eentry + offset), addr, size);
}

void __init trap_init(void)
{
	long i;

	/* Set interrupt vector handler */
	for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
		set_handler(i * VECSIZE, handle_vint, VECSIZE);

	set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
	set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
	set_handler(EXCCODE_BCE * VECSIZE, handle_bce, VECSIZE);
	set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
	set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
	set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
	set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
	set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
	set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
	set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
	set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
	set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);

	cache_error_setup();

	local_flush_icache_range(eentry, eentry + 0x400);
}