Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Author: Hanlu Li <lihanlu@loongson.cn>
   4 *         Huacai Chen <chenhuacai@loongson.cn>
   5 *
   6 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
   7 *
   8 * Derived from MIPS:
   9 * Copyright (C) 1992 Ross Biro
  10 * Copyright (C) Linus Torvalds
  11 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
  12 * Copyright (C) 1996 David S. Miller
  13 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  14 * Copyright (C) 1999 MIPS Technologies, Inc.
  15 * Copyright (C) 2000 Ulf Carlsson
  16 */
  17#include <linux/kernel.h>
  18#include <linux/audit.h>
  19#include <linux/compiler.h>
  20#include <linux/context_tracking.h>
  21#include <linux/elf.h>
  22#include <linux/errno.h>
  23#include <linux/hw_breakpoint.h>
  24#include <linux/mm.h>
  25#include <linux/nospec.h>
  26#include <linux/ptrace.h>
  27#include <linux/regset.h>
  28#include <linux/sched.h>
  29#include <linux/sched/task_stack.h>
  30#include <linux/security.h>
  31#include <linux/smp.h>
  32#include <linux/stddef.h>
  33#include <linux/seccomp.h>
  34#include <linux/thread_info.h>
  35#include <linux/uaccess.h>
  36
  37#include <asm/byteorder.h>
  38#include <asm/cpu.h>
  39#include <asm/cpu-info.h>
  40#include <asm/fpu.h>
  41#include <asm/lbt.h>
  42#include <asm/loongarch.h>
  43#include <asm/page.h>
  44#include <asm/pgtable.h>
  45#include <asm/processor.h>
  46#include <asm/ptrace.h>
  47#include <asm/reg.h>
  48#include <asm/syscall.h>
  49
  50static void init_fp_ctx(struct task_struct *target)
  51{
  52	/* The target already has context */
  53	if (tsk_used_math(target))
  54		return;
  55
  56	/* Begin with data registers set to all 1s... */
  57	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
  58	set_stopped_child_used_math(target);
  59}
  60
  61/*
  62 * Called by kernel/ptrace.c when detaching..
  63 *
  64 * Make sure single step bits etc are not set.
  65 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
	/* Also drop any pending single-step state. */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
  72
  73/* regset get/set implementations */
  74
  75static int gpr_get(struct task_struct *target,
  76		   const struct user_regset *regset,
  77		   struct membuf to)
  78{
  79	int r;
  80	struct pt_regs *regs = task_pt_regs(target);
  81
  82	r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
  83	r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
  84	r = membuf_write(&to, &regs->csr_era, sizeof(u64));
  85	r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));
  86
  87	return r;
  88}
  89
/* Import the GPR regset; layout mirrors gpr_get(). */
static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err;
	/* Offsets of the fields that follow the 32 GPRs. */
	int a0_start = sizeof(u64) * GPR_NUM;
	int era_start = a0_start + sizeof(u64);
	int badvaddr_start = era_start + sizeof(u64);
	struct pt_regs *regs = task_pt_regs(target);

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->regs,
				 0, a0_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->orig_a0,
				 a0_start, a0_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->csr_era,
				 era_start, era_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->csr_badvaddr,
				 badvaddr_start, badvaddr_start + sizeof(u64));

	return err;
}
 116
 117
 118/*
 119 * Get the general floating-point registers.
 120 */
 121static int gfpr_get(struct task_struct *target, struct membuf *to)
 122{
 123	return membuf_write(to, &target->thread.fpu.fpr,
 124			    sizeof(elf_fpreg_t) * NUM_FPU_REGS);
 125}
 126
 127static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
 128{
 129	int i, r;
 130	u64 fpr_val;
 131
 132	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 133	for (i = 0; i < NUM_FPU_REGS; i++) {
 134		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
 135		r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
 136	}
 137
 138	return r;
 139}
 140
 141/*
 142 * Choose the appropriate helper for general registers, and then copy
 143 * the FCC and FCSR registers separately.
 144 */
 145static int fpr_get(struct task_struct *target,
 146		   const struct user_regset *regset,
 147		   struct membuf to)
 148{
 149	int r;
 150
 151	save_fpu_regs(target);
 152
 153	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 154		r = gfpr_get(target, &to);
 155	else
 156		r = gfpr_get_simd(target, &to);
 157
 158	r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
 159	r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));
 160
 161	return r;
 162}
 163
 164static int gfpr_set(struct task_struct *target,
 165		    unsigned int *pos, unsigned int *count,
 166		    const void **kbuf, const void __user **ubuf)
 167{
 168	return user_regset_copyin(pos, count, kbuf, ubuf,
 169				  &target->thread.fpu.fpr,
 170				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
 171}
 172
/*
 * Import the low 64 bits of each (wider, SIMD-capable) FP register,
 * one register at a time, stopping early when the user buffer runs out.
 */
static int gfpr_set_simd(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	int i, err;
	u64 fpr_val;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		/* Only the low 64 bits are writable via this regset. */
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}
 192
/*
 * Choose the appropriate helper for general registers, and then copy
 * the FCC and FCSR registers separately.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fcsr_start = fcc_start + sizeof(u64);
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));
	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	/* Make sure an FP context exists before writing into it. */
	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
	else
		err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	/* err is 0 here, so "|=" merely accumulates the two results. */
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcc, fcc_start,
				  fcc_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcsr, fcsr_start,
				  fcsr_start + sizeof(u32));

	return err;
}
 228
 229static int cfg_get(struct task_struct *target,
 230		   const struct user_regset *regset,
 231		   struct membuf to)
 232{
 233	int i, r;
 234	u32 cfg_val;
 235
 236	i = 0;
 237	while (to.left > 0) {
 238		cfg_val = read_cpucfg(i++);
 239		r = membuf_write(&to, &cfg_val, sizeof(u32));
 240	}
 241
 242	return r;
 243}
 244
/*
 * CFG registers are read-only.
 */
static int cfg_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	/* Silently accept and discard writes. */
	return 0;
}
 255
 256#ifdef CONFIG_CPU_HAS_LSX
 257
/*
 * Copy the first @live_sz bytes of each FP register into @to, padding
 * each register up to regset->size with 0xff fill words.
 */
static void copy_pad_fprs(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf *to, unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	/* Never copy more than is actually live. */
	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
			membuf_store(to, fill);
		}
	}
}
 277
/*
 * Export the LSX/LASX vector regset, copying only as much context as is
 * live for the task and padding the remainder with 0xff.
 */
static int simd_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;

	/* Flush live FPU/SIMD state into thread.fpu first. */
	save_fpu_regs(target);

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or LSX, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
#ifdef CONFIG_CPU_HAS_LASX
	} else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) {
		/* Copy LSX 128 Bit context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 16);
#endif
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
	}

	return 0;
}
 307
/*
 * Import the LSX/LASX vector regset. When the saved FPR width differs
 * from regset->size, only the overlapping prefix of each register is
 * written.
 */
static int simd_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	unsigned int cp_sz;
	int i, err, start;

	/* Make sure an FP context exists before writing into it. */
	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		/* 'start' tracks the user-buffer offset of register i. */
		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	return err;
}
 339
 340#endif /* CONFIG_CPU_HAS_LSX */
 341
 342#ifdef CONFIG_CPU_HAS_LBT
 343static int lbt_get(struct task_struct *target,
 344		   const struct user_regset *regset,
 345		   struct membuf to)
 346{
 347	int r;
 348
 349	r = membuf_write(&to, &target->thread.lbt.scr0, sizeof(target->thread.lbt.scr0));
 350	r = membuf_write(&to, &target->thread.lbt.scr1, sizeof(target->thread.lbt.scr1));
 351	r = membuf_write(&to, &target->thread.lbt.scr2, sizeof(target->thread.lbt.scr2));
 352	r = membuf_write(&to, &target->thread.lbt.scr3, sizeof(target->thread.lbt.scr3));
 353	r = membuf_write(&to, &target->thread.lbt.eflags, sizeof(u32));
 354	r = membuf_write(&to, &target->thread.fpu.ftop, sizeof(u32));
 355
 356	return r;
 357}
 358
/*
 * Import the LBT regset; layout mirrors lbt_get() (four scratch
 * registers, then 32-bit eflags, then 32-bit ftop).
 */
static int lbt_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err = 0;
	/* scr0-scr3 are assumed contiguous in thread.lbt. */
	const int eflags_start = 4 * sizeof(target->thread.lbt.scr0);
	const int ftop_start = eflags_start + sizeof(u32);

	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.scr0,
				  0, 4 * sizeof(target->thread.lbt.scr0));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.eflags,
				  eflags_start, ftop_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.ftop,
				  ftop_start, ftop_start + sizeof(u32));

	return err;
}
 380#endif /* CONFIG_CPU_HAS_LBT */
 381
 382#ifdef CONFIG_HAVE_HW_BREAKPOINT
 383
 384/*
 385 * Handle hitting a HW-breakpoint.
 386 */
 387static void ptrace_hbptriggered(struct perf_event *bp,
 388				struct perf_sample_data *data,
 389				struct pt_regs *regs)
 390{
 391	int i;
 392	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
 393
 394	for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
 395		if (current->thread.hbp_break[i] == bp)
 396			break;
 397
 398	for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
 399		if (current->thread.hbp_watch[i] == bp)
 400			break;
 401
 402	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
 403}
 404
 405static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
 406					       struct task_struct *tsk,
 407					       unsigned long idx)
 408{
 409	struct perf_event *bp;
 410
 411	switch (note_type) {
 412	case NT_LOONGARCH_HW_BREAK:
 413		if (idx >= LOONGARCH_MAX_BRP)
 414			return ERR_PTR(-EINVAL);
 415		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
 416		bp = tsk->thread.hbp_break[idx];
 417		break;
 418	case NT_LOONGARCH_HW_WATCH:
 419		if (idx >= LOONGARCH_MAX_WRP)
 420			return ERR_PTR(-EINVAL);
 421		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
 422		bp = tsk->thread.hbp_watch[idx];
 423		break;
 424	}
 425
 426	return bp;
 427}
 428
 429static int ptrace_hbp_set_event(unsigned int note_type,
 430				struct task_struct *tsk,
 431				unsigned long idx,
 432				struct perf_event *bp)
 433{
 434	switch (note_type) {
 435	case NT_LOONGARCH_HW_BREAK:
 436		if (idx >= LOONGARCH_MAX_BRP)
 437			return -EINVAL;
 438		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
 439		tsk->thread.hbp_break[idx] = bp;
 440		break;
 441	case NT_LOONGARCH_HW_WATCH:
 442		if (idx >= LOONGARCH_MAX_WRP)
 443			return -EINVAL;
 444		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
 445		tsk->thread.hbp_watch[idx] = bp;
 446		break;
 447	}
 448
 449	return 0;
 450}
 451
/*
 * Lazily create a disabled perf event for breakpoint/watchpoint slot
 * @idx and record it in the thread's slot array.
 */
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	int err, type;
	struct perf_event *bp;
	struct perf_event_attr attr;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_LOONGARCH_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	/*
	 * NOTE(review): if this fails the registered event is not
	 * unregistered — looks like a potential leak; verify whether
	 * callers guarantee idx was already validated.
	 */
	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}
 492
/*
 * Decode a user-supplied control word into perf_event_attr fields,
 * rejecting breakpoint types that don't match the regset's note type.
 */
static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		/* Execute breakpoints may only carry the X type bit. */
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_LOONGARCH_HW_WATCH:
		/* Watchpoints may only carry R/W type bits. */
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}
 522
 523static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
 524{
 525	u8 num;
 526	u64 reg = 0;
 527
 528	switch (note_type) {
 529	case NT_LOONGARCH_HW_BREAK:
 530		num = hw_breakpoint_slots(TYPE_INST);
 531		break;
 532	case NT_LOONGARCH_HW_WATCH:
 533		num = hw_breakpoint_slots(TYPE_DATA);
 534		break;
 535	default:
 536		return -EINVAL;
 537	}
 538
 539	*info = reg | num;
 540
 541	return 0;
 542}
 543
 544static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
 545							struct task_struct *tsk,
 546							unsigned long idx)
 547{
 548	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
 549
 550	if (!bp)
 551		bp = ptrace_hbp_create(note_type, tsk, idx);
 552
 553	return bp;
 554}
 555
 556static int ptrace_hbp_get_ctrl(unsigned int note_type,
 557			       struct task_struct *tsk,
 558			       unsigned long idx, u32 *ctrl)
 559{
 560	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
 561
 562	if (IS_ERR(bp))
 563		return PTR_ERR(bp);
 564
 565	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
 566
 567	return 0;
 568}
 569
 570static int ptrace_hbp_get_mask(unsigned int note_type,
 571			       struct task_struct *tsk,
 572			       unsigned long idx, u64 *mask)
 573{
 574	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
 575
 576	if (IS_ERR(bp))
 577		return PTR_ERR(bp);
 578
 579	*mask = bp ? counter_arch_bp(bp)->mask : 0;
 580
 581	return 0;
 582}
 583
 584static int ptrace_hbp_get_addr(unsigned int note_type,
 585			       struct task_struct *tsk,
 586			       unsigned long idx, u64 *addr)
 587{
 588	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
 589
 590	if (IS_ERR(bp))
 591		return PTR_ERR(bp);
 592
 593	*addr = bp ? counter_arch_bp(bp)->address : 0;
 594
 595	return 0;
 596}
 597
/*
 * Apply a user-supplied control word to slot @idx, re-validating the
 * event through modify_user_hw_breakpoint().
 */
static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	/* Work on a copy so a failed decode leaves bp->attr untouched. */
	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}
 619
/*
 * Set the address mask for slot @idx. The mask lives in the
 * arch_hw_breakpoint info, not in perf_event_attr, so the attr is
 * re-applied unchanged to trigger revalidation.
 */
static int ptrace_hbp_set_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 mask)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	info = counter_arch_bp(bp);
	info->mask = mask;

	return modify_user_hw_breakpoint(bp, &attr);
}
 638
 639static int ptrace_hbp_set_addr(unsigned int note_type,
 640			       struct task_struct *tsk,
 641			       unsigned long idx, u64 addr)
 642{
 643	struct perf_event *bp;
 644	struct perf_event_attr attr;
 645
 646	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
 647	if (IS_ERR(bp))
 648		return PTR_ERR(bp);
 649
 650	attr = bp->attr;
 651	attr.bp_addr = addr;
 652
 653	return modify_user_hw_breakpoint(bp, &attr);
 654}
 655
 656#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
 657#define PTRACE_HBP_MASK_SZ	sizeof(u64)
 658#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
 659#define PTRACE_HBP_PAD_SZ	sizeof(u32)
 660
/*
 * Export the hardware breakpoint/watchpoint regset: a resource-info
 * word followed by (addr, mask, ctrl, pad) tuples, one per slot, until
 * the destination buffer is full.
 */
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	u64 info;
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));

	/* (address, mask, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;

		membuf_store(&to, addr);
		membuf_store(&to, mask);
		membuf_store(&to, ctrl);
		/* Padding keeps each tuple 8-byte aligned. */
		membuf_zero(&to, sizeof(u32));
		idx++;
	}

	return 0;
}
 701
 702static int hw_break_set(struct task_struct *target,
 703			const struct user_regset *regset,
 704			unsigned int pos, unsigned int count,
 705			const void *kbuf, const void __user *ubuf)
 706{
 707	u32 ctrl;
 708	u64 addr, mask;
 709	int ret, idx = 0, offset, limit;
 710	unsigned int note_type = regset->core_note_type;
 711
 712	/* Resource info */
 713	offset = offsetof(struct user_watch_state, dbg_regs);
 714	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
 715
 716	/* (address, mask, ctrl) registers */
 717	limit = regset->n * regset->size;
 718	while (count && offset < limit) {
 719		if (count < PTRACE_HBP_ADDR_SZ)
 720			return -EINVAL;
 721
 722		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
 723					 offset, offset + PTRACE_HBP_ADDR_SZ);
 724		if (ret)
 725			return ret;
 726
 727		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
 728		if (ret)
 729			return ret;
 730		offset += PTRACE_HBP_ADDR_SZ;
 731
 732		if (!count)
 733			break;
 734
 735		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
 736					 offset, offset + PTRACE_HBP_MASK_SZ);
 737		if (ret)
 738			return ret;
 739
 740		ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
 741		if (ret)
 742			return ret;
 743		offset += PTRACE_HBP_MASK_SZ;
 744
 745		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
 746					 offset, offset + PTRACE_HBP_CTRL_SZ);
 747		if (ret)
 748			return ret;
 749
 750		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
 751		if (ret)
 752			return ret;
 753		offset += PTRACE_HBP_CTRL_SZ;
 754
 755		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
 756					  offset, offset + PTRACE_HBP_PAD_SZ);
 757		offset += PTRACE_HBP_PAD_SZ;
 758
 759		idx++;
 760	}
 761
 762	return 0;
 763}
 764
 765#endif
 766
/* Maps a register name to its byte offset within struct pt_regs. */
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
 774
/* Name -> pt_regs offset table; terminated by REG_OFFSET_END. */
static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(orig_a0, orig_a0),
	REG_OFFSET_NAME(csr_era, csr_era),
	REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
	REG_OFFSET_NAME(csr_crmd, csr_crmd),
	REG_OFFSET_NAME(csr_prmd, csr_prmd),
	REG_OFFSET_NAME(csr_euen, csr_euen),
	REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
	REG_OFFSET_NAME(csr_estat, csr_estat),
	REG_OFFSET_END,
};
 818
 819/**
 820 * regs_query_register_offset() - query register offset from its name
 821 * @name:       the name of a register
 822 *
 823 * regs_query_register_offset() returns the offset of a register in struct
 824 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 825 */
 826int regs_query_register_offset(const char *name)
 827{
 828	const struct pt_regs_offset *roff;
 829
 830	for (roff = regoffset_table; roff->name != NULL; roff++)
 831		if (!strcmp(roff->name, name))
 832			return roff->offset;
 833	return -EINVAL;
 834}
 835
/* Indices into loongarch64_regsets[] below. */
enum loongarch_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_CPUCFG,
#ifdef CONFIG_CPU_HAS_LSX
	REGSET_LSX,
#endif
#ifdef CONFIG_CPU_HAS_LASX
	REGSET_LASX,
#endif
#ifdef CONFIG_CPU_HAS_LBT
	REGSET_LBT,
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};
 854
/* Regset descriptors wiring the get/set handlers above to note types. */
static const struct user_regset loongarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(elf_greg_t),
		.align		= sizeof(elf_greg_t),
		.regset_get	= gpr_get,
		.set		= gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
	[REGSET_CPUCFG] = {
		.core_note_type	= NT_LOONGARCH_CPUCFG,
		.n		= 64,
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= cfg_get,
		.set		= cfg_set,
	},
#ifdef CONFIG_CPU_HAS_LSX
	[REGSET_LSX] = {
		.core_note_type	= NT_LOONGARCH_LSX,
		.n		= NUM_FPU_REGS,
		.size		= 16,
		.align		= 16,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LASX
	[REGSET_LASX] = {
		.core_note_type	= NT_LOONGARCH_LASX,
		.n		= NUM_FPU_REGS,
		.size		= 32,
		.align		= 32,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LBT
	[REGSET_LBT] = {
		.core_note_type	= NT_LOONGARCH_LBT,
		.n		= 5,
		.size		= sizeof(u64),
		.align		= sizeof(u64),
		.regset_get	= lbt_get,
		.set		= lbt_set,
	},
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_LOONGARCH_HW_BREAK,
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_LOONGARCH_HW_WATCH,
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};
 929
/* The single regset view exposed to ptrace/core-dump consumers. */
static const struct user_regset_view user_loongarch64_view = {
	.name		= "loongarch64",
	.e_machine	= ELF_ARCH,
	.regsets	= loongarch64_regsets,
	.n		= ARRAY_SIZE(loongarch64_regsets),
};
 936
 937
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	/* All tasks use the same 64-bit view; @task is intentionally unused. */
	return &user_loongarch64_view;
}
 942
 943static inline int read_user(struct task_struct *target, unsigned long addr,
 944			    unsigned long __user *data)
 945{
 946	unsigned long tmp = 0;
 947
 948	switch (addr) {
 949	case 0 ... 31:
 950		tmp = task_pt_regs(target)->regs[addr];
 951		break;
 952	case ARG0:
 953		tmp = task_pt_regs(target)->orig_a0;
 954		break;
 955	case PC:
 956		tmp = task_pt_regs(target)->csr_era;
 957		break;
 958	case BADVADDR:
 959		tmp = task_pt_regs(target)->csr_badvaddr;
 960		break;
 961	default:
 962		return -EIO;
 963	}
 964
 965	return put_user(tmp, data);
 966}
 967
 968static inline int write_user(struct task_struct *target, unsigned long addr,
 969			    unsigned long data)
 970{
 971	switch (addr) {
 972	case 0 ... 31:
 973		task_pt_regs(target)->regs[addr] = data;
 974		break;
 975	case ARG0:
 976		task_pt_regs(target)->orig_a0 = data;
 977		break;
 978	case PC:
 979		task_pt_regs(target)->csr_era = data;
 980		break;
 981	case BADVADDR:
 982		task_pt_regs(target)->csr_badvaddr = data;
 983		break;
 984	default:
 985		return -EIO;
 986	}
 987
 988	return 0;
 989}
 990
 991long arch_ptrace(struct task_struct *child, long request,
 992		 unsigned long addr, unsigned long data)
 993{
 994	int ret;
 995	unsigned long __user *datap = (void __user *) data;
 996
 997	switch (request) {
 998	case PTRACE_PEEKUSR:
 999		ret = read_user(child, addr, datap);
1000		break;
1001
1002	case PTRACE_POKEUSR:
1003		ret = write_user(child, addr, data);
1004		break;
1005
1006	default:
1007		ret = ptrace_request(child, request, addr, data);
1008		break;
1009	}
1010
1011	return ret;
1012}
1013
1014#ifdef CONFIG_HAVE_HW_BREAKPOINT
1015static void ptrace_triggered(struct perf_event *bp,
1016		      struct perf_sample_data *data, struct pt_regs *regs)
1017{
1018	struct perf_event_attr attr;
1019
1020	attr = bp->attr;
1021	attr.disabled = true;
1022	modify_user_hw_breakpoint(bp, &attr);
1023}
1024
/*
 * Arm an execute breakpoint at @addr on hbp_break[0] to emulate
 * single-stepping; the event is created lazily on first use and
 * re-enabled/re-targeted on subsequent calls.
 */
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;
	struct thread_struct *thread = &tsk->thread;

	bp = thread->hbp_break[0];
	if (!bp) {
		/* First use: register a fresh execute breakpoint. */
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_8;
		attr.bp_type = HW_BREAKPOINT_X;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->hbp_break[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;

		/* Reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;

		csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
	}
	/* Match any address in the task's range. */
	info = counter_arch_bp(bp);
	info->mask = TASK_SIZE - 1;

	return 0;
}
1065
1066/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);

	/* Arm a breakpoint at the current PC and remember where. */
	set_single_step(task, task_pt_regs(task)->csr_era);
	task->thread.single_step = task_pt_regs(task)->csr_era;
	set_ti_thread_flag(ti, TIF_SINGLESTEP);
}
1075
void user_disable_single_step(struct task_struct *task)
{
	/* The breakpoint itself is disabled by ptrace_triggered(). */
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
1080#endif
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Author: Hanlu Li <lihanlu@loongson.cn>
   4 *         Huacai Chen <chenhuacai@loongson.cn>
   5 *
   6 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
   7 *
   8 * Derived from MIPS:
   9 * Copyright (C) 1992 Ross Biro
  10 * Copyright (C) Linus Torvalds
  11 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
  12 * Copyright (C) 1996 David S. Miller
  13 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  14 * Copyright (C) 1999 MIPS Technologies, Inc.
  15 * Copyright (C) 2000 Ulf Carlsson
  16 */
  17#include <linux/kernel.h>
  18#include <linux/audit.h>
  19#include <linux/compiler.h>
  20#include <linux/context_tracking.h>
  21#include <linux/elf.h>
  22#include <linux/errno.h>
  23#include <linux/hw_breakpoint.h>
  24#include <linux/mm.h>
  25#include <linux/nospec.h>
  26#include <linux/ptrace.h>
  27#include <linux/regset.h>
  28#include <linux/sched.h>
  29#include <linux/sched/task_stack.h>
  30#include <linux/security.h>
  31#include <linux/smp.h>
  32#include <linux/stddef.h>
  33#include <linux/seccomp.h>
  34#include <linux/thread_info.h>
  35#include <linux/uaccess.h>
  36
  37#include <asm/byteorder.h>
  38#include <asm/cpu.h>
  39#include <asm/cpu-info.h>
  40#include <asm/fpu.h>
  41#include <asm/lbt.h>
  42#include <asm/loongarch.h>
  43#include <asm/page.h>
  44#include <asm/pgtable.h>
  45#include <asm/processor.h>
  46#include <asm/ptrace.h>
  47#include <asm/reg.h>
  48#include <asm/syscall.h>
  49
/*
 * Lazily give @target an FPU context before ptrace writes to it; no-op if
 * the task has already used the FPU.
 */
static void init_fp_ctx(struct task_struct *target)
{
	/* The target already has context */
	if (tsk_used_math(target))
		return;

	/* Begin with data registers set to all 1s... */
	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
	set_stopped_child_used_math(target);
}
  60
  61/*
  62 * Called by kernel/ptrace.c when detaching..
  63 *
  64 * Make sure single step bits etc are not set.
  65 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
	/* Also stop any single-step that the tracer left armed. */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
  72
  73/* regset get/set implementations */
  74
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;
	struct pt_regs *regs = task_pt_regs(target);

	/*
	 * Dump layout: r0-r31, then orig_a0, csr_era and csr_badvaddr.
	 * Each membuf_write() returns the space left in @to, so only the
	 * final value of r is meaningful.
	 */
	r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
	r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
	r = membuf_write(&to, &regs->csr_era, sizeof(u64));
	r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));

	return r;
}
  89
static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err;
	/* Field offsets mirror the layout produced by gpr_get(). */
	int a0_start = sizeof(u64) * GPR_NUM;
	int era_start = a0_start + sizeof(u64);
	int badvaddr_start = era_start + sizeof(u64);
	struct pt_regs *regs = task_pt_regs(target);

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->regs,
				 0, a0_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->orig_a0,
				 a0_start, a0_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->csr_era,
				 era_start, era_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->csr_badvaddr,
				 badvaddr_start, badvaddr_start + sizeof(u64));

	return err;
}
 116
 117
 118/*
 119 * Get the general floating-point registers.
 120 */
 121static int gfpr_get(struct task_struct *target, struct membuf *to)
 122{
 123	return membuf_write(to, &target->thread.fpu.fpr,
 124			    sizeof(elf_fpreg_t) * NUM_FPU_REGS);
 125}
 126
static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
{
	int i, r;
	u64 fpr_val;

	/*
	 * SIMD-capable builds make fpr[] slots wider than elf_fpreg_t, so
	 * extract only the low 64 bits of each register.
	 */
	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++) {
		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
		r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
	}

	return r;
}
 140
 141/*
 142 * Choose the appropriate helper for general registers, and then copy
 143 * the FCC and FCSR registers separately.
 144 */
 145static int fpr_get(struct task_struct *target,
 146		   const struct user_regset *regset,
 147		   struct membuf to)
 148{
 149	int r;
 150
 151	save_fpu_regs(target);
 152
 153	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 154		r = gfpr_get(target, &to);
 155	else
 156		r = gfpr_get_simd(target, &to);
 157
 158	r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
 159	r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));
 160
 161	return r;
 162}
 163
static int gfpr_set(struct task_struct *target,
		    unsigned int *pos, unsigned int *count,
		    const void **kbuf, const void __user **ubuf)
{
	/* fpr[] slots match elf_fpreg_t exactly: copy them in as one block. */
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu.fpr,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}
 172
static int gfpr_set_simd(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	int i, err;
	u64 fpr_val;

	/* Wider SIMD slots: store each 64-bit value into the low half of fpr[i]. */
	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}
 192
 193/*
 194 * Choose the appropriate helper for general registers, and then copy
 195 * the FCC register separately.
 196 */
 197static int fpr_set(struct task_struct *target,
 198		   const struct user_regset *regset,
 199		   unsigned int pos, unsigned int count,
 200		   const void *kbuf, const void __user *ubuf)
 201{
 202	const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 203	const int fcsr_start = fcc_start + sizeof(u64);
 204	int err;
 205
 206	BUG_ON(count % sizeof(elf_fpreg_t));
 207	if (pos + count > sizeof(elf_fpregset_t))
 208		return -EIO;
 209
 210	init_fp_ctx(target);
 211
 212	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
 213		err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
 214	else
 215		err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
 216	if (err)
 217		return err;
 218
 219	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 220				  &target->thread.fpu.fcc, fcc_start,
 221				  fcc_start + sizeof(u64));
 222	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
 223				  &target->thread.fpu.fcsr, fcsr_start,
 224				  fcsr_start + sizeof(u32));
 225
 226	return err;
 227}
 228
 229static int cfg_get(struct task_struct *target,
 230		   const struct user_regset *regset,
 231		   struct membuf to)
 232{
 233	int i, r;
 234	u32 cfg_val;
 235
 236	i = 0;
 237	while (to.left > 0) {
 238		cfg_val = read_cpucfg(i++);
 239		r = membuf_write(&to, &cfg_val, sizeof(u32));
 240	}
 241
 242	return r;
 243}
 244
 245/*
 246 * CFG registers are read-only.
 247 */
 248static int cfg_set(struct task_struct *target,
 249		   const struct user_regset *regset,
 250		   unsigned int pos, unsigned int count,
 251		   const void *kbuf, const void __user *ubuf)
 252{
 253	return 0;
 254}
 255
 256#ifdef CONFIG_CPU_HAS_LSX
 257
/*
 * Copy the first @live_sz bytes of each FPR into @to and pad each slot up
 * to regset->size with 0xff bytes.
 */
static void copy_pad_fprs(struct task_struct *target,
			 const struct user_regset *regset,
			 struct membuf *to, unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	/* Padding is emitted in 8-byte units, so it must be a multiple of 8. */
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
			membuf_store(to, fill);
		}
	}
}
 277
/*
 * Dump the vector (LSX/LASX) registers, padding any state the task has
 * never materialized with 0xff bytes.
 */
static int simd_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;

	/* Flush the live FPU/SIMD state into the thread struct first. */
	save_fpu_regs(target);

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or LSX, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
#ifdef CONFIG_CPU_HAS_LASX
	} else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) {
		/* Copy LSX 128 Bit context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 16);
#endif
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
	}

	return 0;
}
 307
/*
 * Write the vector (LSX/LASX) registers. If the in-kernel slots are wider
 * than the regset's view, only the low bytes of each slot are updated.
 */
static int simd_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	unsigned int cp_sz;
	int i, err, start;

	/* Make sure the target has an FPU context before writing into it. */
	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		/* @start tracks the source offset of each regset-sized slot. */
		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	return err;
}
 339
 340#endif /* CONFIG_CPU_HAS_LSX */
 341
 342#ifdef CONFIG_CPU_HAS_LBT
/*
 * Dump the LBT (binary translation) state.
 * Layout: scr0-scr3 (4 x u64), then 32-bit eflags and 32-bit ftop.
 */
static int lbt_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;

	r = membuf_write(&to, &target->thread.lbt.scr0, sizeof(target->thread.lbt.scr0));
	r = membuf_write(&to, &target->thread.lbt.scr1, sizeof(target->thread.lbt.scr1));
	r = membuf_write(&to, &target->thread.lbt.scr2, sizeof(target->thread.lbt.scr2));
	r = membuf_write(&to, &target->thread.lbt.scr3, sizeof(target->thread.lbt.scr3));
	r = membuf_write(&to, &target->thread.lbt.eflags, sizeof(u32));
	r = membuf_write(&to, &target->thread.fpu.ftop, sizeof(u32));

	return r;
}
 358
/*
 * Write the LBT state; offsets mirror the layout produced by lbt_get().
 */
static int lbt_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err = 0;
	const int eflags_start = 4 * sizeof(target->thread.lbt.scr0);
	const int ftop_start = eflags_start + sizeof(u32);

	/* scr0-scr3 are contiguous in thread.lbt, so copy them in one go. */
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.scr0,
				  0, 4 * sizeof(target->thread.lbt.scr0));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.eflags,
				  eflags_start, ftop_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.ftop,
				  ftop_start, ftop_start + sizeof(u32));

	return err;
}
 380#endif /* CONFIG_CPU_HAS_LBT */
 381
 382#ifdef CONFIG_HAVE_HW_BREAKPOINT
 383
 384/*
 385 * Handle hitting a HW-breakpoint.
 386 */
 387static void ptrace_hbptriggered(struct perf_event *bp,
 388				struct perf_sample_data *data,
 389				struct pt_regs *regs)
 390{
 391	int i;
 392	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
 393
 394	for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
 395		if (current->thread.hbp_break[i] == bp)
 396			break;
 397
 398	for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
 399		if (current->thread.hbp_watch[i] == bp)
 400			break;
 401
 402	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
 403}
 404
 405static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
 406					       struct task_struct *tsk,
 407					       unsigned long idx)
 408{
 409	struct perf_event *bp;
 410
 411	switch (note_type) {
 412	case NT_LOONGARCH_HW_BREAK:
 413		if (idx >= LOONGARCH_MAX_BRP)
 414			return ERR_PTR(-EINVAL);
 415		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
 416		bp = tsk->thread.hbp_break[idx];
 417		break;
 418	case NT_LOONGARCH_HW_WATCH:
 419		if (idx >= LOONGARCH_MAX_WRP)
 420			return ERR_PTR(-EINVAL);
 421		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
 422		bp = tsk->thread.hbp_watch[idx];
 423		break;
 424	}
 425
 426	return bp;
 427}
 428
 429static int ptrace_hbp_set_event(unsigned int note_type,
 430				struct task_struct *tsk,
 431				unsigned long idx,
 432				struct perf_event *bp)
 433{
 434	switch (note_type) {
 435	case NT_LOONGARCH_HW_BREAK:
 436		if (idx >= LOONGARCH_MAX_BRP)
 437			return -EINVAL;
 438		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
 439		tsk->thread.hbp_break[idx] = bp;
 440		break;
 441	case NT_LOONGARCH_HW_WATCH:
 442		if (idx >= LOONGARCH_MAX_WRP)
 443			return -EINVAL;
 444		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
 445		tsk->thread.hbp_watch[idx] = bp;
 446		break;
 447	}
 448
 449	return 0;
 450}
 451
/*
 * Register a new, initially disabled perf event for slot @idx of @tsk and
 * store it in the task's breakpoint/watchpoint table.
 */
static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	int err, type;
	struct perf_event *bp;
	struct perf_event_attr attr;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_LOONGARCH_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		/* NOTE(review): @bp is not unregistered on this path; @idx was
		   already validated upstream, so it should be unreachable. */
		return ERR_PTR(err);

	return bp;
}
 492
/*
 * Translate a user-supplied ctrl word into perf attr fields, rejecting
 * breakpoint types that don't belong to the regset being written.
 */
static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type, offset;

	err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
	if (err)
		return err;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		/* Execute breakpoints may only carry the X type bit. */
		if ((type & HW_BREAKPOINT_X) != type)
			return -EINVAL;
		break;
	case NT_LOONGARCH_HW_WATCH:
		/* Watchpoints may only carry R/W type bits. */
		if ((type & HW_BREAKPOINT_RW) != type)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	attr->bp_len	= len;
	attr->bp_type	= type;
	attr->bp_addr	+= offset;

	return 0;
}
 522
/*
 * Report hardware debug resources: the number of available slots is
 * packed into the low byte of *info, upper bits are currently zero.
 */
static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
{
	u8 num;
	u64 reg = 0;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_LOONGARCH_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	*info = reg | num;

	return 0;
}
 543
/*
 * Fetch the perf event for slot @idx, creating it on first use.
 */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}
 555
/*
 * Read the encoded ctrl word for slot @idx; 0 if the slot is unused.
 */
static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;

	return 0;
}
 569
/*
 * Read the address mask for slot @idx; 0 if the slot is unused.
 */
static int ptrace_hbp_get_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *mask)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*mask = bp ? counter_arch_bp(bp)->mask : 0;

	return 0;
}
 583
/*
 * Read the trigger address for slot @idx; 0 if the slot is unused.
 */
static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;

	return 0;
}
 597
/*
 * Decode @uctrl and apply it to slot @idx, creating the underlying perf
 * event on first use.
 */
static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	/* Modify a copy of the attr and hand it back to perf to validate. */
	attr = bp->attr;
	decode_ctrl_reg(uctrl, &ctrl);
	err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
	if (err)
		return err;

	return modify_user_hw_breakpoint(bp, &attr);
}
 619
/*
 * Set the address mask of slot @idx, creating the perf event on first use.
 */
static int ptrace_hbp_set_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 mask)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	/* The mask lives in the arch info; perf revalidates the unchanged attr. */
	attr = bp->attr;
	info = counter_arch_bp(bp);
	info->mask = mask;

	return modify_user_hw_breakpoint(bp, &attr);
}
 638
/*
 * Set the trigger address of slot @idx, creating the perf event on first use.
 */
static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	attr.bp_addr = addr;

	return modify_user_hw_breakpoint(bp, &attr);
}
 655
 656#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
 657#define PTRACE_HBP_MASK_SZ	sizeof(u64)
 658#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
 659#define PTRACE_HBP_PAD_SZ	sizeof(u32)
 660
/*
 * Dump the hardware debug regset: one resource-info word followed by an
 * (addr, mask, ctrl, pad) record per slot until the buffer is full.
 */
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	u64 info;
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));

	/* (address, mask, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;

		membuf_store(&to, addr);
		membuf_store(&to, mask);
		membuf_store(&to, ctrl);
		/* Explicit u32 padding keeps each record 8-byte aligned. */
		membuf_zero(&to, sizeof(u32));
		idx++;
	}

	return 0;
}
 701
/*
 * Write the hardware debug regset: skip the read-only resource-info word,
 * then apply each (addr, mask, ctrl) record to the corresponding slot.
 */
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0, offset, limit;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	offset = offsetof(struct user_watch_state, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, mask, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		/* A write may legitimately end right after the address field. */
		if (!count)
			break;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
					 offset, offset + PTRACE_HBP_MASK_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
		if (ret)
			return ret;
		offset += PTRACE_HBP_MASK_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		/* Skip the u32 padding that keeps records 8-byte aligned. */
		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;

		idx++;
	}

	return 0;
}
 764
 765#endif
 766
struct pt_regs_offset {
	const char *name;	/* user-visible register name */
	int offset;		/* byte offset of the field in struct pt_regs */
};

#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

/* Name -> pt_regs offset map consumed by regs_query_register_offset(). */
static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(orig_a0, orig_a0),
	REG_OFFSET_NAME(csr_era, csr_era),
	REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
	REG_OFFSET_NAME(csr_crmd, csr_crmd),
	REG_OFFSET_NAME(csr_prmd, csr_prmd),
	REG_OFFSET_NAME(csr_euen, csr_euen),
	REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
	REG_OFFSET_NAME(csr_estat, csr_estat),
	REG_OFFSET_END,
};
 818
 819/**
 820 * regs_query_register_offset() - query register offset from its name
 821 * @name:       the name of a register
 822 *
 823 * regs_query_register_offset() returns the offset of a register in struct
 824 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 825 */
 826int regs_query_register_offset(const char *name)
 827{
 828	const struct pt_regs_offset *roff;
 829
 830	for (roff = regoffset_table; roff->name != NULL; roff++)
 831		if (!strcmp(roff->name, name))
 832			return roff->offset;
 833	return -EINVAL;
 834}
 835
/* Positions of the regsets within loongarch64_regsets[]. */
enum loongarch_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_CPUCFG,
#ifdef CONFIG_CPU_HAS_LSX
	REGSET_LSX,
#endif
#ifdef CONFIG_CPU_HAS_LASX
	REGSET_LASX,
#endif
#ifdef CONFIG_CPU_HAS_LBT
	REGSET_LBT,
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};
 854
/* Regset descriptions (note type, geometry, accessors) for the 64-bit view. */
static const struct user_regset loongarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(elf_greg_t),
		.align		= sizeof(elf_greg_t),
		.regset_get	= gpr_get,
		.set		= gpr_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
	[REGSET_CPUCFG] = {
		.core_note_type	= NT_LOONGARCH_CPUCFG,
		.n		= 64,
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= cfg_get,
		.set		= cfg_set,
	},
#ifdef CONFIG_CPU_HAS_LSX
	[REGSET_LSX] = {
		.core_note_type	= NT_LOONGARCH_LSX,
		.n		= NUM_FPU_REGS,
		.size		= 16,	/* 128-bit vector registers */
		.align		= 16,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LASX
	[REGSET_LASX] = {
		.core_note_type	= NT_LOONGARCH_LASX,
		.n		= NUM_FPU_REGS,
		.size		= 32,	/* 256-bit vector registers */
		.align		= 32,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LBT
	[REGSET_LBT] = {
		.core_note_type	= NT_LOONGARCH_LBT,
		.n		= 5,	/* scr0-scr3 + packed eflags/ftop */
		.size		= sizeof(u64),
		.align		= sizeof(u64),
		.regset_get	= lbt_get,
		.set		= lbt_set,
	},
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_LOONGARCH_HW_BREAK,
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_LOONGARCH_HW_WATCH,
		.n = sizeof(struct user_watch_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.regset_get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};
 929
/* The single regset view exposed for LoongArch (64-bit) tasks. */
static const struct user_regset_view user_loongarch64_view = {
	.name		= "loongarch64",
	.e_machine	= ELF_ARCH,
	.regsets	= loongarch64_regsets,
	.n		= ARRAY_SIZE(loongarch64_regsets),
};
 936
 937
/* All tasks use the same 64-bit view regardless of @task. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_loongarch64_view;
}
 942
 943static inline int read_user(struct task_struct *target, unsigned long addr,
 944			    unsigned long __user *data)
 945{
 946	unsigned long tmp = 0;
 947
 948	switch (addr) {
 949	case 0 ... 31:
 950		tmp = task_pt_regs(target)->regs[addr];
 951		break;
 952	case ARG0:
 953		tmp = task_pt_regs(target)->orig_a0;
 954		break;
 955	case PC:
 956		tmp = task_pt_regs(target)->csr_era;
 957		break;
 958	case BADVADDR:
 959		tmp = task_pt_regs(target)->csr_badvaddr;
 960		break;
 961	default:
 962		return -EIO;
 963	}
 964
 965	return put_user(tmp, data);
 966}
 967
 968static inline int write_user(struct task_struct *target, unsigned long addr,
 969			    unsigned long data)
 970{
 971	switch (addr) {
 972	case 0 ... 31:
 973		task_pt_regs(target)->regs[addr] = data;
 974		break;
 975	case ARG0:
 976		task_pt_regs(target)->orig_a0 = data;
 977		break;
 978	case PC:
 979		task_pt_regs(target)->csr_era = data;
 980		break;
 981	case BADVADDR:
 982		task_pt_regs(target)->csr_badvaddr = data;
 983		break;
 984	default:
 985		return -EIO;
 986	}
 987
 988	return 0;
 989}
 990
 991long arch_ptrace(struct task_struct *child, long request,
 992		 unsigned long addr, unsigned long data)
 993{
 994	int ret;
 995	unsigned long __user *datap = (void __user *) data;
 996
 997	switch (request) {
 998	case PTRACE_PEEKUSR:
 999		ret = read_user(child, addr, datap);
1000		break;
1001
1002	case PTRACE_POKEUSR:
1003		ret = write_user(child, addr, data);
1004		break;
1005
1006	default:
1007		ret = ptrace_request(child, request, addr, data);
1008		break;
1009	}
1010
1011	return ret;
1012}
1013
1014#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Single-step breakpoint fired: disable the event so it triggers only once.
 */
static void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
1024
/*
 * Emulate single-stepping with hardware instruction breakpoint 0: arm it
 * on @addr and widen its address mask so the next executed instruction
 * traps (handled by ptrace_triggered(), which disarms it again).
 */
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;
	struct thread_struct *thread = &tsk->thread;

	bp = thread->hbp_break[0];
	if (!bp) {
		/* First use: register a fresh instruction breakpoint. */
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_8;
		attr.bp_type = HW_BREAKPOINT_X;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->hbp_break[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;

		/* Reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;

		/* Also update the live IB0 address CSR for the reused event. */
		csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
	}
	/* Full address mask: the compare ignores these bits, see comment above. */
	info = counter_arch_bp(bp);
	info->mask = TASK_SIZE - 1;

	return 0;
}
1065
/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);

	/* Arm hardware instruction breakpoint 0 at the current PC (csr_era). */
	set_single_step(task, task_pt_regs(task)->csr_era);
	/* Remember the stepped address and mark the task as single-stepping. */
	task->thread.single_step = task_pt_regs(task)->csr_era;
	set_ti_thread_flag(ti, TIF_SINGLESTEP);
}
1075
void user_disable_single_step(struct task_struct *task)
{
	/* Only the flag is cleared here; the perf event is managed elsewhere. */
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
1080#endif