arch/riscv/kernel/kgdb.c (v5.9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/ptrace.h>
#include <linux/kdebug.h>
#include <linux/bug.h>
#include <linux/kgdb.h>
#include <linux/irqflags.h>
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <asm/gdb_xml.h>
#include <asm/parse_asm.h>

enum {
	NOT_KGDB_BREAK = 0,
	KGDB_SW_BREAK,
	KGDB_COMPILED_BREAK,
	KGDB_SW_SINGLE_STEP
};

static unsigned long stepped_address;
static unsigned int stepped_opcode;

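/*
 * Matchers for every control-transfer instruction the software single-step
 * logic below has to recognise.  DECLARE_INSN() (from <asm/parse_asm.h>)
 * expands to an is_<name>_insn() predicate built from the instruction's
 * MATCH/MASK pair.
 */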
#if __riscv_xlen == 32
/* C.JAL is an RV32C-only instruction */
DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
#else
#define is_c_jal_insn(opcode) 0
#endif
DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)

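/*
 * Standard encodings carry a 5-bit register field (x0-x31); the compressed
 * "short" forms encode only x8-x15 in 3 bits, hence the "+ 8" below.
 */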
static int decode_register_index(unsigned long opcode, int offset)
{
	return (opcode >> offset) & 0x1F;
}

static int decode_register_index_short(unsigned long opcode, int offset)
{
	return ((opcode >> offset) & 0x7) + 8;
}

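/*
 * Decode the instruction at regs->epc and work out which address will be
 * executed next: branch and jump targets are computed from the register
 * file and the extracted immediate, anything else falls through to
 * pc + 2 (compressed) or pc + 4 (standard).
 */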
/* Calculate the new address for after a step */
static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
{
	unsigned long pc = regs->epc;
	unsigned long *regs_ptr = (unsigned long *)regs;
	unsigned int rs1_num, rs2_num;
	int op_code;

	if (get_kernel_nofault(op_code, (void *)pc))
		return -EINVAL;
	if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) {
		if (is_c_jalr_insn(op_code) || is_c_jr_insn(op_code)) {
			rs1_num = decode_register_index(op_code, RVC_C2_RS1_OPOFF);
			*next_addr = regs_ptr[rs1_num];
		} else if (is_c_j_insn(op_code) || is_c_jal_insn(op_code)) {
			*next_addr = EXTRACT_RVC_J_IMM(op_code) + pc;
		} else if (is_c_beqz_insn(op_code)) {
			rs1_num = decode_register_index_short(op_code,
							      RVC_C1_RS1_OPOFF);
			if (!rs1_num || regs_ptr[rs1_num] == 0)
				*next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
			else
				*next_addr = pc + 2;
		} else if (is_c_bnez_insn(op_code)) {
			rs1_num =
			    decode_register_index_short(op_code, RVC_C1_RS1_OPOFF);
			if (rs1_num && regs_ptr[rs1_num] != 0)
				*next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
			else
				*next_addr = pc + 2;
		} else {
			*next_addr = pc + 2;
		}
	} else {
		if ((op_code & __INSN_OPCODE_MASK) == __INSN_BRANCH_OPCODE) {
			bool result = false;
			long imm = EXTRACT_BTYPE_IMM(op_code);
			unsigned long rs1_val = 0, rs2_val = 0;

			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
			rs2_num = decode_register_index(op_code, RVG_RS2_OPOFF);
			if (rs1_num)
				rs1_val = regs_ptr[rs1_num];
			if (rs2_num)
				rs2_val = regs_ptr[rs2_num];

			if (is_beq_insn(op_code))
				result = (rs1_val == rs2_val) ? true : false;
			else if (is_bne_insn(op_code))
				result = (rs1_val != rs2_val) ? true : false;
			else if (is_blt_insn(op_code))
				result =
				    ((long)rs1_val <
				     (long)rs2_val) ? true : false;
			else if (is_bge_insn(op_code))
				result =
				    ((long)rs1_val >=
				     (long)rs2_val) ? true : false;
			else if (is_bltu_insn(op_code))
				result = (rs1_val < rs2_val) ? true : false;
			else if (is_bgeu_insn(op_code))
				result = (rs1_val >= rs2_val) ? true : false;
			if (result)
				*next_addr = imm + pc;
			else
				*next_addr = pc + 4;
		} else if (is_jal_insn(op_code)) {
			*next_addr = EXTRACT_JTYPE_IMM(op_code) + pc;
		} else if (is_jalr_insn(op_code)) {
			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
			if (rs1_num)
				*next_addr = ((unsigned long *)regs)[rs1_num];
			*next_addr += EXTRACT_ITYPE_IMM(op_code);
		} else if (is_sret_insn(op_code)) {
			*next_addr = pc;
		} else {
			*next_addr = pc + 4;
		}
	}
	return 0;
}

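/*
 * Single stepping is done in software: save the opcode at the predicted
 * next address and replace it with the kgdb breakpoint instruction, so
 * that resuming execution traps straight back into the debugger.
 */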
static int do_single_step(struct pt_regs *regs)
{
	/* Determine where the target instruction will send us to */
	unsigned long addr = 0;
	int error = get_step_address(regs, &addr);

	if (error)
		return error;

	/* Store the op code in the stepped address */
	error = get_kernel_nofault(stepped_opcode, (void *)addr);
	if (error)
		return error;

	stepped_address = addr;

	/* Replace the op code with the break instruction */
	error = copy_to_kernel_nofault((void *)stepped_address,
				   arch_kgdb_ops.gdb_bpt_instr,
				   BREAK_INSTR_SIZE);
	/* Flush and return */
	if (!error) {
		flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
		kgdb_single_step = 1;
		atomic_set(&kgdb_cpu_doing_single_step,
			   raw_smp_processor_id());
	} else {
		stepped_address = 0;
		stepped_opcode = 0;
	}
	return error;
}

/* Undo a single step */
static void undo_single_step(struct pt_regs *regs)
{
	if (stepped_opcode != 0) {
		copy_to_kernel_nofault((void *)stepped_address,
				   (void *)&stepped_opcode, BREAK_INSTR_SIZE);
		flush_icache_range(stepped_address,
				   stepped_address + BREAK_INSTR_SIZE);
	}
	stepped_address = 0;
	stepped_opcode = 0;
	kgdb_single_step = 0;
	atomic_set(&kgdb_cpu_doing_single_step, -1);
}

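/*
 * Map GDB's register numbering onto struct pt_regs.  An offset of -1
 * (the hard-wired zero register) has no storage and always reads back
 * as zero in dbg_get_reg().
 */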
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{DBG_REG_ZERO, GDB_SIZEOF_REG, -1},
	{DBG_REG_RA, GDB_SIZEOF_REG, offsetof(struct pt_regs, ra)},
	{DBG_REG_SP, GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
	{DBG_REG_GP, GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
	{DBG_REG_TP, GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
	{DBG_REG_T0, GDB_SIZEOF_REG, offsetof(struct pt_regs, t0)},
	{DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)},
	{DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)},
	{DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)},
	{DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
	{DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)},
	{DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
	{DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)},
	{DBG_REG_A3, GDB_SIZEOF_REG, offsetof(struct pt_regs, a3)},
	{DBG_REG_A4, GDB_SIZEOF_REG, offsetof(struct pt_regs, a4)},
	{DBG_REG_A5, GDB_SIZEOF_REG, offsetof(struct pt_regs, a5)},
	{DBG_REG_A6, GDB_SIZEOF_REG, offsetof(struct pt_regs, a6)},
	{DBG_REG_A7, GDB_SIZEOF_REG, offsetof(struct pt_regs, a7)},
	{DBG_REG_S2, GDB_SIZEOF_REG, offsetof(struct pt_regs, s2)},
	{DBG_REG_S3, GDB_SIZEOF_REG, offsetof(struct pt_regs, s3)},
	{DBG_REG_S4, GDB_SIZEOF_REG, offsetof(struct pt_regs, s4)},
	{DBG_REG_S5, GDB_SIZEOF_REG, offsetof(struct pt_regs, s5)},
	{DBG_REG_S6, GDB_SIZEOF_REG, offsetof(struct pt_regs, s6)},
	{DBG_REG_S7, GDB_SIZEOF_REG, offsetof(struct pt_regs, s7)},
	{DBG_REG_S8, GDB_SIZEOF_REG, offsetof(struct pt_regs, s8)},
	{DBG_REG_S9, GDB_SIZEOF_REG, offsetof(struct pt_regs, s9)},
	{DBG_REG_S10, GDB_SIZEOF_REG, offsetof(struct pt_regs, s10)},
	{DBG_REG_S11, GDB_SIZEOF_REG, offsetof(struct pt_regs, s11)},
	{DBG_REG_T3, GDB_SIZEOF_REG, offsetof(struct pt_regs, t3)},
	{DBG_REG_T4, GDB_SIZEOF_REG, offsetof(struct pt_regs, t4)},
	{DBG_REG_T5, GDB_SIZEOF_REG, offsetof(struct pt_regs, t5)},
	{DBG_REG_T6, GDB_SIZEOF_REG, offsetof(struct pt_regs, t6)},
	{DBG_REG_EPC, GDB_SIZEOF_REG, offsetof(struct pt_regs, epc)},
	{DBG_REG_STATUS, GDB_SIZEOF_REG, offsetof(struct pt_regs, status)},
	{DBG_REG_BADADDR, GDB_SIZEOF_REG, offsetof(struct pt_regs, badaddr)},
	{DBG_REG_CAUSE, GDB_SIZEOF_REG, offsetof(struct pt_regs, cause)},
};

char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);
	else
		memset(mem, 0, dbg_reg_def[regno].size);
	return dbg_reg_def[regno].name;
}

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return -EINVAL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
		       dbg_reg_def[regno].size);
	return 0;
}

void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
	/* Initialize to zero */
	memset((char *)gdb_regs, 0, NUMREGBYTES);

	gdb_regs[DBG_REG_SP_OFF] = task->thread.sp;
	gdb_regs[DBG_REG_FP_OFF] = task->thread.s[0];
	gdb_regs[DBG_REG_S1_OFF] = task->thread.s[1];
	gdb_regs[DBG_REG_S2_OFF] = task->thread.s[2];
	gdb_regs[DBG_REG_S3_OFF] = task->thread.s[3];
	gdb_regs[DBG_REG_S4_OFF] = task->thread.s[4];
	gdb_regs[DBG_REG_S5_OFF] = task->thread.s[5];
	gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
	gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
	gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
	gdb_regs[DBG_REG_S9_OFF] = task->thread.s[10];
	gdb_regs[DBG_REG_S10_OFF] = task->thread.s[11];
	gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->epc = pc;
}

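/*
 * Answer GDB's qXfer requests with the target description and CPU XML
 * provided by <asm/gdb_xml.h>.
 */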
void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
				char *remcom_out_buffer)
{
	if (!strncmp(remcom_in_buffer, gdb_xfer_read_target,
		     sizeof(gdb_xfer_read_target)))
		strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc);
	else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml,
			  sizeof(gdb_xfer_read_cpuxml)))
		strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml);
}

static inline void kgdb_arch_update_addr(struct pt_regs *regs,
					 char *remcom_in_buffer)
{
	unsigned long addr;
	char *ptr;

	ptr = &remcom_in_buffer[1];
	if (kgdb_hex2long(&ptr, &addr))
		regs->epc = addr;
}

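/*
 * Remote protocol commands: 'c' (continue), 'D' (detach) and 'k' (kill)
 * resume execution, optionally at a new PC; 's' resumes for exactly one
 * instruction via do_single_step().
 */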
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	int err = 0;

	undo_single_step(regs);

	switch (remcom_in_buffer[0]) {
	case 'c':
	case 'D':
	case 'k':
		if (remcom_in_buffer[0] == 'c')
			kgdb_arch_update_addr(regs, remcom_in_buffer);
		break;
	case 's':
		kgdb_arch_update_addr(regs, remcom_in_buffer);
		err = do_single_step(regs);
		break;
	default:
		err = -1;
	}
	return err;
}

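/*
 * Classify the trapping address: the temporary single-step breakpoint,
 * the compiled-in kgdb_compiled_break, or an ordinary kgdb breakpoint.
 */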
static int kgdb_riscv_kgdbbreak(unsigned long addr)
{
	if (stepped_address == addr)
		return KGDB_SW_SINGLE_STEP;
	if (atomic_read(&kgdb_setting_breakpoint))
		if (addr == (unsigned long)&kgdb_compiled_break)
			return KGDB_COMPILED_BREAK;

	return kgdb_has_hit_break(addr);
}

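/*
 * Die-notifier entry point: route kernel-mode breakpoint traps into
 * kgdb_handle_exception() and step over the 4-byte ebreak that backs
 * the compiled-in breakpoint.
 */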
static int kgdb_riscv_notify(struct notifier_block *self, unsigned long cmd,
			     void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long flags;
	int type;

	if (user_mode(regs))
		return NOTIFY_DONE;

	type = kgdb_riscv_kgdbbreak(regs->epc);
	if (type == NOT_KGDB_BREAK && cmd == DIE_TRAP)
		return NOTIFY_DONE;

	local_irq_save(flags);

	if (kgdb_handle_exception(type == KGDB_SW_SINGLE_STEP ? 0 : 1,
				  args->signr, cmd, regs))
		return NOTIFY_DONE;

	if (type == KGDB_COMPILED_BREAK)
		regs->epc += 4;

	local_irq_restore(flags);

	return NOTIFY_STOP;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_riscv_notify,
};

int kgdb_arch_init(void)
{
	register_die_notifier(&kgdb_notifier);

	return 0;
}

void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}

/*
 * Global data
 */
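/*
 * The breakpoint instruction is stored as little-endian bytes: 0x9002 is
 * c.ebreak (2 bytes, usable when the C extension is built in) and
 * 0x00100073 is the full 4-byte ebreak.
 */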
#ifdef CONFIG_RISCV_ISA_C
const struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr = {0x02, 0x90},	/* c.ebreak */
};
#else
const struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr = {0x73, 0x00, 0x10, 0x00},	/* ebreak */
};
#endif
arch/riscv/kernel/kgdb.c (v6.9.4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/ptrace.h>
#include <linux/kdebug.h>
#include <linux/bug.h>
#include <linux/kgdb.h>
#include <linux/irqflags.h>
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <asm/gdb_xml.h>
#include <asm/insn.h>

enum {
	NOT_KGDB_BREAK = 0,
	KGDB_SW_BREAK,
	KGDB_COMPILED_BREAK,
	KGDB_SW_SINGLE_STEP
};

static unsigned long stepped_address;
static unsigned int stepped_opcode;

static int decode_register_index(unsigned long opcode, int offset)
{
	return (opcode >> offset) & 0x1F;
}

static int decode_register_index_short(unsigned long opcode, int offset)
{
	return ((opcode >> offset) & 0x7) + 8;
}

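/*
 * The riscv_insn_is_*() matchers and the immediate-extraction helpers
 * used below are provided by <asm/insn.h>.
 */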
/* Calculate the new address for after a step */
static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
{
	unsigned long pc = regs->epc;
	unsigned long *regs_ptr = (unsigned long *)regs;
	unsigned int rs1_num, rs2_num;
	int op_code;

	if (get_kernel_nofault(op_code, (void *)pc))
		return -EINVAL;
	if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) {
		if (riscv_insn_is_c_jalr(op_code) ||
		    riscv_insn_is_c_jr(op_code)) {
			rs1_num = decode_register_index(op_code, RVC_C2_RS1_OPOFF);
			*next_addr = regs_ptr[rs1_num];
		} else if (riscv_insn_is_c_j(op_code) ||
			   riscv_insn_is_c_jal(op_code)) {
			*next_addr = RVC_EXTRACT_JTYPE_IMM(op_code) + pc;
		} else if (riscv_insn_is_c_beqz(op_code)) {
			rs1_num = decode_register_index_short(op_code,
							      RVC_C1_RS1_OPOFF);
			if (!rs1_num || regs_ptr[rs1_num] == 0)
				*next_addr = RVC_EXTRACT_BTYPE_IMM(op_code) + pc;
			else
				*next_addr = pc + 2;
		} else if (riscv_insn_is_c_bnez(op_code)) {
			rs1_num =
			    decode_register_index_short(op_code, RVC_C1_RS1_OPOFF);
			if (rs1_num && regs_ptr[rs1_num] != 0)
				*next_addr = RVC_EXTRACT_BTYPE_IMM(op_code) + pc;
			else
				*next_addr = pc + 2;
		} else {
			*next_addr = pc + 2;
		}
	} else {
		if ((op_code & __INSN_OPCODE_MASK) == __INSN_BRANCH_OPCODE) {
			bool result = false;
			long imm = RV_EXTRACT_BTYPE_IMM(op_code);
			unsigned long rs1_val = 0, rs2_val = 0;

			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
			rs2_num = decode_register_index(op_code, RVG_RS2_OPOFF);
			if (rs1_num)
				rs1_val = regs_ptr[rs1_num];
			if (rs2_num)
				rs2_val = regs_ptr[rs2_num];

			if (riscv_insn_is_beq(op_code))
				result = (rs1_val == rs2_val) ? true : false;
			else if (riscv_insn_is_bne(op_code))
				result = (rs1_val != rs2_val) ? true : false;
			else if (riscv_insn_is_blt(op_code))
				result =
				    ((long)rs1_val <
				     (long)rs2_val) ? true : false;
			else if (riscv_insn_is_bge(op_code))
				result =
				    ((long)rs1_val >=
				     (long)rs2_val) ? true : false;
			else if (riscv_insn_is_bltu(op_code))
				result = (rs1_val < rs2_val) ? true : false;
			else if (riscv_insn_is_bgeu(op_code))
				result = (rs1_val >= rs2_val) ? true : false;
			if (result)
				*next_addr = imm + pc;
			else
				*next_addr = pc + 4;
		} else if (riscv_insn_is_jal(op_code)) {
			*next_addr = RV_EXTRACT_JTYPE_IMM(op_code) + pc;
		} else if (riscv_insn_is_jalr(op_code)) {
			rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
			if (rs1_num)
				*next_addr = ((unsigned long *)regs)[rs1_num];
			*next_addr += RV_EXTRACT_ITYPE_IMM(op_code);
		} else if (riscv_insn_is_sret(op_code)) {
			*next_addr = pc;
		} else {
			*next_addr = pc + 4;
		}
	}
	return 0;
}

static int do_single_step(struct pt_regs *regs)
{
	/* Determine where the target instruction will send us to */
	unsigned long addr = 0;
	int error = get_step_address(regs, &addr);

	if (error)
		return error;

	/* Store the op code in the stepped address */
	error = get_kernel_nofault(stepped_opcode, (void *)addr);
	if (error)
		return error;

	stepped_address = addr;

	/* Replace the op code with the break instruction */
	error = copy_to_kernel_nofault((void *)stepped_address,
				   arch_kgdb_ops.gdb_bpt_instr,
				   BREAK_INSTR_SIZE);
	/* Flush and return */
	if (!error) {
		flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
		kgdb_single_step = 1;
		atomic_set(&kgdb_cpu_doing_single_step,
			   raw_smp_processor_id());
	} else {
		stepped_address = 0;
		stepped_opcode = 0;
	}
	return error;
}

/* Undo a single step */
static void undo_single_step(struct pt_regs *regs)
{
	if (stepped_opcode != 0) {
		copy_to_kernel_nofault((void *)stepped_address,
				   (void *)&stepped_opcode, BREAK_INSTR_SIZE);
		flush_icache_range(stepped_address,
				   stepped_address + BREAK_INSTR_SIZE);
	}
	stepped_address = 0;
	stepped_opcode = 0;
	kgdb_single_step = 0;
	atomic_set(&kgdb_cpu_doing_single_step, -1);
}

struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{DBG_REG_ZERO, GDB_SIZEOF_REG, -1},
	{DBG_REG_RA, GDB_SIZEOF_REG, offsetof(struct pt_regs, ra)},
	{DBG_REG_SP, GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
	{DBG_REG_GP, GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
	{DBG_REG_TP, GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
	{DBG_REG_T0, GDB_SIZEOF_REG, offsetof(struct pt_regs, t0)},
	{DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)},
	{DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)},
	{DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)},
	{DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
	{DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)},
	{DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
	{DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)},
	{DBG_REG_A3, GDB_SIZEOF_REG, offsetof(struct pt_regs, a3)},
	{DBG_REG_A4, GDB_SIZEOF_REG, offsetof(struct pt_regs, a4)},
	{DBG_REG_A5, GDB_SIZEOF_REG, offsetof(struct pt_regs, a5)},
	{DBG_REG_A6, GDB_SIZEOF_REG, offsetof(struct pt_regs, a6)},
	{DBG_REG_A7, GDB_SIZEOF_REG, offsetof(struct pt_regs, a7)},
	{DBG_REG_S2, GDB_SIZEOF_REG, offsetof(struct pt_regs, s2)},
	{DBG_REG_S3, GDB_SIZEOF_REG, offsetof(struct pt_regs, s3)},
	{DBG_REG_S4, GDB_SIZEOF_REG, offsetof(struct pt_regs, s4)},
	{DBG_REG_S5, GDB_SIZEOF_REG, offsetof(struct pt_regs, s5)},
	{DBG_REG_S6, GDB_SIZEOF_REG, offsetof(struct pt_regs, s6)},
	{DBG_REG_S7, GDB_SIZEOF_REG, offsetof(struct pt_regs, s7)},
	{DBG_REG_S8, GDB_SIZEOF_REG, offsetof(struct pt_regs, s8)},
	{DBG_REG_S9, GDB_SIZEOF_REG, offsetof(struct pt_regs, s9)},
	{DBG_REG_S10, GDB_SIZEOF_REG, offsetof(struct pt_regs, s10)},
	{DBG_REG_S11, GDB_SIZEOF_REG, offsetof(struct pt_regs, s11)},
	{DBG_REG_T3, GDB_SIZEOF_REG, offsetof(struct pt_regs, t3)},
	{DBG_REG_T4, GDB_SIZEOF_REG, offsetof(struct pt_regs, t4)},
	{DBG_REG_T5, GDB_SIZEOF_REG, offsetof(struct pt_regs, t5)},
	{DBG_REG_T6, GDB_SIZEOF_REG, offsetof(struct pt_regs, t6)},
	{DBG_REG_EPC, GDB_SIZEOF_REG, offsetof(struct pt_regs, epc)},
	{DBG_REG_STATUS, GDB_SIZEOF_REG, offsetof(struct pt_regs, status)},
	{DBG_REG_BADADDR, GDB_SIZEOF_REG, offsetof(struct pt_regs, badaddr)},
	{DBG_REG_CAUSE, GDB_SIZEOF_REG, offsetof(struct pt_regs, cause)},
};

char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);
	else
		memset(mem, 0, dbg_reg_def[regno].size);
	return dbg_reg_def[regno].name;
}

int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return -EINVAL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
		       dbg_reg_def[regno].size);
	return 0;
}

void
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
{
	/* Initialize to zero */
	memset((char *)gdb_regs, 0, NUMREGBYTES);

	gdb_regs[DBG_REG_SP_OFF] = task->thread.sp;
	gdb_regs[DBG_REG_FP_OFF] = task->thread.s[0];
	gdb_regs[DBG_REG_S1_OFF] = task->thread.s[1];
	gdb_regs[DBG_REG_S2_OFF] = task->thread.s[2];
	gdb_regs[DBG_REG_S3_OFF] = task->thread.s[3];
	gdb_regs[DBG_REG_S4_OFF] = task->thread.s[4];
	gdb_regs[DBG_REG_S5_OFF] = task->thread.s[5];
	gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
	gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
	gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
	gdb_regs[DBG_REG_S9_OFF] = task->thread.s[10];
	gdb_regs[DBG_REG_S10_OFF] = task->thread.s[11];
	gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->epc = pc;
}

void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
				char *remcom_out_buffer)
{
	if (!strncmp(remcom_in_buffer, gdb_xfer_read_target,
		     sizeof(gdb_xfer_read_target)))
		strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc);
	else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml,
			  sizeof(gdb_xfer_read_cpuxml)))
		strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml);
}

static inline void kgdb_arch_update_addr(struct pt_regs *regs,
					 char *remcom_in_buffer)
{
	unsigned long addr;
	char *ptr;

	ptr = &remcom_in_buffer[1];
	if (kgdb_hex2long(&ptr, &addr))
		regs->epc = addr;
}

int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *regs)
{
	int err = 0;

	undo_single_step(regs);

	switch (remcom_in_buffer[0]) {
	case 'c':
	case 'D':
	case 'k':
		if (remcom_in_buffer[0] == 'c')
			kgdb_arch_update_addr(regs, remcom_in_buffer);
		break;
	case 's':
		kgdb_arch_update_addr(regs, remcom_in_buffer);
		err = do_single_step(regs);
		break;
	default:
		err = -1;
	}
	return err;
}

static int kgdb_riscv_kgdbbreak(unsigned long addr)
{
	if (stepped_address == addr)
		return KGDB_SW_SINGLE_STEP;
	if (atomic_read(&kgdb_setting_breakpoint))
		if (addr == (unsigned long)&kgdb_compiled_break)
			return KGDB_COMPILED_BREAK;

	return kgdb_has_hit_break(addr);
}

static int kgdb_riscv_notify(struct notifier_block *self, unsigned long cmd,
			     void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long flags;
	int type;

	if (user_mode(regs))
		return NOTIFY_DONE;

	type = kgdb_riscv_kgdbbreak(regs->epc);
	if (type == NOT_KGDB_BREAK && cmd == DIE_TRAP)
		return NOTIFY_DONE;

	local_irq_save(flags);

	if (kgdb_handle_exception(type == KGDB_SW_SINGLE_STEP ? 0 : 1,
				  args->signr, cmd, regs))
		return NOTIFY_DONE;

	if (type == KGDB_COMPILED_BREAK)
		regs->epc += 4;

	local_irq_restore(flags);

	return NOTIFY_STOP;
}

static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_riscv_notify,
};

int kgdb_arch_init(void)
{
	register_die_notifier(&kgdb_notifier);

	return 0;
}

void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}

/*
 * Global data
 */
#ifdef CONFIG_RISCV_ISA_C
const struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr = {0x02, 0x90},	/* c.ebreak */
};
#else
const struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr = {0x73, 0x00, 0x10, 0x00},	/* ebreak */
};
#endif