1// SPDX-License-Identifier: GPL-2.0
2/*
3 * SuperH KGDB support
4 *
5 * Copyright (C) 2008 - 2012 Paul Mundt
6 *
7 * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
8 */
9#include <linux/kgdb.h>
10#include <linux/kdebug.h>
11#include <linux/irq.h>
12#include <linux/io.h>
13#include <linux/sched.h>
14#include <linux/sched/task_stack.h>
15
16#include <asm/cacheflush.h>
17#include <asm/traps.h>
18
19/* Macros for single step instruction identification */
20#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900)
21#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00)
22#define OPCODE_BTF_DISP(op) (((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
23 (((op) & 0x7f ) << 1))
24#define OPCODE_BFS(op) (((op) & 0xff00) == 0x8f00)
25#define OPCODE_BTS(op) (((op) & 0xff00) == 0x8d00)
26#define OPCODE_BRA(op) (((op) & 0xf000) == 0xa000)
27#define OPCODE_BRA_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
28 (((op) & 0x7ff) << 1))
29#define OPCODE_BRAF(op) (((op) & 0xf0ff) == 0x0023)
30#define OPCODE_BRAF_REG(op) (((op) & 0x0f00) >> 8)
31#define OPCODE_BSR(op) (((op) & 0xf000) == 0xb000)
32#define OPCODE_BSR_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
33 (((op) & 0x7ff) << 1))
34#define OPCODE_BSRF(op) (((op) & 0xf0ff) == 0x0003)
35#define OPCODE_BSRF_REG(op) (((op) >> 8) & 0xf)
36#define OPCODE_JMP(op) (((op) & 0xf0ff) == 0x402b)
37#define OPCODE_JMP_REG(op) (((op) >> 8) & 0xf)
38#define OPCODE_JSR(op) (((op) & 0xf0ff) == 0x400b)
39#define OPCODE_JSR_REG(op) (((op) >> 8) & 0xf)
40#define OPCODE_RTS(op) ((op) == 0xb)
41#define OPCODE_RTE(op) ((op) == 0x2b)
42
43#define SR_T_BIT_MASK 0x1
44#define STEP_OPCODE 0xc33d
45
/*
 * Calculate the new address for after a step: decode the opcode at the
 * current pc and emulate the branch condition / displacement by hand, so
 * the single-step trap can be planted at the instruction that will
 * actually execute next.
 */
static short *get_step_address(struct pt_regs *linux_regs)
{
	insn_size_t op = __raw_readw(linux_regs->pc);
	long addr;

	/* BT: branch is taken only when SR.T is set */
	if (OPCODE_BT(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BTS: like BT but with a delay slot, so the not-taken path skips it */
	else if (OPCODE_BTS(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4; /* Not in delay slot */
	}

	/* BF: branch is taken only when SR.T is clear */
	else if (OPCODE_BF(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BFS: like BF but with a delay slot */
	else if (OPCODE_BFS(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4; /* Not in delay slot */
	}

	/* BRA: unconditional pc-relative branch */
	else if (OPCODE_BRA(op))
		addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op);

	/* BRAF: unconditional branch, target relative to a GP register */
	else if (OPCODE_BRAF(op))
		addr = linux_regs->pc + 4
		       + linux_regs->regs[OPCODE_BRAF_REG(op)];

	/* BSR: pc-relative subroutine call */
	else if (OPCODE_BSR(op))
		addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op);

	/* BSRF: subroutine call, target relative to a GP register */
	else if (OPCODE_BSRF(op))
		addr = linux_regs->pc + 4
		       + linux_regs->regs[OPCODE_BSRF_REG(op)];

	/* JMP: absolute jump through a GP register */
	else if (OPCODE_JMP(op))
		addr = linux_regs->regs[OPCODE_JMP_REG(op)];

	/* JSR: absolute subroutine call through a GP register */
	else if (OPCODE_JSR(op))
		addr = linux_regs->regs[OPCODE_JSR_REG(op)];

	/* RTS: return to the address held in pr */
	else if (OPCODE_RTS(op))
		addr = linux_regs->pr;

	/* RTE: return from exception; saved pc sits on the stack (r15) */
	else if (OPCODE_RTE(op))
		addr = linux_regs->regs[15];

	/* Other: plain fall-through to the next sequential instruction */
	else
		addr = linux_regs->pc + instruction_size(op);

	/*
	 * NOTE(review): flushed before the caller patches the target --
	 * presumably to keep the I-cache coherent with the upcoming
	 * read/modify of the opcode at addr; confirm against the SH
	 * cacheflush semantics.
	 */
	flush_icache_range(addr, addr + instruction_size(op));
	return (short *)addr;
}
125
126/*
127 * Replace the instruction immediately after the current instruction
128 * (i.e. next in the expected flow of control) with a trap instruction,
129 * so that returning will cause only a single instruction to be executed.
130 * Note that this model is slightly broken for instructions with delay
131 * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the
132 * instruction in the delay slot will be executed.
133 */
134
135static unsigned long stepped_address;
136static insn_size_t stepped_opcode;
137
138static void do_single_step(struct pt_regs *linux_regs)
139{
140 /* Determine where the target instruction will send us to */
141 unsigned short *addr = get_step_address(linux_regs);
142
143 stepped_address = (int)addr;
144
145 /* Replace it */
146 stepped_opcode = __raw_readw((long)addr);
147 *addr = STEP_OPCODE;
148
149 /* Flush and return */
150 flush_icache_range((long)addr, (long)addr +
151 instruction_size(stepped_opcode));
152}
153
154/* Undo a single step */
155static void undo_single_step(struct pt_regs *linux_regs)
156{
157 /* If we have stepped, put back the old instruction */
158 /* Use stepped_address in case we stopped elsewhere */
159 if (stepped_opcode != 0) {
160 __raw_writew(stepped_opcode, stepped_address);
161 flush_icache_range(stepped_address, stepped_address + 2);
162 }
163
164 stepped_opcode = 0;
165}
166
/*
 * Register name/size/offset map consumed by the generic kgdb core for
 * gdb's register packets.  offset is the field's position inside
 * struct pt_regs; -1 (vbr) means the value does not live in pt_regs
 * and must be fetched specially (see dbg_get_reg()).
 */
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc) },
	{ "pr", GDB_SIZEOF_REG, offsetof(struct pt_regs, pr) },
	{ "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, sr) },
	{ "gbr", GDB_SIZEOF_REG, offsetof(struct pt_regs, gbr) },
	{ "mach", GDB_SIZEOF_REG, offsetof(struct pt_regs, mach) },
	{ "macl", GDB_SIZEOF_REG, offsetof(struct pt_regs, macl) },
	/* vbr is not part of pt_regs; offset -1 flags the special case */
	{ "vbr", GDB_SIZEOF_REG, -1 },
};
192
193int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
194{
195 if (regno < 0 || regno >= DBG_MAX_REG_NUM)
196 return -EINVAL;
197
198 if (dbg_reg_def[regno].offset != -1)
199 memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
200 dbg_reg_def[regno].size);
201
202 return 0;
203}
204
205char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
206{
207 if (regno >= DBG_MAX_REG_NUM || regno < 0)
208 return NULL;
209
210 if (dbg_reg_def[regno].size != -1)
211 memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
212 dbg_reg_def[regno].size);
213
214 switch (regno) {
215 case GDB_VBR:
216 __asm__ __volatile__ ("stc vbr, %0" : "=r" (mem));
217 break;
218 }
219
220 return dbg_reg_def[regno].name;
221}
222
223void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
224{
225 struct pt_regs *thread_regs = task_pt_regs(p);
226 int reg;
227
228 /* Initialize to zero */
229 for (reg = 0; reg < DBG_MAX_REG_NUM; reg++)
230 gdb_regs[reg] = 0;
231
232 /*
233 * Copy out GP regs 8 to 14.
234 *
235 * switch_to() relies on SR.RB toggling, so regs 0->7 are banked
236 * and need privileged instructions to get to. The r15 value we
237 * fetch from the thread info directly.
238 */
239 for (reg = GDB_R8; reg < GDB_R15; reg++)
240 gdb_regs[reg] = thread_regs->regs[reg];
241
242 gdb_regs[GDB_R15] = p->thread.sp;
243 gdb_regs[GDB_PC] = p->thread.pc;
244
245 /*
246 * Additional registers we have context for
247 */
248 gdb_regs[GDB_PR] = thread_regs->pr;
249 gdb_regs[GDB_GBR] = thread_regs->gbr;
250}
251
/*
 * Handle the gdb remote-protocol packets that need architecture help:
 * 'c' (continue), 's' (step), 'D' (detach) and 'k' (kill).  Returns 0
 * when the packet was consumed and the CPU may resume, -1 to stay in
 * the generic handler.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	/* Undo any stepping we may have done */
	undo_single_step(linux_regs);

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->pc = addr;
		/* fall through -- resume handling is shared with D/k */
	case 'D':
	case 'k':
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		if (remcomInBuffer[0] == 's') {
			/* Plant the step trap, mark this CPU as stepping */
			do_single_step(linux_regs);
			kgdb_single_step = 1;

			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}

		return 0;
	}

	/* this means that we do not want to exit from the handler: */
	return -1;
}
287
/*
 * Report the pc to hand to gdb.  Exception 60 (0x3c -- the breakpoint
 * trapa, see arch_kgdb_ops) leaves the pc pointing past the 2-byte trap
 * instruction, so rewind it; everything else reports the pc as-is.
 */
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	return (exception == 60) ? pc - 2 : pc;
}
294
/* Redirect execution: called by the kgdb core to set the resume pc. */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->pc = ip;
}
299
/*
 * The primary entry points for the kgdb debug trap table entries.
 */
BUILD_TRAP_HANDLER(singlestep)
{
	unsigned long flags;
	TRAP_HANDLER_DECL;

	local_irq_save(flags);
	/*
	 * Rewind the pc by the size of the opcode fetched at pc - 4.
	 * NOTE(review): the pc - 4 offset presumably accounts for the
	 * trap frame's pc adjustment -- confirm against the SH trap
	 * entry code before changing.
	 */
	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
	kgdb_handle_exception(0, SIGTRAP, 0, regs);
	local_irq_restore(flags);
}
313
314static int __kgdb_notify(struct die_args *args, unsigned long cmd)
315{
316 int ret;
317
318 switch (cmd) {
319 case DIE_BREAKPOINT:
320 /*
321 * This means a user thread is single stepping
322 * a system call which should be ignored
323 */
324 if (test_thread_flag(TIF_SINGLESTEP))
325 return NOTIFY_DONE;
326
327 ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
328 args->err, args->regs);
329 if (ret)
330 return NOTIFY_DONE;
331
332 break;
333 }
334
335 return NOTIFY_STOP;
336}
337
/*
 * Notifier-chain entry point: wraps __kgdb_notify() with interrupts
 * disabled, since kgdb entry must not be preempted by an IRQ.
 */
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	unsigned long flags;
	int rc;

	local_irq_save(flags);
	rc = __kgdb_notify(ptr, cmd);
	local_irq_restore(flags);

	return rc;
}
350
/* Die-notifier hook that funnels breakpoint traps into kgdb. */
static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_notify,

	/*
	 * Lowest-prio notifier priority, we want to be notified last:
	 */
	.priority = -INT_MAX,
};
359
/* Arch hook: called by the kgdb core; hook us into the die chain. */
int kgdb_arch_init(void)
{
	return register_die_notifier(&kgdb_notifier);
}
364
/* Arch hook: unhook from the die chain on kgdb teardown. */
void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}
369
/*
 * Arch description handed to the kgdb core; the only field we need is
 * the opcode bytes the core writes to plant a breakpoint (byte order
 * depends on endianness).
 */
const struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: trapa #0x3c */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	.gdb_bpt_instr = { 0x3c, 0xc3 },
#else
	.gdb_bpt_instr = { 0xc3, 0x3c },
#endif
};
1/*
2 * SuperH KGDB support
3 *
4 * Copyright (C) 2008 - 2009 Paul Mundt
5 *
6 * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/kgdb.h>
13#include <linux/kdebug.h>
14#include <linux/irq.h>
15#include <linux/io.h>
16#include <asm/cacheflush.h>
17
18/* Macros for single step instruction identification */
19#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900)
20#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00)
21#define OPCODE_BTF_DISP(op) (((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
22 (((op) & 0x7f ) << 1))
23#define OPCODE_BFS(op) (((op) & 0xff00) == 0x8f00)
24#define OPCODE_BTS(op) (((op) & 0xff00) == 0x8d00)
25#define OPCODE_BRA(op) (((op) & 0xf000) == 0xa000)
26#define OPCODE_BRA_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
27 (((op) & 0x7ff) << 1))
28#define OPCODE_BRAF(op) (((op) & 0xf0ff) == 0x0023)
29#define OPCODE_BRAF_REG(op) (((op) & 0x0f00) >> 8)
30#define OPCODE_BSR(op) (((op) & 0xf000) == 0xb000)
31#define OPCODE_BSR_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
32 (((op) & 0x7ff) << 1))
33#define OPCODE_BSRF(op) (((op) & 0xf0ff) == 0x0003)
34#define OPCODE_BSRF_REG(op) (((op) >> 8) & 0xf)
35#define OPCODE_JMP(op) (((op) & 0xf0ff) == 0x402b)
36#define OPCODE_JMP_REG(op) (((op) >> 8) & 0xf)
37#define OPCODE_JSR(op) (((op) & 0xf0ff) == 0x400b)
38#define OPCODE_JSR_REG(op) (((op) >> 8) & 0xf)
39#define OPCODE_RTS(op) ((op) == 0xb)
40#define OPCODE_RTE(op) ((op) == 0x2b)
41
42#define SR_T_BIT_MASK 0x1
43#define STEP_OPCODE 0xc33d
44
/*
 * Calculate the new address for after a step (older copy of this file):
 * decode the opcode at pc and emulate the branch by hand so the step
 * trap lands on the instruction that will actually execute next.
 */
static short *get_step_address(struct pt_regs *linux_regs)
{
	insn_size_t op = __raw_readw(linux_regs->pc);
	long addr;

	/* BT: taken only when SR.T is set */
	if (OPCODE_BT(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BTS: BT with a delay slot */
	else if (OPCODE_BTS(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4; /* Not in delay slot */
	}

	/* BF: taken only when SR.T is clear */
	else if (OPCODE_BF(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BFS: BF with a delay slot */
	else if (OPCODE_BFS(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4; /* Not in delay slot */
	}

	/* BRA: unconditional pc-relative branch */
	else if (OPCODE_BRA(op))
		addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op);

	/* BRAF: target relative to a GP register */
	else if (OPCODE_BRAF(op))
		addr = linux_regs->pc + 4
		       + linux_regs->regs[OPCODE_BRAF_REG(op)];

	/* BSR: pc-relative subroutine call */
	else if (OPCODE_BSR(op))
		addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op);

	/* BSRF: subroutine call relative to a GP register */
	else if (OPCODE_BSRF(op))
		addr = linux_regs->pc + 4
		       + linux_regs->regs[OPCODE_BSRF_REG(op)];

	/* JMP: absolute jump through a GP register */
	else if (OPCODE_JMP(op))
		addr = linux_regs->regs[OPCODE_JMP_REG(op)];

	/* JSR: absolute call through a GP register */
	else if (OPCODE_JSR(op))
		addr = linux_regs->regs[OPCODE_JSR_REG(op)];

	/* RTS: return to the address held in pr */
	else if (OPCODE_RTS(op))
		addr = linux_regs->pr;

	/* RTE: saved pc is on the stack (r15) */
	else if (OPCODE_RTE(op))
		addr = linux_regs->regs[15];

	/* Other: next sequential instruction */
	else
		addr = linux_regs->pc + instruction_size(op);

	flush_icache_range(addr, addr + instruction_size(op));
	return (short *)addr;
}
124
125/*
126 * Replace the instruction immediately after the current instruction
127 * (i.e. next in the expected flow of control) with a trap instruction,
128 * so that returning will cause only a single instruction to be executed.
129 * Note that this model is slightly broken for instructions with delay
130 * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the
131 * instruction in the delay slot will be executed.
132 */
133
134static unsigned long stepped_address;
135static insn_size_t stepped_opcode;
136
/*
 * Plant the single-step trap: save the opcode at the next-to-execute
 * address in stepped_opcode/stepped_address and overwrite it with the
 * trapa opcode.
 */
static void do_single_step(struct pt_regs *linux_regs)
{
	/* Determine where the target instruction will send us to */
	unsigned short *addr = get_step_address(linux_regs);

	/* NOTE(review): (int) cast assumes a 32-bit address space */
	stepped_address = (int)addr;

	/* Replace it */
	stepped_opcode = __raw_readw((long)addr);
	*addr = STEP_OPCODE;

	/* Flush and return */
	flush_icache_range((long)addr, (long)addr +
			   instruction_size(stepped_opcode));
}
152
/* Undo a single step: restore the saved opcode, if one was planted. */
static void undo_single_step(struct pt_regs *linux_regs)
{
	/* If we have stepped, put back the old instruction */
	/* Use stepped_address in case we stopped elsewhere */
	if (stepped_opcode != 0) {
		__raw_writew(stepped_opcode, stepped_address);
		flush_icache_range(stepped_address, stepped_address + 2);
	}

	stepped_opcode = 0;
}
165
166void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
167{
168 int i;
169
170 for (i = 0; i < 16; i++)
171 gdb_regs[GDB_R0 + i] = regs->regs[i];
172
173 gdb_regs[GDB_PC] = regs->pc;
174 gdb_regs[GDB_PR] = regs->pr;
175 gdb_regs[GDB_SR] = regs->sr;
176 gdb_regs[GDB_GBR] = regs->gbr;
177 gdb_regs[GDB_MACH] = regs->mach;
178 gdb_regs[GDB_MACL] = regs->macl;
179
180 __asm__ __volatile__ ("stc vbr, %0" : "=r" (gdb_regs[GDB_VBR]));
181}
182
183void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
184{
185 int i;
186
187 for (i = 0; i < 16; i++)
188 regs->regs[GDB_R0 + i] = gdb_regs[GDB_R0 + i];
189
190 regs->pc = gdb_regs[GDB_PC];
191 regs->pr = gdb_regs[GDB_PR];
192 regs->sr = gdb_regs[GDB_SR];
193 regs->gbr = gdb_regs[GDB_GBR];
194 regs->mach = gdb_regs[GDB_MACH];
195 regs->macl = gdb_regs[GDB_MACL];
196}
197
/*
 * Minimal register set for a sleeping task: only the saved stack
 * pointer and pc are available from the thread context here.
 * NOTE(review): gdb_regs is not zeroed first in this older version --
 * the other slots are left as the caller provided them.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	gdb_regs[GDB_R15] = p->thread.sp;
	gdb_regs[GDB_PC] = p->thread.pc;
}
203
/*
 * Handle the gdb remote-protocol packets that need architecture help:
 * 'c' (continue), 's' (step), 'D' (detach) and 'k' (kill).  Returns 0
 * when the packet was consumed, -1 to stay in the generic handler.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	/* Undo any stepping we may have done */
	undo_single_step(linux_regs);

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->pc = addr;
		/* fall through -- resume handling is shared with D/k */
	case 'D':
	case 'k':
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		if (remcomInBuffer[0] == 's') {
			/* Plant the step trap, mark this CPU as stepping */
			do_single_step(linux_regs);
			kgdb_single_step = 1;

			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}

		return 0;
	}

	/* this means that we do not want to exit from the handler: */
	return -1;
}
239
/*
 * Report the pc to hand to gdb.  Exception 60 (0x3c, the breakpoint
 * trapa -- see arch_kgdb_ops) leaves the pc past the 2-byte trap
 * instruction, so rewind it.
 */
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	if (exception == 60)
		return instruction_pointer(regs) - 2;
	return instruction_pointer(regs);
}
246
/* Redirect execution: called by the kgdb core to set the resume pc. */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->pc = ip;
}
251
/*
 * The primary entry points for the kgdb debug trap table entries.
 */
BUILD_TRAP_HANDLER(singlestep)
{
	unsigned long flags;
	TRAP_HANDLER_DECL;

	local_irq_save(flags);
	/*
	 * Rewind the pc by the size of the opcode fetched at pc - 4.
	 * NOTE(review): confirm the offset against the SH trap entry
	 * code before touching this.
	 */
	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
	kgdb_handle_exception(0, SIGTRAP, 0, regs);
	local_irq_restore(flags);
}
265
/*
 * Die-notifier body: route breakpoint traps into the kgdb core.
 * Returns NOTIFY_DONE to pass the event on, NOTIFY_STOP to claim it.
 */
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	int ret;

	switch (cmd) {
	case DIE_BREAKPOINT:
		/*
		 * This means a user thread is single stepping
		 * a system call which should be ignored
		 */
		if (test_thread_flag(TIF_SINGLESTEP))
			return NOTIFY_DONE;

		ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
					    args->err, args->regs);
		/* Non-zero: kgdb declined the event; let others see it */
		if (ret)
			return NOTIFY_DONE;

		break;
	}

	return NOTIFY_STOP;
}
289
/*
 * Notifier-chain entry point: wraps __kgdb_notify() with interrupts
 * disabled, since kgdb entry must not be preempted by an IRQ.
 */
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = __kgdb_notify(ptr, cmd);
	local_irq_restore(flags);

	return ret;
}
302
/* Die-notifier hook that funnels breakpoint traps into kgdb. */
static struct notifier_block kgdb_notifier = {
	.notifier_call = kgdb_notify,

	/*
	 * Lowest-prio notifier priority, we want to be notified last:
	 */
	.priority = -INT_MAX,
};
311
/* Arch hook: called by the kgdb core; hook us into the die chain. */
int kgdb_arch_init(void)
{
	return register_die_notifier(&kgdb_notifier);
}
316
/* Arch hook: unhook from the die chain on kgdb teardown. */
void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}
321
/*
 * Arch description handed to the kgdb core; the only field we need is
 * the opcode bytes written to plant a breakpoint (endian-dependent).
 */
struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: trapa #0x3c */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	.gdb_bpt_instr = { 0x3c, 0xc3 },
#else
	.gdb_bpt_instr = { 0xc3, 0x3c },
#endif
};