// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Kernel Probes (KProbes)
 *  arch/mips/kernel/kprobes.c
 *
 *  Copyright 2006 Sony Corp.
 *  Copyright 2010 Cavium Networks
 *
 *  Some portions copied from the powerpc version.
 *
 *   Copyright (C) IBM Corporation, 2002, 2004
 */

#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/branch.h>
#include <asm/break.h>

#include "probes-common.h"

static const union mips_instruction breakpoint_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_BP,
		.func = break_op
	}
};

static const union mips_instruction breakpoint2_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_SSTEPBP,
		.func = break_op
	}
};

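/*
 * Per-CPU bookkeeping: the kprobe currently being handled on this CPU and
 * the control block holding the saved status register and epc needed to
 * resume once the probed instruction has been single-stepped.
 */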
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

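/*
 * The delay-slot check itself, __insn_has_delay_slot(), comes from
 * "probes-common.h" (included above); this is just a local wrapper so the
 * callers in this file stay short.
 */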
static int __kprobes insn_has_delayslot(union mips_instruction insn)
{
	return __insn_has_delay_slot(insn);
}

/*
 * insn_has_ll_or_sc() checks whether the instruction is an ll or sc
 * variant.  Placing a breakpoint on top of an atomic ll/sc pair is a bad
 * idea, so refuse kprobe insertion on such instructions.  We cannot do
 * much about a breakpoint landing in the middle of an ll/sc pair; it is
 * up to the user to avoid those places.
 */
static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
{
	int ret = 0;

	switch (insn.i_format.opcode) {
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	union mips_instruction insn;
	union mips_instruction prev_insn;
	int ret = 0;

	insn = p->addr[0];

	if (insn_has_ll_or_sc(insn)) {
		pr_notice("Kprobes for ll and sc instructions are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	if ((probe_kernel_read(&prev_insn, p->addr - 1,
				sizeof(mips_instruction)) == 0) &&
				insn_has_delayslot(prev_insn)) {
		pr_notice("Kprobes for branch delayslot are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	if (__insn_is_compact_branch(insn)) {
		pr_notice("Kprobes for compact branches are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	/* insn: must be on special executable page on mips. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * In the kprobe->ainsn.insn[] array we store the original
	 * instruction at index zero and a break trap instruction at
	 * index one.
	 *
	 * On MIPS, if the instruction at the probed address is a branch,
	 * the instruction in its branch delay slot (BD) must be executed
	 * when the probe is hit.  Since MIPS has no hardware single
	 * stepping, the BD instruction cannot be executed in-line; it is
	 * executed out of line in the SSOL slot, followed by a normal
	 * breakpoint instruction in the next slot.  So read the
	 * instruction and save it for later execution.
	 */
	if (insn_has_delayslot(insn))
		memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
	else
		memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));

	p->ainsn.insn[1] = breakpoint2_insn;
	p->opcode = *p->addr;

out:
	return ret;
}

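/*
 * Illustrative sketch (not part of this file): a typical client of the code
 * above is a module that registers a kprobe through the generic kprobes API;
 * arch_prepare_kprobe() above and arch_arm_kprobe() below are then invoked on
 * its behalf.  The probed symbol name is only a placeholder.
 *
 *	static int example_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, epc = %lx\n", regs->cp0_epc);
 *		return 0;
 *	}
 *
 *	static struct kprobe example_kp = {
 *		.symbol_name	= "do_exit",
 *		.pre_handler	= example_pre,
 *	};
 *
 *	register_kprobe(&example_kp);	(arms the break instruction)
 *	...
 *	unregister_kprobe(&example_kp);
 */
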
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = breakpoint_insn;
	flush_insn_slot(p);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_insn_slot(p);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
	kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
	kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
}

static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
	kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
	kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
}

static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
	kcb->kprobe_saved_epc = regs->cp0_epc;
}

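/*
 * Note on the saved state above: only the interrupt-enable bit (ST0_IE) of
 * cp0_status is recorded.  Single stepping runs with interrupts masked
 * (prepare_singlestep() clears ST0_IE), and the saved bit is OR-ed back in
 * by post_kprobe_handler()/kprobe_fault_handler() when the probe finishes.
 */
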
/**
 * evaluate_branch_instruction -
 *
 * Evaluate the branch instruction at the probed address during the probe
 * hit.  The result of the evaluation is the updated epc.  The instruction
 * in the delay slot is then single stepped (using a normal breakpoint) in
 * the SSOL slot.
 *
 * The result is also saved in the kprobe control block for later use, in
 * case we need to execute the delay-slot instruction.  That is not needed
 * for a NOP in the delay slot, nor for branch-likely instructions when the
 * branch is taken; in those cases the SKIP_DELAYSLOT flag is set in the
 * kprobe control block.
 */
static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
					struct kprobe_ctlblk *kcb)
{
	union mips_instruction insn = p->opcode;
	long epc;
	int ret = 0;

	epc = regs->cp0_epc;
	if (epc & 3)
		goto unaligned;

	if (p->ainsn.insn->word == 0)
		kcb->flags |= SKIP_DELAYSLOT;
	else
		kcb->flags &= ~SKIP_DELAYSLOT;

	ret = __compute_return_epc_for_insn(regs, insn);
	if (ret < 0)
		return ret;

	if (ret == BRANCH_LIKELY_TAKEN)
		kcb->flags |= SKIP_DELAYSLOT;

	kcb->target_epc = regs->cp0_epc;

	return 0;

unaligned:
	pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm);
	force_sig(SIGBUS);
	return -EFAULT;
}

static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	int ret = 0;

	regs->cp0_status &= ~ST0_IE;

	/* single step inline if the instruction is a break */
	if (p->opcode.word == breakpoint_insn.word ||
	    p->opcode.word == breakpoint2_insn.word)
		regs->cp0_epc = (unsigned long)p->addr;
	else if (insn_has_delayslot(p->opcode)) {
		ret = evaluate_branch_instruction(p, regs, kcb);
		if (ret < 0) {
			pr_notice("Kprobes: Error in evaluating branch\n");
			return;
		}
	}
	regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
}
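
/*
 * After prepare_singlestep() returns, execution continues at the copy in
 * p->ainsn.insn[0] (the probed instruction, or its delay-slot instruction
 * for branches).  The break placed at p->ainsn.insn[1] then raises
 * DIE_SSTEPBP, which is routed to post_kprobe_handler() via
 * kprobe_exceptions_notify() below.
 */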

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "break 0"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.  For branch instructions, the saved target epc
 * is restored instead.
 */
static void __kprobes resume_execution(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	if (insn_has_delayslot(p->opcode)) {
		regs->cp0_epc = kcb->target_epc;
	} else {
		unsigned long orig_epc = kcb->kprobe_saved_epc;

		regs->cp0_epc = orig_epc + 4;
	}
}

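/*
 * Breakpoint entry point.  kprobe_handler() runs with preemption disabled
 * for the whole probe-handling sequence: it looks up the kprobe for the
 * trapping address, runs the pre_handler, and either single-steps the
 * instruction copy (KPROBE_HIT_SS) or, when the delay slot can be skipped,
 * finishes immediately by resuming at the already-computed target.
 */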
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *) regs->cp0_epc;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    p->ainsn.insn->word == breakpoint_insn.word) {
				regs->cp0_status &= ~ST0_IE;
				regs->cp0_status |= kcb->kprobe_saved_SR;
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs, kcb);
			kcb->kprobe_status = KPROBE_REENTER;
			if (kcb->flags & SKIP_DELAYSLOT) {
				resume_execution(p, regs, kcb);
				restore_previous_kprobe(kcb);
				preempt_enable_no_resched();
			}
			return 1;
		} else if (addr->word != breakpoint_insn.word) {
			/*
			 * The breakpoint instruction was removed by
			 * another cpu right after we hit it; no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (addr->word != breakpoint_insn.word) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* handler has already set things up, so skip ss setup */
		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}

	prepare_singlestep(p, regs, kcb);
	if (kcb->flags & SKIP_DELAYSLOT) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		if (p->post_handler)
			p->post_handler(p, regs, 0);
		resume_execution(p, regs, kcb);
		preempt_enable_no_resched();
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	regs->cp0_status |= kcb->kprobe_saved_SR;

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs, kcb);
		regs->cp0_status |= kcb->kprobe_old_SR;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BREAK:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEPBP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;

	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();

		if (kprobe_running()
		    && kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe causes the
 *	  handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		".set push\n\t"
		/* Keep the assembler from reordering and placing JR here. */
		".set noreorder\n\t"
		"nop\n\t"
		".global kretprobe_trampoline\n"
		"kretprobe_trampoline:\n\t"
		"nop\n\t"
		".set pop"
		: : : "memory");
}

void kretprobe_trampoline(void);

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];

	/* Replace the return addr with trampoline addr */
	regs->regs[31] = (unsigned long)kretprobe_trampoline;
}

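/*
 * Illustrative sketch (not part of this file): a return probe that reaches
 * arch_prepare_kretprobe() above is set up through the generic kretprobe
 * API; the probed symbol name below is only a placeholder.
 *
 *	static int example_ret(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		pr_info("returning to %p\n", ri->ret_addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe example_rp = {
 *		.handler	= example_ret,
 *		.kp.symbol_name	= "do_exit",
 *	};
 *
 *	register_kretprobe(&example_rp);
 */
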
/*
 * Called when the probe at kretprobe trampoline is hit.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	instruction_pointer(regs) = orig_ret_address;

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption).
	 */
	return 1;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
		return 1;

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}