Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * User-space Probes (UProbes) for powerpc
4 *
5 * Copyright IBM Corporation, 2007-2012
6 *
7 * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com>
8 */
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/ptrace.h>
12#include <linux/uprobes.h>
13#include <linux/uaccess.h>
14#include <linux/kdebug.h>
15
16#include <asm/sstep.h>
17#include <asm/inst.h>
18
19#define UPROBE_TRAP_NR UINT_MAX
20
21/**
22 * is_trap_insn - check if the instruction is a trap variant
23 * @insn: instruction to be checked.
24 * Returns true if @insn is a trap variant.
25 */
26bool is_trap_insn(uprobe_opcode_t *insn)
27{
28 return (is_trap(*insn));
29}
30
31/**
32 * arch_uprobe_analyze_insn
33 * @mm: the probed address space.
34 * @arch_uprobe: the probepoint information.
35 * @addr: vaddr to probe.
36 * Return 0 on success or a -ve number on error.
37 */
38int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
39 struct mm_struct *mm, unsigned long addr)
40{
41 if (addr & 0x03)
42 return -EINVAL;
43
44 if (cpu_has_feature(CPU_FTR_ARCH_31) &&
45 ppc_inst_prefixed(ppc_inst_read(auprobe->insn)) &&
46 (addr & 0x3f) == 60) {
47 pr_info_ratelimited("Cannot register a uprobe on 64 byte unaligned prefixed instruction\n");
48 return -EINVAL;
49 }
50
51 return 0;
52}
53
54/*
55 * arch_uprobe_pre_xol - prepare to execute out of line.
56 * @auprobe: the probepoint information.
57 * @regs: reflects the saved user state of current task.
58 */
59int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
60{
61 struct arch_uprobe_task *autask = ¤t->utask->autask;
62
63 autask->saved_trap_nr = current->thread.trap_nr;
64 current->thread.trap_nr = UPROBE_TRAP_NR;
65 regs_set_return_ip(regs, current->utask->xol_vaddr);
66
67 user_enable_single_step(current);
68 return 0;
69}
70
/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	unsigned long bp_vaddr = instruction_pointer(regs);

	return bp_vaddr;
}
81
82/*
83 * If xol insn itself traps and generates a signal (SIGILL/SIGSEGV/etc),
84 * then detect the case where a singlestepped instruction jumps back to its
85 * own address. It is assumed that anything like do_page_fault/do_trap/etc
86 * sets thread.trap_nr != UINT_MAX.
87 *
88 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
89 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
90 * UPROBE_TRAP_NR == UINT_MAX set by arch_uprobe_pre_xol().
91 */
92bool arch_uprobe_xol_was_trapped(struct task_struct *t)
93{
94 if (t->thread.trap_nr != UPROBE_TRAP_NR)
95 return true;
96
97 return false;
98}
99
100/*
101 * Called after single-stepping. To avoid the SMP problems that can
102 * occur when we temporarily put back the original opcode to
103 * single-step, we single-stepped a copy of the instruction.
104 *
105 * This function prepares to resume execution after the single-step.
106 */
107int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
108{
109 struct uprobe_task *utask = current->utask;
110
111 WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
112
113 current->thread.trap_nr = utask->autask.saved_trap_nr;
114
115 /*
116 * On powerpc, except for loads and stores, most instructions
117 * including ones that alter code flow (branches, calls, returns)
118 * are emulated in the kernel. We get here only if the emulation
119 * support doesn't exist and have to fix-up the next instruction
120 * to be executed.
121 */
122 regs_set_return_ip(regs, (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn));
123
124 user_disable_single_step(current);
125 return 0;
126}
127
128/* callback routine for handling exceptions. */
129int arch_uprobe_exception_notify(struct notifier_block *self,
130 unsigned long val, void *data)
131{
132 struct die_args *args = data;
133 struct pt_regs *regs = args->regs;
134
135 /* regs == NULL is a kernel bug */
136 if (WARN_ON(!regs))
137 return NOTIFY_DONE;
138
139 /* We are only interested in userspace traps */
140 if (!user_mode(regs))
141 return NOTIFY_DONE;
142
143 switch (val) {
144 case DIE_BPT:
145 if (uprobe_pre_sstep_notifier(regs))
146 return NOTIFY_STOP;
147 break;
148 case DIE_SSTEP:
149 if (uprobe_post_sstep_notifier(regs))
150 return NOTIFY_STOP;
151 break;
152 default:
153 break;
154 }
155 return NOTIFY_DONE;
156}
157
158/*
159 * This function gets called when XOL instruction either gets trapped or
160 * the thread has a fatal signal, so reset the instruction pointer to its
161 * probed address.
162 */
163void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
164{
165 struct uprobe_task *utask = current->utask;
166
167 current->thread.trap_nr = utask->autask.saved_trap_nr;
168 instruction_pointer_set(regs, utask->vaddr);
169
170 user_disable_single_step(current);
171}
172
173/*
174 * See if the instruction can be emulated.
175 * Returns true if instruction was emulated, false otherwise.
176 */
177bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
178{
179 int ret;
180
181 /*
182 * emulate_step() returns 1 if the insn was successfully emulated.
183 * For all other cases, we need to single-step in hardware.
184 */
185 ret = emulate_step(regs, ppc_inst_read(auprobe->insn));
186 if (ret > 0)
187 return true;
188
189 return false;
190}
191
192unsigned long
193arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
194{
195 unsigned long orig_ret_vaddr;
196
197 orig_ret_vaddr = regs->link;
198
199 /* Replace the return addr with trampoline addr */
200 regs->link = trampoline_vaddr;
201
202 return orig_ret_vaddr;
203}
204
205bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
206 struct pt_regs *regs)
207{
208 if (ctx == RP_CHECK_CHAIN_CALL)
209 return regs->gpr[1] <= ret->stack;
210 else
211 return regs->gpr[1] < ret->stack;
212}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * User-space Probes (UProbes) for powerpc
4 *
5 * Copyright IBM Corporation, 2007-2012
6 *
7 * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com>
8 */
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/ptrace.h>
12#include <linux/uprobes.h>
13#include <linux/uaccess.h>
14#include <linux/kdebug.h>
15
16#include <asm/sstep.h>
17#include <asm/inst.h>
18
19#define UPROBE_TRAP_NR UINT_MAX
20
21/**
22 * is_trap_insn - check if the instruction is a trap variant
23 * @insn: instruction to be checked.
24 * Returns true if @insn is a trap variant.
25 */
26bool is_trap_insn(uprobe_opcode_t *insn)
27{
28 return (is_trap(*insn));
29}
30
31/**
32 * arch_uprobe_analyze_insn
33 * @mm: the probed address space.
34 * @arch_uprobe: the probepoint information.
35 * @addr: vaddr to probe.
36 * Return 0 on success or a -ve number on error.
37 */
38int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
39 struct mm_struct *mm, unsigned long addr)
40{
41 if (addr & 0x03)
42 return -EINVAL;
43
44 if (cpu_has_feature(CPU_FTR_ARCH_31) &&
45 ppc_inst_prefixed(ppc_inst_read(auprobe->insn)) &&
46 (addr & 0x3f) == 60) {
47 pr_info_ratelimited("Cannot register a uprobe on 64 byte unaligned prefixed instruction\n");
48 return -EINVAL;
49 }
50
51 if (!can_single_step(ppc_inst_val(ppc_inst_read(auprobe->insn)))) {
52 pr_info_ratelimited("Cannot register a uprobe on instructions that can't be single stepped\n");
53 return -ENOTSUPP;
54 }
55
56 return 0;
57}
58
59/*
60 * arch_uprobe_pre_xol - prepare to execute out of line.
61 * @auprobe: the probepoint information.
62 * @regs: reflects the saved user state of current task.
63 */
64int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
65{
66 struct arch_uprobe_task *autask = ¤t->utask->autask;
67
68 autask->saved_trap_nr = current->thread.trap_nr;
69 current->thread.trap_nr = UPROBE_TRAP_NR;
70 regs_set_return_ip(regs, current->utask->xol_vaddr);
71
72 user_enable_single_step(current);
73 return 0;
74}
75
/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	unsigned long bp_vaddr = instruction_pointer(regs);

	return bp_vaddr;
}
86
87/*
88 * If xol insn itself traps and generates a signal (SIGILL/SIGSEGV/etc),
89 * then detect the case where a singlestepped instruction jumps back to its
90 * own address. It is assumed that anything like do_page_fault/do_trap/etc
91 * sets thread.trap_nr != UINT_MAX.
92 *
93 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
94 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
95 * UPROBE_TRAP_NR == UINT_MAX set by arch_uprobe_pre_xol().
96 */
97bool arch_uprobe_xol_was_trapped(struct task_struct *t)
98{
99 if (t->thread.trap_nr != UPROBE_TRAP_NR)
100 return true;
101
102 return false;
103}
104
105/*
106 * Called after single-stepping. To avoid the SMP problems that can
107 * occur when we temporarily put back the original opcode to
108 * single-step, we single-stepped a copy of the instruction.
109 *
110 * This function prepares to resume execution after the single-step.
111 */
112int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
113{
114 struct uprobe_task *utask = current->utask;
115
116 WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
117
118 current->thread.trap_nr = utask->autask.saved_trap_nr;
119
120 /*
121 * On powerpc, except for loads and stores, most instructions
122 * including ones that alter code flow (branches, calls, returns)
123 * are emulated in the kernel. We get here only if the emulation
124 * support doesn't exist and have to fix-up the next instruction
125 * to be executed.
126 */
127 regs_set_return_ip(regs, (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn));
128
129 user_disable_single_step(current);
130 return 0;
131}
132
133/* callback routine for handling exceptions. */
134int arch_uprobe_exception_notify(struct notifier_block *self,
135 unsigned long val, void *data)
136{
137 struct die_args *args = data;
138 struct pt_regs *regs = args->regs;
139
140 /* regs == NULL is a kernel bug */
141 if (WARN_ON(!regs))
142 return NOTIFY_DONE;
143
144 /* We are only interested in userspace traps */
145 if (!user_mode(regs))
146 return NOTIFY_DONE;
147
148 switch (val) {
149 case DIE_BPT:
150 if (uprobe_pre_sstep_notifier(regs))
151 return NOTIFY_STOP;
152 break;
153 case DIE_SSTEP:
154 if (uprobe_post_sstep_notifier(regs))
155 return NOTIFY_STOP;
156 break;
157 default:
158 break;
159 }
160 return NOTIFY_DONE;
161}
162
163/*
164 * This function gets called when XOL instruction either gets trapped or
165 * the thread has a fatal signal, so reset the instruction pointer to its
166 * probed address.
167 */
168void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
169{
170 struct uprobe_task *utask = current->utask;
171
172 current->thread.trap_nr = utask->autask.saved_trap_nr;
173 instruction_pointer_set(regs, utask->vaddr);
174
175 user_disable_single_step(current);
176}
177
178/*
179 * See if the instruction can be emulated.
180 * Returns true if instruction was emulated, false otherwise.
181 */
182bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
183{
184 int ret;
185
186 /*
187 * emulate_step() returns 1 if the insn was successfully emulated.
188 * For all other cases, we need to single-step in hardware.
189 */
190 ret = emulate_step(regs, ppc_inst_read(auprobe->insn));
191 if (ret > 0)
192 return true;
193
194 return false;
195}
196
197unsigned long
198arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
199{
200 unsigned long orig_ret_vaddr;
201
202 orig_ret_vaddr = regs->link;
203
204 /* Replace the return addr with trampoline addr */
205 regs->link = trampoline_vaddr;
206
207 return orig_ret_vaddr;
208}
209
210bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
211 struct pt_regs *regs)
212{
213 if (ctx == RP_CHECK_CHAIN_CALL)
214 return regs->gpr[1] <= ret->stack;
215 else
216 return regs->gpr[1] < ret->stack;
217}