/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>

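/*
 * Word offsets of the labels inside the detour buffer template; the
 * optprobe_template_* symbols are defined in the assembly template
 * (optprobes_head.S).
 */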
#define TMPL_CALL_HDLR_IDX	\
	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	\
	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		\
	(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		\
	(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		\
	(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		\
	(optprobe_template_end - optprobe_template_entry)

DEFINE_INSN_CACHE_OPS(ppc_optinsn);

static bool insn_page_in_use;

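/*
 * There is only one detour buffer page: optinsn_slot is reserved in
 * the kernel text area (see optprobes_head.S), which keeps it within
 * branch range of kernel code.
 */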
static void *__ppc_alloc_insn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

static void __ppc_free_insn_page(void *page __maybe_unused)
{
	insn_page_in_use = false;
}

struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
	.pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
	/* insn_size initialized later */
	.alloc = __ppc_alloc_insn_page,
	.free = __ppc_free_insn_page,
	.nr_garbage = 0,
};

/*
 * Check if we can optimize this probe. Returns NIP post-emulation if this can
 * be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;

	/*
	 * The kprobe placed on the kretprobe trampoline at boot time is
	 * a 'nop' instruction, which can be emulated, so further checks
	 * can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return (unsigned long)p->addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, not module
	 * addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr((unsigned long)p->addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = (unsigned long)p->addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we can't predict the nip ahead of time with a
	 * dummy pt_regs and cannot ensure that the return branch from
	 * the detour buffer falls within branch range (i.e. 32MB).
	 * A branch back from the trampoline is set up in the detour
	 * buffer to the nip returned by analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch, and
	 * that it can be emulated.
	 */
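	/*
	 * For example, a 'bne' depends on CR0, which the zeroed dummy
	 * pt_regs above cannot meaningfully predict; a 'nop' or an
	 * unconditional 'b' emulates deterministically.
	 */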
	if (!is_conditional_branch(*p->ainsn.insn) &&
	    analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs->nip = (unsigned long)op->kp.addr;
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_ppc_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

/*
 * emulate_step() expects the instruction to be emulated as its second
 * parameter. Load register 'r4' with the instruction.
 */
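/*
 * For example, patching in a 'nop' (0x60000000) results in:
 *	lis	r4,0x6000
 *	ori	r4,r4,0x0000
 */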
void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
{
	/* addis r4,0,(insn)@h */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
			  ((val >> 16) & 0xffff));
	addr++;

	/* ori r4,r4,(insn)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
			  ___PPC_RS(4) | (val & 0xffff));
}

/*
 * Generate instructions to load the provided 64-bit immediate value
 * into register 'r3' and patch these instructions at 'addr'.
 */
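/*
 * For example, val = 0xc000000012345678 results in:
 *	lis	r3,0xc000	# sign-extended to 0xffffffffc0000000
 *	ori	r3,r3,0x0000
 *	rldicr	r3,r3,32,31	# rotate left 32, clear the low word
 *	oris	r3,r3,0x1234
 *	ori	r3,r3,0x5678
 */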
void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
{
	/* lis r3,(op)@highest */
	patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
			  ((val >> 48) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@higher */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 32) & 0xffff));
	addr++;

	/* rldicr r3,r3,32,31 */
	patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
			  ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
	addr++;

	/* oris r3,r3,(op)@h */
	patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
			  ___PPC_RS(3) | ((val >> 16) & 0xffff));
	addr++;

	/* ori r3,r3,(op)@l */
	patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
			  ___PPC_RS(3) | (val & 0xffff));
}

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
	kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_ppc_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses a 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively nearby, because the
	 * branch target is specified in an immediate field in the
	 * instruction opcode itself, i.e. 24 bits in the opcode specify
	 * the address. Therefore the address must be within 32MB on
	 * either side of the current instruction.
	 */
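	/*
	 * An I-form branch encodes a signed 24-bit word offset (LI),
	 * shifted left by 2 bits, giving a reach of +/- 2^25 bytes
	 * (32MB) from the branch instruction itself.
	 */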
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
		   (unsigned long)nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Setup template */
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
					   (unsigned long)op_callback_addr,
					   BRANCH_SET_LINK);

	branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
					    (unsigned long)emulate_step_addr,
					    BRANCH_SET_LINK);

	if (!branch_op_callback || !branch_emulate_step)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

	/*
	 * 3. load instruction to be emulated into relevant register, and
	 */
	patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from trampoline
	 */
	patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);

	flush_icache_range((unsigned long)buff,
			   (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_ppc_optinsn_slot(buff, 0);
	return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, optprobes always replace exactly one instruction
 * (4 bytes, 4-byte aligned), so it is impossible for another kprobe
 * to lie within this address range. Always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Backup instructions which will be replaced
		 * by jump address
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr,
		       RELATIVEJUMP_SIZE);
		patch_instruction(op->kp.addr,
				  create_branch((unsigned int *)op->kp.addr,
						(unsigned long)op->optinsn.insn, 0));
		list_del_init(&op->list);
	}
}

void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist,
			     struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op,
				 unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code for Kernel probes Jump optimization.
 *
 * Copyright 2017, Anju T, IBM Corp.
 */

#include <linux/kprobes.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/inst.h>

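/*
 * Word offsets of the labels inside the detour buffer template; the
 * optprobe_template_* symbols are defined in the assembly template
 * (optprobes_head.S).
 */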
#define TMPL_CALL_HDLR_IDX	(optprobe_template_call_handler - optprobe_template_entry)
#define TMPL_EMULATE_IDX	(optprobe_template_call_emulate - optprobe_template_entry)
#define TMPL_RET_IDX		(optprobe_template_ret - optprobe_template_entry)
#define TMPL_OP_IDX		(optprobe_template_op_address - optprobe_template_entry)
#define TMPL_INSN_IDX		(optprobe_template_insn - optprobe_template_entry)
#define TMPL_END_IDX		(optprobe_template_end - optprobe_template_entry)

static bool insn_page_in_use;

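/*
 * There is only one detour buffer page: optinsn_slot is reserved in
 * the kernel text area (see optprobes_head.S), which keeps it within
 * branch range of kernel code.
 */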
void *alloc_optinsn_page(void)
{
	if (insn_page_in_use)
		return NULL;
	insn_page_in_use = true;
	return &optinsn_slot;
}

void free_optinsn_page(void *page)
{
	insn_page_in_use = false;
}

/*
 * Check if we can optimize this probe. Returns NIP post-emulation if this can
 * be optimized and 0 otherwise.
 */
static unsigned long can_optimize(struct kprobe *p)
{
	struct pt_regs regs;
	struct instruction_op op;
	unsigned long nip = 0;
	unsigned long addr = (unsigned long)p->addr;

	/*
	 * The kprobe placed on the kretprobe trampoline at boot time is
	 * a 'nop' instruction, which can be emulated, so further checks
	 * can be skipped.
	 */
	if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
		return addr + sizeof(kprobe_opcode_t);

	/*
	 * We only support optimizing kernel addresses, not module
	 * addresses.
	 *
	 * FIXME: Optimize kprobes placed in module addresses.
	 */
	if (!is_kernel_addr(addr))
		return 0;

	memset(&regs, 0, sizeof(struct pt_regs));
	regs.nip = addr;
	regs.trap = 0x0;
	regs.msr = MSR_KERNEL;

	/*
	 * Kprobes placed on conditional branch instructions are not
	 * optimized, as we can't predict the nip ahead of time with a
	 * dummy pt_regs and cannot ensure that the return branch from
	 * the detour buffer falls within branch range (i.e. 32MB).
	 * A branch back from the trampoline is set up in the detour
	 * buffer to the nip returned by analyse_instr() here.
	 *
	 * Ensure that the instruction is not a conditional branch, and
	 * that it can be emulated.
	 */
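	/*
	 * For example, a 'bne' depends on CR0, which the zeroed dummy
	 * pt_regs above cannot meaningfully predict; a 'nop' or an
	 * unconditional 'b' emulates deterministically.
	 */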
	if (!is_conditional_branch(ppc_inst_read(p->ainsn.insn)) &&
	    analyse_instr(&op, &regs, ppc_inst_read(p->ainsn.insn)) == 1) {
		emulate_update_regs(&regs, &op);
		nip = regs.nip;
	}

	return nip;
}

static void optimized_callback(struct optimized_kprobe *op,
			       struct pt_regs *regs)
{
	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	preempt_disable();

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		__this_cpu_write(current_kprobe, &op->kp);
		regs_set_return_ip(regs, (unsigned long)op->kp.addr);
		get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}

	preempt_enable();
}
NOKPROBE_SYMBOL(optimized_callback);

void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, 1);
		op->optinsn.insn = NULL;
	}
}

/*
 * Generate instructions to load the provided 32-bit immediate value
 * into register 'reg' and patch them at 'addr'.
 */
static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
	patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HI(val))));
	patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}

/*
 * Generate instructions to load the provided 64-bit immediate value
 * into register 'reg' and patch these instructions at 'addr'.
 */
static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
{
	patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HIGHEST(val))));
	patch_instruction(addr++, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_HIGHER(val))));
	patch_instruction(addr++, ppc_inst(PPC_RAW_SLDI(reg, reg, 32)));
	patch_instruction(addr++, ppc_inst(PPC_RAW_ORIS(reg, reg, PPC_HI(val))));
	patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}
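
/*
 * For example, val = 0xc000000012345678 with reg = 3 results in:
 *	lis	r3,0xc000	# sign-extended to 0xffffffffc0000000
 *	ori	r3,r3,0x0000
 *	sldi	r3,r3,32	# shift left 32, clearing the low word
 *	oris	r3,r3,0x1234
 *	ori	r3,r3,0x5678
 */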

/* Pick the 64-bit or 32-bit load sequence depending on the build. */
static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
	if (IS_ENABLED(CONFIG_PPC64))
		patch_imm64_load_insns(val, reg, addr);
	else
		patch_imm32_load_insns(val, reg, addr);
}

int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	ppc_inst_t branch_op_callback, branch_emulate_step, temp;
	unsigned long op_callback_addr, emulate_step_addr;
	kprobe_opcode_t *buff;
	long b_offset;
	unsigned long nip, size;
	int rc, i;

	nip = can_optimize(p);
	if (!nip)
		return -EILSEQ;

	/* Allocate instruction slot for detour buffer */
	buff = get_optinsn_slot();
	if (!buff)
		return -ENOMEM;

	/*
	 * OPTPROBE uses a 'b' instruction to branch to optinsn.insn.
	 *
	 * The target address has to be relatively nearby, because the
	 * branch target is specified in an immediate field in the
	 * instruction opcode itself, i.e. 24 bits in the opcode specify
	 * the address. Therefore the address must be within 32MB on
	 * either side of the current instruction.
	 */
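	/*
	 * An I-form branch encodes a signed 24-bit word offset (LI),
	 * shifted left by 2 bits, giving a reach of +/- 2^25 bytes
	 * (32MB) from the branch instruction itself.
	 */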
	b_offset = (unsigned long)buff - (unsigned long)p->addr;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Check if the return address is also within 32MB range */
	b_offset = (unsigned long)(buff + TMPL_RET_IDX) - nip;
	if (!is_offset_in_branch_range(b_offset))
		goto error;

	/* Setup template */
	/* We can optimize this via patch_instruction_window later */
	size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
	pr_devel("Copying template to %p, size %lu\n", buff, size);
	for (i = 0; i < size; i++) {
		rc = patch_instruction(buff + i, ppc_inst(*(optprobe_template_entry + i)));
		if (rc < 0)
			goto error;
	}

	/*
	 * Fixup the template with instructions to:
	 * 1. load the address of the actual probepoint
	 */
	patch_imm_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);

	/*
	 * 2. branch to optimized_callback() and emulate_step()
	 */
	op_callback_addr = ppc_kallsyms_lookup_name("optimized_callback");
	emulate_step_addr = ppc_kallsyms_lookup_name("emulate_step");
	if (!op_callback_addr || !emulate_step_addr) {
		WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
		goto error;
	}

	rc = create_branch(&branch_op_callback, buff + TMPL_CALL_HDLR_IDX,
			   op_callback_addr, BRANCH_SET_LINK);

	rc |= create_branch(&branch_emulate_step, buff + TMPL_EMULATE_IDX,
			    emulate_step_addr, BRANCH_SET_LINK);

	if (rc)
		goto error;

	patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
	patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);

	/*
	 * 3. load instruction to be emulated into relevant register, and
	 */
	temp = ppc_inst_read(p->ainsn.insn);
	patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);

	/*
	 * 4. branch back from trampoline
	 */
	patch_branch(buff + TMPL_RET_IDX, nip, 0);

	flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX]));

	op->optinsn.insn = buff;

	return 0;

error:
	free_optinsn_slot(buff, 0);
	return -ERANGE;
}

int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->insn != NULL;
}

/*
 * On powerpc, optprobes always replace exactly one instruction
 * (4 bytes, 4-byte aligned), so it is impossible for another kprobe
 * to lie within this address range. Always return 0.
 */
int arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	return 0;
}

void arch_optimize_kprobes(struct list_head *oplist)
{
	ppc_inst_t instr;
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/*
		 * Backup instructions which will be replaced
		 * by jump address
		 */
		memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
		create_branch(&instr, op->kp.addr, (unsigned long)op->optinsn.insn, 0);
		patch_instruction(op->kp.addr, instr);
		list_del_init(&op->list);
	}
}

void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	arch_arm_kprobe(&op->kp);
}

void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list)
{
	struct optimized_kprobe *op;
	struct optimized_kprobe *tmp;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		arch_unoptimize_kprobe(op);
		list_move(&op->list, done_list);
	}
}

int arch_within_optimized_kprobe(struct optimized_kprobe *op, kprobe_opcode_t *addr)
{
	return (op->kp.addr <= addr &&
		op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
}