// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/text-patching.h>
#include <asm/cacheflush.h>
#include <asm/ftrace.lds.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "entry.h"
#include "ftrace.h"
/*
 * To generate the function prologue either gcc's hotpatch feature (since
 * gcc 4.8) or a combination of the -pg -mrecord-mcount -mnop-mcount -mfentry
 * flags (since gcc 9 / clang 10) is used.
 * In both cases the original, as well as the disabled, function prologue
 * contains only a single six-byte instruction and looks like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched and afterwards looks like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *
 * The instruction is patched by ftrace_make_call / ftrace_make_nop.
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't jump directly to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 */
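
/*
 * For reference, the instruction encodings that the patching code below
 * relies on (byte 0 is the opcode; for brcl/brc the high nibble of byte 1
 * is the condition mask, for brasl it is the link register):
 * >	brcl	0,target	# 0xc0 0x04 <32-bit offset> - never taken, a nop
 * >	brcl	15,target	# 0xc0 0xf4 <32-bit offset> - unconditional branch
 * >	brasl	%r1,target	# 0xc0 0x15 <32-bit offset> - branch and save
 * All relative offsets are counted in halfwords, hence the divisions and
 * multiplications by two below.
 */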

void *ftrace_func __read_mostly = ftrace_stub;
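
/*
 * Layout of the six-byte brcl/brasl instruction that gets patched: opc holds
 * the two opcode bytes (including the mask/register nibble), disp is the
 * signed 32-bit target offset counted in halfwords.
 */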
struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;

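/*
 * The code shared by all hotpatch trampolines. On entry r1 holds the address
 * right behind the trampoline's brasl, so the lmg picks up the two 8-byte
 * slots stored there: r0 gets the rest_of_intercepted_function slot (the
 * "return address" of the ABI described above) and r1 gets the interceptor
 * slot, which is then branched to.
 */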
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_br:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_br_end:\n"
);

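/*
 * Expoline variant of the shared trampoline, used when the spectre v2
 * mitigation is active: the br is executed remotely via exrl, and the
 * following "j ." keeps speculative execution from running past the
 * indirect branch (see the expoline handling in nospec-branch.c).
 */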
#ifdef CONFIG_EXPOLINE
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_exrl:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	exrl	%r0,0f\n"
	"	j	.\n"
	"0:	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
);
#endif /* CONFIG_EXPOLINE */

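/*
 * brasl has a signed 32-bit halfword offset and therefore only a +-4GB
 * reach. Module trampolines may sit too far away from ftrace_caller for
 * that, so they branch to ftrace_plt instead: a copy of the shared
 * trampoline code allocated within the module area.
 */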
#ifdef CONFIG_MODULES
static char *ftrace_plt;
#endif /* CONFIG_MODULES */

static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_exrl;
		tend = ftrace_shared_hotpatch_trampoline_exrl_end;
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

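/*
 * Returning true makes the core ftrace code call ftrace_init_nop() for
 * every mcount location, even though the compiler already emitted the
 * brcl 0,0 nops, since each location also needs its private trampoline
 * to be set up.
 */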
bool ftrace_need_init_nop(void)
{
	return true;
}

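/*
 * Set up the trampoline for one mcount location: a slot is taken from the
 * per-object trampoline array (vmlinux or module), filled in, and the brcl
 * at rec->ip gets its displacement redirected to it. Roughly (a sketch;
 * the exact field offsets depend on struct ftrace_hotpatch_trampoline):
 *
 *	func:		brcl	0,trampoline	# mask patched to 15 on enable
 *	func+6:		...			# rest of the function
 *
 *	trampoline:	brasl	%r1,shared	# shared code or ftrace_plt
 *			.quad	func+6		# rest_of_intercepted_function
 *			.quad	FTRACE_ADDR	# interceptor
 */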
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
		shared = ftrace_plt;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline. */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}

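/*
 * Recover the trampoline belonging to an mcount location from the brcl
 * displacement at rec->ip, and verify that it really is a trampoline by
 * checking for the brasl %r1 opcode.
 */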
static struct ftrace_hotpatch_trampoline *ftrace_get_trampoline(struct dyn_ftrace *rec)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_insn insn;
	s64 disp;
	u16 opc;

	if (copy_from_kernel_nofault(&insn, (void *)rec->ip, sizeof(insn)))
		return ERR_PTR(-EFAULT);
	disp = (s64)insn.disp * 2;
	trampoline = (void *)(rec->ip + disp);
	if (get_kernel_nofault(opc, &trampoline->brasl_opc))
		return ERR_PTR(-EFAULT);
	if (opc != 0xc015)
		return ERR_PTR(-EINVAL);
	return trampoline;
}

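/*
 * Redirect a traced function to a different ftrace handler: only the
 * interceptor slot of the trampoline is rewritten, after verifying that
 * it still contains the expected old handler address.
 */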
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;
	u64 old;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	if (get_kernel_nofault(old, &trampoline->interceptor))
		return -EFAULT;
	if (old != old_addr)
		return -EINVAL;
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	return 0;
}

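/*
 * Flip the condition mask of a brc/brcl instruction. The high nibble of
 * byte 1 holds the mask: 0 means the branch is never taken (a nop), 15
 * makes it unconditional. Only this single byte is rewritten, which keeps
 * the patching down to a single byte store.
 */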
static int ftrace_patch_branch_mask(void *addr, u16 expected, bool enable)
{
	u16 old;
	u8 op;

	if (get_kernel_nofault(old, addr))
		return -EFAULT;
	if (old != expected)
		return -EINVAL;
	/* set mask field to all ones or zeroes */
	op = enable ? 0xf4 : 0x04;
	s390_kernel_write((char *)addr + 1, &op, sizeof(op));
	return 0;
}

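/*
 * Enabling and disabling a traced function only toggles the brcl mask at
 * rec->ip between 15 and 0; the displacement to the trampoline installed
 * by ftrace_init_nop() stays in place. ftrace_make_call() additionally
 * (re)loads the handler address into the trampoline's interceptor slot.
 */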
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	/* Expect brcl 0xf,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc0f4, false);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_hotpatch_trampoline *trampoline;

	trampoline = ftrace_get_trampoline(rec);
	if (IS_ERR(trampoline))
		return PTR_ERR(trampoline);
	s390_kernel_write(&trampoline->interceptor, &addr, sizeof(addr));
	/* Expect brcl 0x0,... */
	return ftrace_patch_branch_mask((void *)rec->ip, 0xc004, true);
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

void ftrace_arch_code_modify_post_process(void)
{
	/*
	 * Flush any pre-fetched instructions on all
	 * CPUs to make the new code visible.
	 */
	text_poke_sync_lock();
}

#ifdef CONFIG_MODULES

static int __init ftrace_plt_init(void)
{
	const char *start, *end;

	ftrace_plt = module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");

	start = ftrace_shared_hotpatch_trampoline(&end);
	memcpy(ftrace_plt, start, end - start);
	set_memory_ro((unsigned long)ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it on the stack of return addresses
 * of the current task.
 */
unsigned long prepare_ftrace_return(unsigned long ra, unsigned long sp,
				    unsigned long ip)
{
	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	if (!function_graph_enter(ra, ip, 0, (void *) sp))
		ra = (unsigned long) return_to_handler;
out:
	return ra;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is a branch relative on condition. To enable the
 * ftrace graph code block, the mask field of the instruction is patched
 * to zero, which turns the instruction into a nop and lets execution
 * fall through into the block.
 * To disable the ftrace graph code the mask field is patched to all
 * ones, which turns the instruction into an unconditional branch that
 * skips the block.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0xf,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa7f4, false);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	int rc;

	/* Expect brc 0x0,... */
	rc = ftrace_patch_branch_mask(ftrace_graph_caller, 0xa704, true);
	if (rc)
		return rc;
	text_poke_sync_lock();
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
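/*
 * Ftrace-based kprobes: instead of hitting a breakpoint, the kprobe fires
 * from the ftrace handler. The psw address in the regs copy is rewound to
 * the probed instruction for the pre handler and advanced past it before
 * the post handler runs, mimicking what single-stepping over the real
 * instruction would yield.
 */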
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (!regs || unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	instruction_pointer_set(regs, ip);

	if (!p->pre_handler || !p->pre_handler(p, regs)) {

		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

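/*
 * No instruction slot needs to be prepared for a kprobe on an ftrace
 * location; ainsn.insn == NULL marks the kprobe as ftrace-based.
 */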
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif