1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Infrastructure to hook into function calls and returns.
4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7 *
8 * Highly modified by Steven Rostedt (VMware).
9 */
10#include <linux/jump_label.h>
11#include <linux/suspend.h>
12#include <linux/ftrace.h>
13#include <linux/slab.h>
14
15#include <trace/events/sched.h>
16
17#include "ftrace_internal.h"
18#include "trace.h"
19
20#ifdef CONFIG_DYNAMIC_FTRACE
21#define ASSIGN_OPS_HASH(opsname, val) \
22 .func_hash = val, \
23 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
24#else
25#define ASSIGN_OPS_HASH(opsname, val)
26#endif
27
28DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
29int ftrace_graph_active;
30
31/* Both enabled by default (can be cleared by function_graph tracer flags) */
32static bool fgraph_sleep_time = true;
33
34#ifdef CONFIG_DYNAMIC_FTRACE
35/*
36 * archs can override this function if they must do something
37 * to enable hook for graph tracer.
38 */
39int __weak ftrace_enable_ftrace_graph_caller(void)
40{
41 return 0;
42}
43
44/*
45 * archs can override this function if they must do something
46 * to disable hook for graph tracer.
47 */
48int __weak ftrace_disable_ftrace_graph_caller(void)
49{
50 return 0;
51}
52#endif
53
54/**
55 * ftrace_graph_stop - set to permanently disable function graph tracing
56 *
57 * In case of an error in function graph tracing, this is called
58 * to try to keep function graph tracing from causing any more harm.
59 * Usually this is pretty severe and this is called to try to at least
60 * get a warning out to the user.
61 */
62void ftrace_graph_stop(void)
63{
64 static_branch_enable(&kill_ftrace_graph);
65}
66
67/* Add a function return address to the trace stack on thread info.*/
68static int
69ftrace_push_return_trace(unsigned long ret, unsigned long func,
70 unsigned long frame_pointer, unsigned long *retp)
71{
72 unsigned long long calltime;
73 int index;
74
75 if (unlikely(ftrace_graph_is_dead()))
76 return -EBUSY;
77
78 if (!current->ret_stack)
79 return -EBUSY;
80
81 /*
82 * We must make sure the ret_stack is tested before we read
83 * anything else.
84 */
85 smp_rmb();
86
87 /* The return trace stack is full */
88 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
89 atomic_inc(&current->trace_overrun);
90 return -EBUSY;
91 }
92
93 calltime = trace_clock_local();
94
95 index = ++current->curr_ret_stack;
96 barrier();
97 current->ret_stack[index].ret = ret;
98 current->ret_stack[index].func = func;
99 current->ret_stack[index].calltime = calltime;
100#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
101 current->ret_stack[index].fp = frame_pointer;
102#endif
103#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
104 current->ret_stack[index].retp = retp;
105#endif
106 return 0;
107}
108
109/*
110 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
111 * functions. But those archs currently don't support direct functions
112 * anyway, and ftrace_find_rec_direct() is just a stub for them.
113 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
114 */
115#ifndef MCOUNT_INSN_SIZE
116/* Make sure this only works without direct calls */
117# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
118# error MCOUNT_INSN_SIZE not defined with direct calls enabled
119# endif
120# define MCOUNT_INSN_SIZE 0
121#endif
122
123int function_graph_enter(unsigned long ret, unsigned long func,
124 unsigned long frame_pointer, unsigned long *retp)
125{
126 struct ftrace_graph_ent trace;
127
128#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
129 /*
130 * Skip graph tracing if the return location is served by direct trampoline,
131 * since call sequence and return addresses are unpredictable anyway.
132 * Ex: BPF trampoline may call original function and may skip frame
133 * depending on type of BPF programs attached.
134 */
135 if (ftrace_direct_func_count &&
136 ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
137 return -EBUSY;
138#endif
139 trace.func = func;
140 trace.depth = ++current->curr_ret_depth;
141
142 if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
143 goto out;
144
145 /* Only trace if the calling function expects to */
146 if (!ftrace_graph_entry(&trace))
147 goto out_ret;
148
149 return 0;
150 out_ret:
151 current->curr_ret_stack--;
152 out:
153 current->curr_ret_depth--;
154 return -EBUSY;
155}
156
157/* Retrieve a function return address from the trace stack on thread info. */
158static void
159ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
160 unsigned long frame_pointer)
161{
162 int index;
163
164 index = current->curr_ret_stack;
165
166 if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
167 ftrace_graph_stop();
168 WARN_ON(1);
169 /* Might as well panic, otherwise we have nowhere to go */
170 *ret = (unsigned long)panic;
171 return;
172 }
173
174#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
175 /*
176 * The arch may choose to record the frame pointer used
177 * and check it here to make sure that it is what we expect it
178 * to be. If gcc does not set the place holder of the return
179 * address in the frame pointer, and does a copy instead, then
180 * the function graph trace will fail. This test detects this
181 * case.
182 *
183 * Currently, x86_32 with optimize for size (-Os) makes the latest
184 * gcc do the above.
185 *
186 * Note, -mfentry does not use frame pointers, and this test
187 * is not needed if CC_USING_FENTRY is set.
188 */
189 if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
190 ftrace_graph_stop();
191 WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
192 " from func %ps return to %lx\n",
193 current->ret_stack[index].fp,
194 frame_pointer,
195 (void *)current->ret_stack[index].func,
196 current->ret_stack[index].ret);
197 *ret = (unsigned long)panic;
198 return;
199 }
200#endif
201
202 *ret = current->ret_stack[index].ret;
203 trace->func = current->ret_stack[index].func;
204 trace->calltime = current->ret_stack[index].calltime;
205 trace->overrun = atomic_read(&current->trace_overrun);
206 trace->depth = current->curr_ret_depth--;
207 /*
208 * We still want to trace interrupts coming in if
209 * max_depth is set to 1. Make sure the decrement is
210 * seen before ftrace_graph_return.
211 */
212 barrier();
213}
214
215/*
216 * Hibernation protection.
217 * The state of the current task is too unstable during
218 * suspend/restore to disk. We want to protect against that.
219 */
220static int
221ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
222 void *unused)
223{
224 switch (state) {
225 case PM_HIBERNATION_PREPARE:
226 pause_graph_tracing();
227 break;
228
229 case PM_POST_HIBERNATION:
230 unpause_graph_tracing();
231 break;
232 }
233 return NOTIFY_DONE;
234}
235
236static struct notifier_block ftrace_suspend_notifier = {
237 .notifier_call = ftrace_suspend_notifier_call,
238};
239
240/* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
241struct fgraph_ret_regs;
242
243/*
244 * Send the trace to the ring-buffer.
245 * @return the original return address.
246 */
247static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
248 unsigned long frame_pointer)
249{
250 struct ftrace_graph_ret trace;
251 unsigned long ret;
252
253 ftrace_pop_return_trace(&trace, &ret, frame_pointer);
254#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
255 trace.retval = fgraph_ret_regs_return_value(ret_regs);
256#endif
257 trace.rettime = trace_clock_local();
258 ftrace_graph_return(&trace);
259 /*
260 * The ftrace_graph_return() may still access the current
261 * ret_stack structure, we need to make sure the update of
262 * curr_ret_stack is after that.
263 */
264 barrier();
265 current->curr_ret_stack--;
266
267 if (unlikely(!ret)) {
268 ftrace_graph_stop();
269 WARN_ON(1);
270 /* Might as well panic. What else to do? */
271 ret = (unsigned long)panic;
272 }
273
274 return ret;
275}
276
277/*
278 * After all architectures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
279 * leave only ftrace_return_to_handler(ret_regs).
280 */
281#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
282unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
283{
284 return __ftrace_return_to_handler(ret_regs,
285 fgraph_ret_regs_frame_pointer(ret_regs));
286}
287#else
288unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
289{
290 return __ftrace_return_to_handler(NULL, frame_pointer);
291}
292#endif
293
294/**
295 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
296 * @task: The task to read the shadow stack from
297 * @idx: Index down the shadow stack
298 *
299 * Return the ret_stack entry on the shadow stack of the @task at the
300 * call graph at @idx starting with zero. If @idx is zero, it
301 * will return the last saved ret_stack entry. If it is greater than
302 * zero, it will return the corresponding ret_stack for the depth
303 * of saved return addresses.
304 */
305struct ftrace_ret_stack *
306ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
307{
308 idx = task->curr_ret_stack - idx;
309
310 if (idx >= 0 && idx <= task->curr_ret_stack)
311 return &task->ret_stack[idx];
312
313 return NULL;
314}
315
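/*
 * Example: dumping the return addresses currently saved on a task's shadow
 * stack (a minimal sketch; the loop and the pr_info() reporting are
 * hypothetical, only ftrace_graph_get_ret_stack() comes from this file):
 *
 *	struct ftrace_ret_stack *ret_stack;
 *	int i = 0;
 *
 *	while ((ret_stack = ftrace_graph_get_ret_stack(task, i))) {
 *		pr_info("depth %d: %ps\n", i, (void *)ret_stack->func);
 *		i++;
 *	}
 */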
316/**
317 * ftrace_graph_ret_addr - convert a potentially modified stack return address
318 * to its original value
319 *
320 * This function can be called by stack unwinding code to convert a found stack
321 * return address ('ret') to its original value, in case the function graph
322 * tracer has modified it to be 'return_to_handler'. If the address hasn't
323 * been modified, the unchanged value of 'ret' is returned.
324 *
325 * 'idx' is a state variable which should be initialized by the caller to zero
326 * before the first call.
327 *
328 * 'retp' is a pointer to the return address on the stack. It's ignored if
329 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
330 */
331#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
332unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
333 unsigned long ret, unsigned long *retp)
334{
335 int index = task->curr_ret_stack;
336 int i;
337
338 if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
339 return ret;
340
341 if (index < 0)
342 return ret;
343
344 for (i = 0; i <= index; i++)
345 if (task->ret_stack[i].retp == retp)
346 return task->ret_stack[i].ret;
347
348 return ret;
349}
350#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
351unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
352 unsigned long ret, unsigned long *retp)
353{
354 int task_idx;
355
356 if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
357 return ret;
358
359 task_idx = task->curr_ret_stack;
360
361 if (!task->ret_stack || task_idx < *idx)
362 return ret;
363
364 task_idx -= *idx;
365 (*idx)++;
366
367 return task->ret_stack[task_idx].ret;
368}
369#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
370
371static struct ftrace_ops graph_ops = {
372 .func = ftrace_graph_func,
373 .flags = FTRACE_OPS_FL_INITIALIZED |
374 FTRACE_OPS_FL_PID |
375 FTRACE_OPS_GRAPH_STUB,
376#ifdef FTRACE_GRAPH_TRAMP_ADDR
377 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
378 /* trampoline_size is only needed for dynamically allocated tramps */
379#endif
380 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
381};
382
383void ftrace_graph_sleep_time_control(bool enable)
384{
385 fgraph_sleep_time = enable;
386}
387
388int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
389{
390 return 0;
391}
392
393/*
394 * Simply points to ftrace_stub, but with the proper protocol.
395 * Defined by the linker script in linux/vmlinux.lds.h
396 */
397extern void ftrace_stub_graph(struct ftrace_graph_ret *);
398
399/* The callbacks that hook a function */
400trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
401trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
402static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
403
404/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
405static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
406{
407 int i;
408 int ret = 0;
409 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
410 struct task_struct *g, *t;
411
412 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
413 ret_stack_list[i] =
414 kmalloc_array(FTRACE_RETFUNC_DEPTH,
415 sizeof(struct ftrace_ret_stack),
416 GFP_KERNEL);
417 if (!ret_stack_list[i]) {
418 start = 0;
419 end = i;
420 ret = -ENOMEM;
421 goto free;
422 }
423 }
424
425 rcu_read_lock();
426 for_each_process_thread(g, t) {
427 if (start == end) {
428 ret = -EAGAIN;
429 goto unlock;
430 }
431
432 if (t->ret_stack == NULL) {
433 atomic_set(&t->trace_overrun, 0);
434 t->curr_ret_stack = -1;
435 t->curr_ret_depth = -1;
436 /* Make sure the tasks see the -1 first: */
437 smp_wmb();
438 t->ret_stack = ret_stack_list[start++];
439 }
440 }
441
442unlock:
443 rcu_read_unlock();
444free:
445 for (i = start; i < end; i++)
446 kfree(ret_stack_list[i]);
447 return ret;
448}
449
450static void
451ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
452 struct task_struct *prev,
453 struct task_struct *next,
454 unsigned int prev_state)
455{
456 unsigned long long timestamp;
457 int index;
458
459 /*
460 * Does the user want to count the time a function was asleep?
461 * If so, do not update the time stamps.
462 */
463 if (fgraph_sleep_time)
464 return;
465
466 timestamp = trace_clock_local();
467
468 prev->ftrace_timestamp = timestamp;
469
470 /* only process tasks that we timestamped */
471 if (!next->ftrace_timestamp)
472 return;
473
474 /*
475 * Update all the counters in next to make up for the
476 * time next was sleeping.
477 */
478 timestamp -= next->ftrace_timestamp;
479
480 for (index = next->curr_ret_stack; index >= 0; index--)
481 next->ret_stack[index].calltime += timestamp;
482}
483
484static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
485{
486 if (!ftrace_ops_test(&global_ops, trace->func, NULL))
487 return 0;
488 return __ftrace_graph_entry(trace);
489}
490
491/*
492 * The function graph tracer should only trace the functions defined
493 * by set_ftrace_filter and set_ftrace_notrace. If another function
494 * tracer ops is registered, the graph tracer requires testing the
495 * function against the global ops, and not just trace any function
496 * that any ftrace_ops has registered.
497 */
498void update_function_graph_func(void)
499{
500 struct ftrace_ops *op;
501 bool do_test = false;
502
503 /*
504 * The graph and global ops share the same set of functions
505 * to test. If any other ops is on the list, then
506 * the graph tracing needs to test if it's the function
507 * it should call.
508 */
509 do_for_each_ftrace_op(op, ftrace_ops_list) {
510 if (op != &global_ops && op != &graph_ops &&
511 op != &ftrace_list_end) {
512 do_test = true;
513 /* in double loop, break out with goto */
514 goto out;
515 }
516 } while_for_each_ftrace_op(op);
517 out:
518 if (do_test)
519 ftrace_graph_entry = ftrace_graph_entry_test;
520 else
521 ftrace_graph_entry = __ftrace_graph_entry;
522}
523
524static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
525
526static void
527graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
528{
529 atomic_set(&t->trace_overrun, 0);
530 t->ftrace_timestamp = 0;
531 /* make curr_ret_stack visible before we add the ret_stack */
532 smp_wmb();
533 t->ret_stack = ret_stack;
534}
535
536/*
537 * Allocate a return stack for the idle task. May be the first
538 * time through, or it may be done by CPU hotplug online.
539 */
540void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
541{
542 t->curr_ret_stack = -1;
543 t->curr_ret_depth = -1;
544 /*
545 * The idle task has no parent, it either has its own
546 * stack or no stack at all.
547 */
548 if (t->ret_stack)
549 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
550
551 if (ftrace_graph_active) {
552 struct ftrace_ret_stack *ret_stack;
553
554 ret_stack = per_cpu(idle_ret_stack, cpu);
555 if (!ret_stack) {
556 ret_stack =
557 kmalloc_array(FTRACE_RETFUNC_DEPTH,
558 sizeof(struct ftrace_ret_stack),
559 GFP_KERNEL);
560 if (!ret_stack)
561 return;
562 per_cpu(idle_ret_stack, cpu) = ret_stack;
563 }
564 graph_init_task(t, ret_stack);
565 }
566}
567
568/* Allocate a return stack for newly created task */
569void ftrace_graph_init_task(struct task_struct *t)
570{
571 /* Make sure we do not use the parent ret_stack */
572 t->ret_stack = NULL;
573 t->curr_ret_stack = -1;
574 t->curr_ret_depth = -1;
575
576 if (ftrace_graph_active) {
577 struct ftrace_ret_stack *ret_stack;
578
579 ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
580 sizeof(struct ftrace_ret_stack),
581 GFP_KERNEL);
582 if (!ret_stack)
583 return;
584 graph_init_task(t, ret_stack);
585 }
586}
587
588void ftrace_graph_exit_task(struct task_struct *t)
589{
590 struct ftrace_ret_stack *ret_stack = t->ret_stack;
591
592 t->ret_stack = NULL;
593 /* NULL must become visible to IRQs before we free it: */
594 barrier();
595
596 kfree(ret_stack);
597}
598
599/* Allocate a return stack for each task */
600static int start_graph_tracing(void)
601{
602 struct ftrace_ret_stack **ret_stack_list;
603 int ret, cpu;
604
605 ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
606 sizeof(struct ftrace_ret_stack *),
607 GFP_KERNEL);
608
609 if (!ret_stack_list)
610 return -ENOMEM;
611
612 /* The cpu_boot init_task->ret_stack will never be freed */
613 for_each_online_cpu(cpu) {
614 if (!idle_task(cpu)->ret_stack)
615 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
616 }
617
618 do {
619 ret = alloc_retstack_tasklist(ret_stack_list);
620 } while (ret == -EAGAIN);
621
622 if (!ret) {
623 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
624 if (ret)
625 pr_info("ftrace_graph: Couldn't activate tracepoint"
626 " probe to kernel_sched_switch\n");
627 }
628
629 kfree(ret_stack_list);
630 return ret;
631}
632
633int register_ftrace_graph(struct fgraph_ops *gops)
634{
635 int ret = 0;
636
637 mutex_lock(&ftrace_lock);
638
639 /* we currently allow only one tracer registered at a time */
640 if (ftrace_graph_active) {
641 ret = -EBUSY;
642 goto out;
643 }
644
645 register_pm_notifier(&ftrace_suspend_notifier);
646
647 ftrace_graph_active++;
648 ret = start_graph_tracing();
649 if (ret) {
650 ftrace_graph_active--;
651 goto out;
652 }
653
654 ftrace_graph_return = gops->retfunc;
655
656 /*
657 * Update the indirect function to the entryfunc, and the
658 * function that gets called to the entry_test first. Then
659 * call the update fgraph entry function to determine if
660 * the entryfunc should be called directly or not.
661 */
662 __ftrace_graph_entry = gops->entryfunc;
663 ftrace_graph_entry = ftrace_graph_entry_test;
664 update_function_graph_func();
665
666 ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
667out:
668 mutex_unlock(&ftrace_lock);
669 return ret;
670}
671
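/*
 * Example: how a tracer registers with this interface (a minimal sketch;
 * "my_entry", "my_return" and "my_gops" are hypothetical names). Note that
 * this version allows only one fgraph_ops registered at a time, as the
 * -EBUSY check above enforces:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		// A non-zero return means: also trace this function's return.
 *		return 1;
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	ret = register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 */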
672void unregister_ftrace_graph(struct fgraph_ops *gops)
673{
674 mutex_lock(&ftrace_lock);
675
676 if (unlikely(!ftrace_graph_active))
677 goto out;
678
679 ftrace_graph_active--;
680 ftrace_graph_return = ftrace_stub_graph;
681 ftrace_graph_entry = ftrace_graph_entry_stub;
682 __ftrace_graph_entry = ftrace_graph_entry_stub;
683 ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
684 unregister_pm_notifier(&ftrace_suspend_notifier);
685 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
686
687 out:
688 mutex_unlock(&ftrace_lock);
689}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Infrastructure to hook into function calls and returns.
4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
5 * Mostly borrowed from function tracer which
6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
7 *
8 * Highly modified by Steven Rostedt (VMware).
9 */
10#include <linux/bits.h>
11#include <linux/jump_label.h>
12#include <linux/suspend.h>
13#include <linux/ftrace.h>
14#include <linux/static_call.h>
15#include <linux/slab.h>
16
17#include <trace/events/sched.h>
18
19#include "ftrace_internal.h"
20#include "trace.h"
21
22/*
23 * FGRAPH_FRAME_SIZE: Size in bytes of the meta data on the shadow stack
24 * FGRAPH_FRAME_OFFSET: Size in long words of the meta data frame
25 */
26#define FGRAPH_FRAME_SIZE sizeof(struct ftrace_ret_stack)
27#define FGRAPH_FRAME_OFFSET DIV_ROUND_UP(FGRAPH_FRAME_SIZE, sizeof(long))
28
29/*
30 * On entry to a function (via function_graph_enter()), a new fgraph frame
31 * (ftrace_ret_stack) is pushed onto the stack as well as a word that
32 * holds a bitmask and a type (called "bitmap"). The bitmap is defined as:
33 *
34 * bits: 0 - 9 offset in words from the previous ftrace_ret_stack
35 *
36 * bits: 10 - 11 Type of storage
37 * 0 - reserved
38 * 1 - bitmap of fgraph_array index
39 * 2 - reserved data
40 *
41 * For type with "bitmap of fgraph_array index" (FGRAPH_TYPE_BITMAP):
42 * bits: 12 - 27 The bitmap of fgraph_ops fgraph_array index
43 * That is, it's a bitmask of 0-15 (16 bits)
44 * where if a corresponding ops in the fgraph_array[]
45 * expects a callback from the return of the function
46 * its corresponding bit will be set.
47 *
48 *
49 * The top of the ret_stack (when not empty) will always have a reference
50 * word that points to the last fgraph frame that was saved.
51 *
52 * For reserved data:
53 * bits: 12 - 17 The size in words that is stored
54 * bits: 18 - 23 The index of fgraph_array, which shows who is stored
55 *
56 * That is, at the end of function_graph_enter, if the first and fourth
57 * fgraph_ops on the fgraph_array[] (index 0 and 3) need their retfunc called
58 * on the return of the function being traced, and the fourth fgraph_ops
59 * stored two words of data, this is what will be on the task's shadow
60 * ret_stack: (the stack grows upward)
61 *
62 * ret_stack[SHADOW_STACK_OFFSET]
63 * | SHADOW_STACK_TASK_VARS(ret_stack)[15] |
64 * ...
65 * | SHADOW_STACK_TASK_VARS(ret_stack)[0] |
66 * ret_stack[SHADOW_STACK_MAX_OFFSET]
67 * ...
68 * | | <- task->curr_ret_stack
69 * +--------------------------------------------+
70 * | (3 << 12) | (3 << 10) | FGRAPH_FRAME_OFFSET|
71 * | *or put another way* |
72 * | (3 << FGRAPH_DATA_INDEX_SHIFT)| \ | This is for fgraph_ops[3].
73 * | ((2 - 1) << FGRAPH_DATA_SHIFT)| \ | The data size is 2 words.
74 * | (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT)| \ |
75 * | (offset2:FGRAPH_FRAME_OFFSET+3) | <- the offset2 is from here
76 * +--------------------------------------------+ ( It is 4 words from the ret_stack)
77 * | STORED DATA WORD 2 |
78 * | STORED DATA WORD 1 |
79 * +--------------------------------------------+
80 * | (9 << 12) | (1 << 10) | FGRAPH_FRAME_OFFSET|
81 * | *or put another way* |
82 * | (BIT(3)|BIT(0)) << FGRAPH_INDEX_SHIFT | \ |
83 * | FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT| \ |
84 * | (offset1:FGRAPH_FRAME_OFFSET) | <- the offset1 is from here
85 * +--------------------------------------------+
86 * | struct ftrace_ret_stack |
87 * | (stores the saved ret pointer) | <- the offset points here
88 * +--------------------------------------------+
89 * | (X) | (N) | ( N words away from
90 * | | previous ret_stack)
91 * ...
92 * ret_stack[0]
93 *
94 * If a backtrace is required, and the real return pointer needs to be
95 * fetched, then it looks at the task's curr_ret_stack offset, if it
96 * is greater than zero (reserved, or right before popped), it would mask
97 * the value by FGRAPH_FRAME_OFFSET_MASK to get the offset of the
98 * ftrace_ret_stack structure stored on the shadow stack.
99 */
100
101/*
102 * The following is for the top word on the stack:
103 *
104 * FGRAPH_FRAME_OFFSET (0-9) holds the offset delta to the fgraph frame
105 * FGRAPH_TYPE (10-11) holds the type of word this is.
106 * (RESERVED or BITMAP)
107 */
108#define FGRAPH_FRAME_OFFSET_BITS 10
109#define FGRAPH_FRAME_OFFSET_MASK GENMASK(FGRAPH_FRAME_OFFSET_BITS - 1, 0)
110
111#define FGRAPH_TYPE_BITS 2
112#define FGRAPH_TYPE_MASK GENMASK(FGRAPH_TYPE_BITS - 1, 0)
113#define FGRAPH_TYPE_SHIFT FGRAPH_FRAME_OFFSET_BITS
114
115enum {
116 FGRAPH_TYPE_RESERVED = 0,
117 FGRAPH_TYPE_BITMAP = 1,
118 FGRAPH_TYPE_DATA = 2,
119};
120
121/*
122 * For BITMAP type:
123 * FGRAPH_INDEX (12-27) bits holding the gops index wanting return callback called
124 */
125#define FGRAPH_INDEX_BITS 16
126#define FGRAPH_INDEX_MASK GENMASK(FGRAPH_INDEX_BITS - 1, 0)
127#define FGRAPH_INDEX_SHIFT (FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
128
129/*
130 * For DATA type:
131 * FGRAPH_DATA (12-17) bits hold the size of data (in words)
132 * FGRAPH_INDEX (18-23) bits hold the index for which gops->idx the data is for
133 *
134 * Note:
135 * data_size == 0 means 1 word, and 31 (=2^5 - 1) means 32 words.
136 */
137#define FGRAPH_DATA_BITS 5
138#define FGRAPH_DATA_MASK GENMASK(FGRAPH_DATA_BITS - 1, 0)
139#define FGRAPH_DATA_SHIFT (FGRAPH_TYPE_SHIFT + FGRAPH_TYPE_BITS)
140#define FGRAPH_MAX_DATA_SIZE (sizeof(long) * (1 << FGRAPH_DATA_BITS))
141
142#define FGRAPH_DATA_INDEX_BITS 4
143#define FGRAPH_DATA_INDEX_MASK GENMASK(FGRAPH_DATA_INDEX_BITS - 1, 0)
144#define FGRAPH_DATA_INDEX_SHIFT (FGRAPH_DATA_SHIFT + FGRAPH_DATA_BITS)
145
146#define FGRAPH_MAX_INDEX \
147 ((FGRAPH_INDEX_SIZE << FGRAPH_DATA_BITS) + FGRAPH_RET_INDEX)
148
149#define FGRAPH_ARRAY_SIZE FGRAPH_INDEX_BITS
150
151/*
152 * SHADOW_STACK_SIZE: The size in bytes of the entire shadow stack
153 * SHADOW_STACK_OFFSET: The size in long words of the shadow stack
154 * SHADOW_STACK_MAX_OFFSET: The max offset of the stack for a new frame to be added
155 */
156#define SHADOW_STACK_SIZE (4096)
157#define SHADOW_STACK_OFFSET (SHADOW_STACK_SIZE / sizeof(long))
158/* Leave a buffer at the end */
159#define SHADOW_STACK_MAX_OFFSET \
160 (SHADOW_STACK_OFFSET - (FGRAPH_FRAME_OFFSET + 1 + FGRAPH_ARRAY_SIZE))
161
162/* RET_STACK(): Return the frame from a given @offset from task @t */
163#define RET_STACK(t, offset) ((struct ftrace_ret_stack *)(&(t)->ret_stack[offset]))
164
165/*
166 * Each fgraph_ops has a reserved unsigned long at the end (top) of the
167 * ret_stack to store task specific state.
168 */
169#define SHADOW_STACK_TASK_VARS(ret_stack) \
170 ((unsigned long *)(&(ret_stack)[SHADOW_STACK_OFFSET - FGRAPH_ARRAY_SIZE]))
171
172DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
173int ftrace_graph_active;
174
175static struct kmem_cache *fgraph_stack_cachep;
176
177static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
178static unsigned long fgraph_array_bitmask;
179
180/* LRU index table for fgraph_array */
181static int fgraph_lru_table[FGRAPH_ARRAY_SIZE];
182static int fgraph_lru_next;
183static int fgraph_lru_last;
184
185/* Initialize fgraph_lru_table with unused index */
186static void fgraph_lru_init(void)
187{
188 int i;
189
190 for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
191 fgraph_lru_table[i] = i;
192}
193
194/* Release the used index to the LRU table */
195static int fgraph_lru_release_index(int idx)
196{
197 if (idx < 0 || idx >= FGRAPH_ARRAY_SIZE ||
198 WARN_ON_ONCE(fgraph_lru_table[fgraph_lru_last] != -1))
199 return -1;
200
201 fgraph_lru_table[fgraph_lru_last] = idx;
202 fgraph_lru_last = (fgraph_lru_last + 1) % FGRAPH_ARRAY_SIZE;
203
204 clear_bit(idx, &fgraph_array_bitmask);
205 return 0;
206}
207
208/* Allocate a new index from LRU table */
209static int fgraph_lru_alloc_index(void)
210{
211 int idx = fgraph_lru_table[fgraph_lru_next];
212
213 /* No id is available */
214 if (idx == -1)
215 return -1;
216
217 fgraph_lru_table[fgraph_lru_next] = -1;
218 fgraph_lru_next = (fgraph_lru_next + 1) % FGRAPH_ARRAY_SIZE;
219
220 set_bit(idx, &fgraph_array_bitmask);
221 return idx;
222}
223
224/* Get the offset to the fgraph frame from a ret_stack value */
225static inline int __get_offset(unsigned long val)
226{
227 return val & FGRAPH_FRAME_OFFSET_MASK;
228}
229
230/* Get the type of word from a ret_stack value */
231static inline int __get_type(unsigned long val)
232{
233 return (val >> FGRAPH_TYPE_SHIFT) & FGRAPH_TYPE_MASK;
234}
235
236/* Get the data_index for a DATA type ret_stack word */
237static inline int __get_data_index(unsigned long val)
238{
239 return (val >> FGRAPH_DATA_INDEX_SHIFT) & FGRAPH_DATA_INDEX_MASK;
240}
241
242/* Get the data_size for a DATA type ret_stack word */
243static inline int __get_data_size(unsigned long val)
244{
245 return ((val >> FGRAPH_DATA_SHIFT) & FGRAPH_DATA_MASK) + 1;
246}
247
248/* Get the word from the ret_stack at @offset */
249static inline unsigned long get_fgraph_entry(struct task_struct *t, int offset)
250{
251 return t->ret_stack[offset];
252}
253
254/* Get the FRAME_OFFSET from the word from the @offset on ret_stack */
255static inline int get_frame_offset(struct task_struct *t, int offset)
256{
257 return __get_offset(t->ret_stack[offset]);
258}
259
260/* For BITMAP type: get the bitmask from the @offset at ret_stack */
261static inline unsigned long
262get_bitmap_bits(struct task_struct *t, int offset)
263{
264 return (t->ret_stack[offset] >> FGRAPH_INDEX_SHIFT) & FGRAPH_INDEX_MASK;
265}
266
267/* Write the bitmap to the ret_stack at @offset (does index, offset and bitmask) */
268static inline void
269set_bitmap(struct task_struct *t, int offset, unsigned long bitmap)
270{
271 t->ret_stack[offset] = (bitmap << FGRAPH_INDEX_SHIFT) |
272 (FGRAPH_TYPE_BITMAP << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
273}
274
275/* For DATA type: get the data saved under the ret_stack word at @offset */
276static inline void *get_data_type_data(struct task_struct *t, int offset)
277{
278 unsigned long val = t->ret_stack[offset];
279
280 if (__get_type(val) != FGRAPH_TYPE_DATA)
281 return NULL;
282 offset -= __get_data_size(val);
283 return (void *)&t->ret_stack[offset];
284}
285
286/* Create the ret_stack word for a DATA type */
287static inline unsigned long make_data_type_val(int idx, int size, int offset)
288{
289 return (idx << FGRAPH_DATA_INDEX_SHIFT) |
290 ((size - 1) << FGRAPH_DATA_SHIFT) |
291 (FGRAPH_TYPE_DATA << FGRAPH_TYPE_SHIFT) | offset;
292}
293
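/*
 * Worked example: the DATA word from the shadow stack diagram at the top of
 * this file round-trips through the helpers above (illustration only):
 *
 *	val = make_data_type_val(3, 2, FGRAPH_FRAME_OFFSET + 3);
 *
 *	__get_type(val)       == FGRAPH_TYPE_DATA
 *	__get_data_index(val) == 3	(the data belongs to fgraph_array[3])
 *	__get_data_size(val)  == 2	(two words of reserved storage)
 *	__get_offset(val)     == FGRAPH_FRAME_OFFSET + 3
 */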
294/* ftrace_graph_entry set to this to tell some archs to run function graph */
295static int entry_run(struct ftrace_graph_ent *trace, struct fgraph_ops *ops)
296{
297 return 0;
298}
299
300/* ftrace_graph_return set to this to tell some archs to run function graph */
301static void return_run(struct ftrace_graph_ret *trace, struct fgraph_ops *ops)
302{
303}
304
305static void ret_stack_set_task_var(struct task_struct *t, int idx, long val)
306{
307 unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
308
309 gvals[idx] = val;
310}
311
312static unsigned long *
313ret_stack_get_task_var(struct task_struct *t, int idx)
314{
315 unsigned long *gvals = SHADOW_STACK_TASK_VARS(t->ret_stack);
316
317 return &gvals[idx];
318}
319
320static void ret_stack_init_task_vars(unsigned long *ret_stack)
321{
322 unsigned long *gvals = SHADOW_STACK_TASK_VARS(ret_stack);
323
324 memset(gvals, 0, sizeof(*gvals) * FGRAPH_ARRAY_SIZE);
325}
326
327/**
328 * fgraph_reserve_data - Reserve storage on the task's ret_stack
329 * @idx: The index of fgraph_array
330 * @size_bytes: The size in bytes to reserve
331 *
332 * Reserves space of up to FGRAPH_MAX_DATA_SIZE bytes on the
333 * task's ret_stack shadow stack, for a given fgraph_ops during
334 * the entryfunc() call. If entryfunc() returns zero, the storage
335 * is discarded. An entryfunc() can only call this once per iteration.
336 * The fgraph_ops retfunc() can retrieve this stored data with
337 * fgraph_retrieve_data().
338 *
339 * Returns: On success, a pointer to the data on the stack.
340 * Otherwise, NULL if there's not enough space left on the
341 * ret_stack for the data, or if fgraph_reserve_data() was called
342 * more than once for a single entryfunc() call.
343 */
344void *fgraph_reserve_data(int idx, int size_bytes)
345{
346 unsigned long val;
347 void *data;
348 int curr_ret_stack = current->curr_ret_stack;
349 int data_size;
350
351 if (size_bytes > FGRAPH_MAX_DATA_SIZE)
352 return NULL;
353
354 /* Convert the data size to number of longs. */
355 data_size = (size_bytes + sizeof(long) - 1) >> (sizeof(long) == 4 ? 2 : 3);
356
357 val = get_fgraph_entry(current, curr_ret_stack - 1);
358 data = &current->ret_stack[curr_ret_stack];
359
360 curr_ret_stack += data_size + 1;
361 if (unlikely(curr_ret_stack >= SHADOW_STACK_MAX_OFFSET))
362 return NULL;
363
364 val = make_data_type_val(idx, data_size, __get_offset(val) + data_size + 1);
365
366 /* Set the last word to be reserved */
367 current->ret_stack[curr_ret_stack - 1] = val;
368
369 /* Make sure interrupts see this */
370 barrier();
371 current->curr_ret_stack = curr_ret_stack;
372 /* Again sync with interrupts, and reset reserve */
373 current->ret_stack[curr_ret_stack - 1] = val;
374
375 return data;
376}
377
378/**
379 * fgraph_retrieve_data - Retrieve stored data from fgraph_reserve_data()
380 * @idx: the index of fgraph_array (fgraph_ops::idx)
381 * @size_bytes: pointer to retrieved data size.
382 *
383 * This is to be called by a fgraph_ops retfunc(), to retrieve data that
384 * was stored by the fgraph_ops entryfunc() on the function entry.
385 * That is, this will retrieve the data that was reserved on the
386 * entry of the function that corresponds to the exit of the function
387 * that the fgraph_ops retfunc() is called on.
388 *
389 * Returns: The stored data from fgraph_reserve_data() called by the
390 * matching entryfunc() for the retfunc() this is called from.
391 * Or NULL if there was nothing stored.
392 */
393void *fgraph_retrieve_data(int idx, int *size_bytes)
394{
395 return fgraph_retrieve_parent_data(idx, size_bytes, 0);
396}
397
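/*
 * Example: an entryfunc/retfunc pair passing per-call data from function
 * entry to function exit (a minimal sketch; "my_entry", "my_ret" and the
 * latency printing are hypothetical, while fgraph_reserve_data() and
 * fgraph_retrieve_data() are the API defined above):
 *
 *	static int my_entry(struct ftrace_graph_ent *trace,
 *			    struct fgraph_ops *gops)
 *	{
 *		u64 *calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
 *
 *		// Returning zero discards the reserved storage and skips
 *		// the retfunc for this function.
 *		if (!calltime)
 *			return 0;
 *		*calltime = trace_clock_local();
 *		return 1;
 *	}
 *
 *	static void my_ret(struct ftrace_graph_ret *trace,
 *			   struct fgraph_ops *gops)
 *	{
 *		int size;
 *		u64 *calltime = fgraph_retrieve_data(gops->idx, &size);
 *
 *		if (calltime)
 *			trace_printk("%ps took %llu ns\n", (void *)trace->func,
 *				     trace->rettime - *calltime);
 *	}
 */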
398/**
399 * fgraph_get_task_var - retrieve a task specific state variable
400 * @gops: The ftrace_ops that owns the task specific variable
401 *
402 * Every registered fgraph_ops has a task state variable
403 * reserved on the task's ret_stack. This function returns the
404 * address to that variable.
405 *
406 * Returns the address to the fgraph_ops @gops tasks specific
407 * unsigned long variable.
408 */
409unsigned long *fgraph_get_task_var(struct fgraph_ops *gops)
410{
411 return ret_stack_get_task_var(current, gops->idx);
412}
413
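/*
 * Example: a callback keeping per-task state in its reserved word (a minimal
 * sketch; counting nested entries is a hypothetical use). The variable lives
 * on the task's shadow stack and is zeroed when the fgraph_ops is registered
 * (see init_task_vars() below):
 *
 *	unsigned long *nested = fgraph_get_task_var(gops);
 *
 *	(*nested)++;
 */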
414/*
415 * @offset: The offset into @t->ret_stack to find the ret_stack entry
416 * @frame_offset: Where to place the offset into @t->ret_stack of that entry
417 *
418 * Returns a pointer to the previous ret_stack below @offset or NULL
419 * when it reaches the bottom of the stack.
420 *
421 * Calling this with:
422 *
423 * offset = task->curr_ret_stack;
424 * do {
425 * ret_stack = get_ret_stack(task, offset, &offset);
426 * } while (ret_stack);
427 *
428 * Will iterate through all the ret_stack entries from curr_ret_stack
429 * down to the first one.
430 */
431static inline struct ftrace_ret_stack *
432get_ret_stack(struct task_struct *t, int offset, int *frame_offset)
433{
434 int offs;
435
436 BUILD_BUG_ON(FGRAPH_FRAME_SIZE % sizeof(long));
437
438 if (unlikely(offset <= 0))
439 return NULL;
440
441 offs = get_frame_offset(t, --offset);
442 if (WARN_ON_ONCE(offs <= 0 || offs > offset))
443 return NULL;
444
445 offset -= offs;
446
447 *frame_offset = offset;
448 return RET_STACK(t, offset);
449}
450
451/**
452 * fgraph_retrieve_parent_data - get data from a parent function
453 * @idx: The index into the fgraph_array (fgraph_ops::idx)
454 * @size_bytes: A pointer to retrieved data size
455 * @depth: The depth to find the parent (0 is the current function)
456 *
457 * This is similar to fgraph_retrieve_data() but can be used to retrieve
458 * data from a parent caller function.
459 *
460 * Return: a pointer to the specified parent data or NULL if not found
461 */
462void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth)
463{
464 struct ftrace_ret_stack *ret_stack = NULL;
465 int offset = current->curr_ret_stack;
466 unsigned long val;
467
468 if (offset <= 0)
469 return NULL;
470
471 for (;;) {
472 int next_offset;
473
474 ret_stack = get_ret_stack(current, offset, &next_offset);
475 if (!ret_stack || --depth < 0)
476 break;
477 offset = next_offset;
478 }
479
480 if (!ret_stack)
481 return NULL;
482
483 offset--;
484
485 val = get_fgraph_entry(current, offset);
486 while (__get_type(val) == FGRAPH_TYPE_DATA) {
487 if (__get_data_index(val) == idx)
488 goto found;
489 offset -= __get_data_size(val) + 1;
490 val = get_fgraph_entry(current, offset);
491 }
492 return NULL;
493found:
494 if (size_bytes)
495 *size_bytes = __get_data_size(val) * sizeof(long);
496 return get_data_type_data(current, offset);
497}
498
499/* Both enabled by default (can be cleared by function_graph tracer flags) */
500bool fgraph_sleep_time = true;
501
502#ifdef CONFIG_DYNAMIC_FTRACE
503/*
504 * archs can override this function if they must do something
505 * to enable hook for graph tracer.
506 */
507int __weak ftrace_enable_ftrace_graph_caller(void)
508{
509 return 0;
510}
511
512/*
513 * archs can override this function if they must do something
514 * to disable hook for graph tracer.
515 */
516int __weak ftrace_disable_ftrace_graph_caller(void)
517{
518 return 0;
519}
520#endif
521
522int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace,
523 struct fgraph_ops *gops)
524{
525 return 0;
526}
527
528static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace,
529 struct fgraph_ops *gops)
530{
531}
532
533static struct fgraph_ops fgraph_stub = {
534 .entryfunc = ftrace_graph_entry_stub,
535 .retfunc = ftrace_graph_ret_stub,
536};
537
538static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
539DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
540DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
541static DEFINE_STATIC_KEY_TRUE(fgraph_do_direct);
542
543/**
544 * ftrace_graph_stop - set to permanently disable function graph tracing
545 *
546 * In case of an error in function graph tracing, this is called
547 * to try to keep function graph tracing from causing any more harm.
548 * Usually this is pretty severe and this is called to try to at least
549 * get a warning out to the user.
550 */
551void ftrace_graph_stop(void)
552{
553 static_branch_enable(&kill_ftrace_graph);
554}
555
556/* Add a function return address to the trace stack on thread info.*/
557static int
558ftrace_push_return_trace(unsigned long ret, unsigned long func,
559 unsigned long frame_pointer, unsigned long *retp,
560 int fgraph_idx)
561{
562 struct ftrace_ret_stack *ret_stack;
563 unsigned long val;
564 int offset;
565
566 if (unlikely(ftrace_graph_is_dead()))
567 return -EBUSY;
568
569 if (!current->ret_stack)
570 return -EBUSY;
571
572 BUILD_BUG_ON(SHADOW_STACK_SIZE % sizeof(long));
573
574 /* Set val to "reserved" with the delta to the new fgraph frame */
575 val = (FGRAPH_TYPE_RESERVED << FGRAPH_TYPE_SHIFT) | FGRAPH_FRAME_OFFSET;
576
577 /*
578 * We must make sure the ret_stack is tested before we read
579 * anything else.
580 */
581 smp_rmb();
582
583 /*
584 * Check if there's room on the shadow stack to fit an fgraph frame
585 * and a bitmap word.
586 */
587 if (current->curr_ret_stack + FGRAPH_FRAME_OFFSET + 1 >= SHADOW_STACK_MAX_OFFSET) {
588 atomic_inc(&current->trace_overrun);
589 return -EBUSY;
590 }
591
592 offset = READ_ONCE(current->curr_ret_stack);
593 ret_stack = RET_STACK(current, offset);
594 offset += FGRAPH_FRAME_OFFSET;
595
596 /* ret offset = FGRAPH_FRAME_OFFSET ; type = reserved */
597 current->ret_stack[offset] = val;
598 ret_stack->ret = ret;
599 /*
600 * The unwinders expect curr_ret_stack to point to either zero
601 * or an offset where to find the next ret_stack. Even though the
602 * ret stack might be bogus, we want to write the ret and the
603 * offset to find the ret_stack before we increment the stack pointer.
604 * If an interrupt comes in now before we increment the curr_ret_stack
605 * it may blow away what we wrote. But that's fine, because the
606 * offset will still be correct (even though the 'ret' won't be).
607 * What we worry about is the offset being correct after we increment
608 * the curr_ret_stack and before we update that offset, as if an
609 * interrupt comes in and does an unwind stack dump, it will need
610 * at least a correct offset!
611 */
612 barrier();
613 WRITE_ONCE(current->curr_ret_stack, offset + 1);
614 /*
615 * This next barrier is to ensure that an interrupt coming in
616 * will not corrupt what we are about to write.
617 */
618 barrier();
619
620 /* Still keep it reserved even if an interrupt came in */
621 current->ret_stack[offset] = val;
622
623 ret_stack->ret = ret;
624 ret_stack->func = func;
625#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
626 ret_stack->fp = frame_pointer;
627#endif
628 ret_stack->retp = retp;
629 return offset;
630}
631
632/*
633 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
634 * functions. But those archs currently don't support direct functions
635 * anyway, and ftrace_find_rec_direct() is just a stub for them.
636 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
637 */
638#ifndef MCOUNT_INSN_SIZE
639/* Make sure this only works without direct calls */
640# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
641# error MCOUNT_INSN_SIZE not defined with direct calls enabled
642# endif
643# define MCOUNT_INSN_SIZE 0
644#endif
645
646/* If the caller does not use ftrace, call this function. */
647int function_graph_enter(unsigned long ret, unsigned long func,
648 unsigned long frame_pointer, unsigned long *retp)
649{
650 struct ftrace_graph_ent trace;
651 unsigned long bitmap = 0;
652 int offset;
653 int i;
654
655 trace.func = func;
656 trace.depth = ++current->curr_ret_depth;
657
658 offset = ftrace_push_return_trace(ret, func, frame_pointer, retp, 0);
659 if (offset < 0)
660 goto out;
661
662#ifdef CONFIG_HAVE_STATIC_CALL
663 if (static_branch_likely(&fgraph_do_direct)) {
664 int save_curr_ret_stack = current->curr_ret_stack;
665
666 if (static_call(fgraph_func)(&trace, fgraph_direct_gops))
667 bitmap |= BIT(fgraph_direct_gops->idx);
668 else
669 /* Clear out any saved storage */
670 current->curr_ret_stack = save_curr_ret_stack;
671 } else
672#endif
673 {
674 for_each_set_bit(i, &fgraph_array_bitmask,
675 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
676 struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
677 int save_curr_ret_stack;
678
679 if (gops == &fgraph_stub)
680 continue;
681
682 save_curr_ret_stack = current->curr_ret_stack;
683 if (ftrace_ops_test(&gops->ops, func, NULL) &&
684 gops->entryfunc(&trace, gops))
685 bitmap |= BIT(i);
686 else
687 /* Clear out any saved storage */
688 current->curr_ret_stack = save_curr_ret_stack;
689 }
690 }
691
692 if (!bitmap)
693 goto out_ret;
694
695 /*
696 * Since this function uses fgraph_idx = 0 as a tail-call checking
697 * flag, set that bit always.
698 */
699 set_bitmap(current, offset, bitmap | BIT(0));
700
701 return 0;
702 out_ret:
703 current->curr_ret_stack -= FGRAPH_FRAME_OFFSET + 1;
704 out:
705 current->curr_ret_depth--;
706 return -EBUSY;
707}
708
709/* Retrieve a function return address from the trace stack on thread info. */
710static struct ftrace_ret_stack *
711ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
712 unsigned long frame_pointer, int *offset)
713{
714 struct ftrace_ret_stack *ret_stack;
715
716 ret_stack = get_ret_stack(current, current->curr_ret_stack, offset);
717
718 if (unlikely(!ret_stack)) {
719 ftrace_graph_stop();
720 WARN(1, "Bad function graph ret_stack pointer: %d",
721 current->curr_ret_stack);
722 /* Might as well panic, otherwise we have nowhere to go */
723 *ret = (unsigned long)panic;
724 return NULL;
725 }
726
727#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
728 /*
729 * The arch may choose to record the frame pointer used
730 * and check it here to make sure that it is what we expect it
731 * to be. If gcc does not set the place holder of the return
732 * address in the frame pointer, and does a copy instead, then
733 * the function graph trace will fail. This test detects this
734 * case.
735 *
736 * Currently, x86_32 with optimize for size (-Os) makes the latest
737 * gcc do the above.
738 *
739 * Note, -mfentry does not use frame pointers, and this test
740 * is not needed if CC_USING_FENTRY is set.
741 */
742 if (unlikely(ret_stack->fp != frame_pointer)) {
743 ftrace_graph_stop();
744 WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
745 " from func %ps return to %lx\n",
746 ret_stack->fp,
747 frame_pointer,
748 (void *)ret_stack->func,
749 ret_stack->ret);
750 *ret = (unsigned long)panic;
751 return NULL;
752 }
753#endif
754
755 *offset += FGRAPH_FRAME_OFFSET;
756 *ret = ret_stack->ret;
757 trace->func = ret_stack->func;
758 trace->overrun = atomic_read(&current->trace_overrun);
759 trace->depth = current->curr_ret_depth;
760 /*
761 * We still want to trace interrupts coming in if
762 * max_depth is set to 1. Make sure the decrement is
763 * seen before ftrace_graph_return.
764 */
765 barrier();
766
767 return ret_stack;
768}
769
770/*
771 * Hibernation protection.
772 * The state of the current task is too unstable during
773 * suspend/restore to disk. We want to protect against that.
774 */
775static int
776ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
777 void *unused)
778{
779 switch (state) {
780 case PM_HIBERNATION_PREPARE:
781 pause_graph_tracing();
782 break;
783
784 case PM_POST_HIBERNATION:
785 unpause_graph_tracing();
786 break;
787 }
788 return NOTIFY_DONE;
789}
790
791static struct notifier_block ftrace_suspend_notifier = {
792 .notifier_call = ftrace_suspend_notifier_call,
793};
794
795/* fgraph_ret_regs is not defined without CONFIG_FUNCTION_GRAPH_RETVAL */
796struct fgraph_ret_regs;
797
798/*
799 * Send the trace to the ring-buffer.
800 * @return the original return address.
801 */
802static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs,
803 unsigned long frame_pointer)
804{
805 struct ftrace_ret_stack *ret_stack;
806 struct ftrace_graph_ret trace;
807 unsigned long bitmap;
808 unsigned long ret;
809 int offset;
810 int i;
811
812 ret_stack = ftrace_pop_return_trace(&trace, &ret, frame_pointer, &offset);
813
814 if (unlikely(!ret_stack)) {
815 ftrace_graph_stop();
816 WARN_ON(1);
817 /* Might as well panic. What else to do? */
818 return (unsigned long)panic;
819 }
820
821 trace.rettime = trace_clock_local();
822#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
823 trace.retval = fgraph_ret_regs_return_value(ret_regs);
824#endif
825
826 bitmap = get_bitmap_bits(current, offset);
827
828#ifdef CONFIG_HAVE_STATIC_CALL
829 if (static_branch_likely(&fgraph_do_direct)) {
830 if (test_bit(fgraph_direct_gops->idx, &bitmap))
831 static_call(fgraph_retfunc)(&trace, fgraph_direct_gops);
832 } else
833#endif
834 {
835 for_each_set_bit(i, &bitmap, sizeof(bitmap) * BITS_PER_BYTE) {
836 struct fgraph_ops *gops = READ_ONCE(fgraph_array[i]);
837
838 if (gops == &fgraph_stub)
839 continue;
840
841 gops->retfunc(&trace, gops);
842 }
843 }
844
845 /*
846 * The ftrace_graph_return() may still access the current
847 * ret_stack structure, we need to make sure the update of
848 * curr_ret_stack is after that.
849 */
850 barrier();
851 current->curr_ret_stack = offset - FGRAPH_FRAME_OFFSET;
852
853 current->curr_ret_depth--;
854 return ret;
855}
856
857/*
858 * After all architectures have selected HAVE_FUNCTION_GRAPH_RETVAL, we can
859 * leave only ftrace_return_to_handler(ret_regs).
860 */
861#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
862unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs)
863{
864 return __ftrace_return_to_handler(ret_regs,
865 fgraph_ret_regs_frame_pointer(ret_regs));
866}
867#else
868unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
869{
870 return __ftrace_return_to_handler(NULL, frame_pointer);
871}
872#endif
873
874/**
875 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
876 * @task: The task to read the shadow stack from.
877 * @idx: Index down the shadow stack
878 *
879 * Return the ret_stack entry on the shadow stack of the @task at the
880 * call graph at @idx starting with zero. If @idx is zero, it
881 * will return the last saved ret_stack entry. If it is greater than
882 * zero, it will return the corresponding ret_stack for the depth
883 * of saved return addresses.
884 */
885struct ftrace_ret_stack *
886ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
887{
888 struct ftrace_ret_stack *ret_stack = NULL;
889 int offset = task->curr_ret_stack;
890
891 if (offset < 0)
892 return NULL;
893
894 do {
895 ret_stack = get_ret_stack(task, offset, &offset);
896 } while (ret_stack && --idx >= 0);
897
898 return ret_stack;
899}
900
901/**
902 * ftrace_graph_top_ret_addr - return the top return address in the shadow stack
903 * @task: The task to read the shadow stack from.
904 *
905 * Return the first return address on the shadow stack of the @task, which is
906 * not the fgraph's return_to_handler.
907 */
908unsigned long ftrace_graph_top_ret_addr(struct task_struct *task)
909{
910 unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
911 struct ftrace_ret_stack *ret_stack = NULL;
912 int offset = task->curr_ret_stack;
913
914 if (offset < 0)
915 return 0;
916
917 do {
918 ret_stack = get_ret_stack(task, offset, &offset);
919 } while (ret_stack && ret_stack->ret == return_handler);
920
921 return ret_stack ? ret_stack->ret : 0;
922}
923
924/**
925 * ftrace_graph_ret_addr - return the original value of the return address
926 * @task: The task the unwinder is being executed on
927 * @idx: An initialized pointer to the next stack index to use
928 * @ret: The current return address (likely pointing to return_handler)
929 * @retp: The address on the stack of the current return location
930 *
931 * This function can be called by stack unwinding code to convert a found stack
932 * return address (@ret) to its original value, in case the function graph
933 * tracer has modified it to be 'return_to_handler'. If the address hasn't
934 * been modified, the unchanged value of @ret is returned.
935 *
936 * @idx holds the last index used to know where to start from. It should be
937 * initialized to zero for the first iteration as that will mean to start
938 * at the top of the shadow stack. If the location is found, this pointer
939 * will be assigned that location so that if called again, it will continue
940 * where it left off.
941 *
942 * @retp is a pointer to the return address on the stack.
943 */
944unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
945 unsigned long ret, unsigned long *retp)
946{
947 struct ftrace_ret_stack *ret_stack;
948 unsigned long return_handler = (unsigned long)dereference_kernel_function_descriptor(return_to_handler);
949 int i;
950
951 if (ret != return_handler)
952 return ret;
953
954 if (!idx)
955 return ret;
956
957 i = *idx ? : task->curr_ret_stack;
958 while (i > 0) {
959 ret_stack = get_ret_stack(task, i, &i);
960 if (!ret_stack)
961 break;
962 /*
963 * For a tail-call, there may be 2 or more ftrace_ret_stacks on
964 * the ret_stack, all of which record "return_to_handler" as the return
965 * address except for the last one.
966 * But on the real stack, there is only 1 entry because the tail-call
967 * reuses the return address on the stack and jumps to the next function.
968 * Thus we keep walking until we find the real return address.
969 */
970 if (ret_stack->retp == retp &&
971 ret_stack->ret != return_handler) {
972 *idx = i;
973 return ret_stack->ret;
974 }
975 }
976
977 return ret;
978}
979
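/*
 * Example: typical use from an arch unwinder (a minimal sketch; the frame
 * walking itself is arch specific and only hinted at here):
 *
 *	int graph_idx = 0;
 *	unsigned long addr;
 *
 *	// For every frame found, with 'retp' pointing at the location of the
 *	// saved return address on the real stack:
 *	addr = ftrace_graph_ret_addr(task, &graph_idx, *retp, retp);
 *
 * 'addr' is then the original caller even when *retp was rewritten to
 * return_to_handler by the function graph tracer; otherwise it is simply
 * *retp unchanged.
 */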
980static struct ftrace_ops graph_ops = {
981 .func = ftrace_graph_func,
982 .flags = FTRACE_OPS_GRAPH_STUB,
983#ifdef FTRACE_GRAPH_TRAMP_ADDR
984 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
985 /* trampoline_size is only needed for dynamically allocated tramps */
986#endif
987};
988
989void fgraph_init_ops(struct ftrace_ops *dst_ops,
990 struct ftrace_ops *src_ops)
991{
992 dst_ops->flags = FTRACE_OPS_FL_PID | FTRACE_OPS_GRAPH_STUB;
993
994#ifdef CONFIG_DYNAMIC_FTRACE
995 if (src_ops) {
996 dst_ops->func_hash = &src_ops->local_hash;
997 mutex_init(&dst_ops->local_hash.regex_lock);
998 INIT_LIST_HEAD(&dst_ops->subop_list);
999 dst_ops->flags |= FTRACE_OPS_FL_INITIALIZED;
1000 }
1001#endif
1002}
1003
1004void ftrace_graph_sleep_time_control(bool enable)
1005{
1006 fgraph_sleep_time = enable;
1007}
1008
1009/*
1010 * Simply points to ftrace_stub, but with the proper protocol.
1011 * Defined by the linker script in linux/vmlinux.lds.h
1012 */
1013void ftrace_stub_graph(struct ftrace_graph_ret *trace, struct fgraph_ops *gops);
1014
1015/* The callbacks that hook a function */
1016trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
1017trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
1018
1019/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
1020static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
1021{
1022 int i;
1023 int ret = 0;
1024 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
1025 struct task_struct *g, *t;
1026
1027 if (WARN_ON_ONCE(!fgraph_stack_cachep))
1028 return -ENOMEM;
1029
1030 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
1031 ret_stack_list[i] = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
1032 if (!ret_stack_list[i]) {
1033 start = 0;
1034 end = i;
1035 ret = -ENOMEM;
1036 goto free;
1037 }
1038 }
1039
1040 rcu_read_lock();
1041 for_each_process_thread(g, t) {
1042 if (start == end) {
1043 ret = -EAGAIN;
1044 goto unlock;
1045 }
1046
1047 if (t->ret_stack == NULL) {
1048 atomic_set(&t->trace_overrun, 0);
1049 ret_stack_init_task_vars(ret_stack_list[start]);
1050 t->curr_ret_stack = 0;
1051 t->curr_ret_depth = -1;
1052 /* Make sure the tasks see the 0 first: */
1053 smp_wmb();
1054 t->ret_stack = ret_stack_list[start++];
1055 }
1056 }
1057
1058unlock:
1059 rcu_read_unlock();
1060free:
1061 for (i = start; i < end; i++)
1062 kmem_cache_free(fgraph_stack_cachep, ret_stack_list[i]);
1063 return ret;
1064}
1065
1066static void
1067ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
1068 struct task_struct *prev,
1069 struct task_struct *next,
1070 unsigned int prev_state)
1071{
1072 unsigned long long timestamp;
1073
1074 /*
1075 * Does the user want to count the time a function was asleep?
1076 * If so, do not update the time stamps.
1077 */
1078 if (fgraph_sleep_time)
1079 return;
1080
1081 timestamp = trace_clock_local();
1082
1083 prev->ftrace_timestamp = timestamp;
1084
1085 /* only process tasks that we timestamped */
1086 if (!next->ftrace_timestamp)
1087 return;
1088
1089 next->ftrace_sleeptime += timestamp - next->ftrace_timestamp;
1090}
1091
1092static DEFINE_PER_CPU(unsigned long *, idle_ret_stack);
1093
1094static void
1095graph_init_task(struct task_struct *t, unsigned long *ret_stack)
1096{
1097 atomic_set(&t->trace_overrun, 0);
1098 ret_stack_init_task_vars(ret_stack);
1099 t->ftrace_timestamp = 0;
1100 t->curr_ret_stack = 0;
1101 t->curr_ret_depth = -1;
1102 /* make curr_ret_stack visible before we add the ret_stack */
1103 smp_wmb();
1104 t->ret_stack = ret_stack;
1105}
1106
1107/*
1108 * Allocate a return stack for the idle task. May be the first
1109 * time through, or it may be done by CPU hotplug online.
1110 */
1111void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
1112{
1113 t->curr_ret_stack = 0;
1114 t->curr_ret_depth = -1;
1115 /*
1116 * The idle task has no parent, it either has its own
1117 * stack or no stack at all.
1118 */
1119 if (t->ret_stack)
1120 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
1121
1122 if (ftrace_graph_active) {
1123 unsigned long *ret_stack;
1124
1125 if (WARN_ON_ONCE(!fgraph_stack_cachep))
1126 return;
1127
1128 ret_stack = per_cpu(idle_ret_stack, cpu);
1129 if (!ret_stack) {
1130 ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
1131 if (!ret_stack)
1132 return;
1133 per_cpu(idle_ret_stack, cpu) = ret_stack;
1134 }
1135 graph_init_task(t, ret_stack);
1136 }
1137}
1138
1139/* Allocate a return stack for newly created task */
1140void ftrace_graph_init_task(struct task_struct *t)
1141{
1142 /* Make sure we do not use the parent ret_stack */
1143 t->ret_stack = NULL;
1144 t->curr_ret_stack = 0;
1145 t->curr_ret_depth = -1;
1146
1147 if (ftrace_graph_active) {
1148 unsigned long *ret_stack;
1149
1150 if (WARN_ON_ONCE(!fgraph_stack_cachep))
1151 return;
1152
1153 ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
1154 if (!ret_stack)
1155 return;
1156 graph_init_task(t, ret_stack);
1157 }
1158}
1159
1160void ftrace_graph_exit_task(struct task_struct *t)
1161{
1162 unsigned long *ret_stack = t->ret_stack;
1163
1164 t->ret_stack = NULL;
1165 /* NULL must become visible to IRQs before we free it: */
1166 barrier();
1167
1168 if (ret_stack) {
1169 if (WARN_ON_ONCE(!fgraph_stack_cachep))
1170 return;
1171 kmem_cache_free(fgraph_stack_cachep, ret_stack);
1172 }
1173}
1174
1175#ifdef CONFIG_DYNAMIC_FTRACE
1176static int fgraph_pid_func(struct ftrace_graph_ent *trace,
1177 struct fgraph_ops *gops)
1178{
1179 struct trace_array *tr = gops->ops.private;
1180 int pid;
1181
1182 if (tr) {
1183 pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
1184 if (pid == FTRACE_PID_IGNORE)
1185 return 0;
1186 if (pid != FTRACE_PID_TRACE &&
1187 pid != current->pid)
1188 return 0;
1189 }
1190
1191 return gops->saved_func(trace, gops);
1192}
1193
1194void fgraph_update_pid_func(void)
1195{
1196 struct fgraph_ops *gops;
1197 struct ftrace_ops *op;
1198
1199 if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
1200 return;
1201
1202 list_for_each_entry(op, &graph_ops.subop_list, list) {
1203 if (op->flags & FTRACE_OPS_FL_PID) {
1204 gops = container_of(op, struct fgraph_ops, ops);
1205 gops->entryfunc = ftrace_pids_enabled(op) ?
1206 fgraph_pid_func : gops->saved_func;
1207 if (ftrace_graph_active == 1)
1208 static_call_update(fgraph_func, gops->entryfunc);
1209 }
1210 }
1211}
1212#endif
1213
1214/* Allocate a return stack for each task */
1215static int start_graph_tracing(void)
1216{
1217 unsigned long **ret_stack_list;
1218 int ret, cpu;
1219
1220 ret_stack_list = kcalloc(FTRACE_RETSTACK_ALLOC_SIZE,
1221 sizeof(*ret_stack_list), GFP_KERNEL);
1222
1223 if (!ret_stack_list)
1224 return -ENOMEM;
1225
1226 /* The cpu_boot init_task->ret_stack will never be freed */
1227 for_each_online_cpu(cpu) {
1228 if (!idle_task(cpu)->ret_stack)
1229 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
1230 }
1231
1232 do {
1233 ret = alloc_retstack_tasklist(ret_stack_list);
1234 } while (ret == -EAGAIN);
1235
1236 if (!ret) {
1237 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
1238 if (ret)
1239 pr_info("ftrace_graph: Couldn't activate tracepoint"
1240 " probe to kernel_sched_switch\n");
1241 }
1242
1243 kfree(ret_stack_list);
1244 return ret;
1245}
1246
1247static void init_task_vars(int idx)
1248{
1249 struct task_struct *g, *t;
1250 int cpu;
1251
1252 for_each_online_cpu(cpu) {
1253 if (idle_task(cpu)->ret_stack)
1254 ret_stack_set_task_var(idle_task(cpu), idx, 0);
1255 }
1256
1257 read_lock(&tasklist_lock);
1258 for_each_process_thread(g, t) {
1259 if (t->ret_stack)
1260 ret_stack_set_task_var(t, idx, 0);
1261 }
1262 read_unlock(&tasklist_lock);
1263}
1264
1265static void ftrace_graph_enable_direct(bool enable_branch, struct fgraph_ops *gops)
1266{
1267 trace_func_graph_ent_t func = NULL;
1268 trace_func_graph_ret_t retfunc = NULL;
1269 int i;
1270
1271 if (gops) {
1272 func = gops->entryfunc;
1273 retfunc = gops->retfunc;
1274 fgraph_direct_gops = gops;
1275 } else {
1276 for_each_set_bit(i, &fgraph_array_bitmask,
1277 sizeof(fgraph_array_bitmask) * BITS_PER_BYTE) {
1278 func = fgraph_array[i]->entryfunc;
1279 retfunc = fgraph_array[i]->retfunc;
1280 fgraph_direct_gops = fgraph_array[i];
1281 }
1282 }
1283 if (WARN_ON_ONCE(!func))
1284 return;
1285
1286 static_call_update(fgraph_func, func);
1287 static_call_update(fgraph_retfunc, retfunc);
1288 if (enable_branch)
1289 static_branch_disable(&fgraph_do_direct);
1290}
1291
1292static void ftrace_graph_disable_direct(bool disable_branch)
1293{
1294 if (disable_branch)
1295 static_branch_disable(&fgraph_do_direct);
1296 static_call_update(fgraph_func, ftrace_graph_entry_stub);
1297 static_call_update(fgraph_retfunc, ftrace_graph_ret_stub);
1298 fgraph_direct_gops = &fgraph_stub;
1299}
1300
1301/* The cpu_boot init_task->ret_stack will never be freed */
1302static int fgraph_cpu_init(unsigned int cpu)
1303{
1304 if (!idle_task(cpu)->ret_stack)
1305 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
1306 return 0;
1307}
1308
1309int register_ftrace_graph(struct fgraph_ops *gops)
1310{
1311 static bool fgraph_initialized;
1312 int command = 0;
1313 int ret = 0;
1314 int i = -1;
1315
1316 guard(mutex)(&ftrace_lock);
1317
1318 if (!fgraph_stack_cachep) {
1319 fgraph_stack_cachep = kmem_cache_create("fgraph_stack",
1320 SHADOW_STACK_SIZE,
1321 SHADOW_STACK_SIZE, 0, NULL);
1322 if (!fgraph_stack_cachep)
1323 return -ENOMEM;
1324 }
1325
1326 if (!fgraph_initialized) {
1327 ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "fgraph:online",
1328 fgraph_cpu_init, NULL);
1329 if (ret < 0) {
1330 pr_warn("fgraph: Error to init cpu hotplug support\n");
1331 return ret;
1332 }
1333 fgraph_initialized = true;
1334 ret = 0;
1335 }
1336
1337 if (!fgraph_array[0]) {
1338 /* The array must always have real data on it */
1339 for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
1340 fgraph_array[i] = &fgraph_stub;
1341 fgraph_lru_init();
1342 }
1343
1344 i = fgraph_lru_alloc_index();
1345 if (i < 0 || WARN_ON_ONCE(fgraph_array[i] != &fgraph_stub))
1346 return -ENOSPC;
1347 gops->idx = i;
1348
1349 ftrace_graph_active++;
1350
1351 if (ftrace_graph_active == 2)
1352 ftrace_graph_disable_direct(true);
1353
1354 if (ftrace_graph_active == 1) {
1355 ftrace_graph_enable_direct(false, gops);
1356 register_pm_notifier(&ftrace_suspend_notifier);
1357 ret = start_graph_tracing();
1358 if (ret)
1359 goto error;
1360 /*
1361 * Some archs just test to see if these are not
1362 * the default function
1363 */
1364 ftrace_graph_return = return_run;
1365 ftrace_graph_entry = entry_run;
1366 command = FTRACE_START_FUNC_RET;
1367 } else {
1368 init_task_vars(gops->idx);
1369 }
1370 /* Always save the function, and reset at unregistering */
1371 gops->saved_func = gops->entryfunc;
1372
1373 ret = ftrace_startup_subops(&graph_ops, &gops->ops, command);
1374 if (!ret)
1375 fgraph_array[i] = gops;
1376
1377error:
1378 if (ret) {
1379 ftrace_graph_active--;
1380 gops->saved_func = NULL;
1381 fgraph_lru_release_index(i);
1382 }
1383 return ret;
1384}
1385
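/*
 * Example: registering a minimal fgraph_ops with this interface (a sketch
 * only; "my_gops", "my_entry" and "my_ret" are hypothetical entry/return
 * callbacks with the prototypes of ftrace_graph_entry_stub() and
 * ftrace_graph_ret_stub() above):
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_ret,
 *	};
 *
 *	ret = register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 *
 * Any function filtering is set up on my_gops.ops before registering (for
 * example via ftrace_set_filter()), since gops->ops is what gets attached
 * as a subops of graph_ops by ftrace_startup_subops() above.
 */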
1386void unregister_ftrace_graph(struct fgraph_ops *gops)
1387{
1388 int command = 0;
1389
1390 guard(mutex)(&ftrace_lock);
1391
1392 if (unlikely(!ftrace_graph_active))
1393 return;
1394
1395 if (unlikely(gops->idx < 0 || gops->idx >= FGRAPH_ARRAY_SIZE ||
1396 fgraph_array[gops->idx] != gops))
1397 return;
1398
1399 if (fgraph_lru_release_index(gops->idx) < 0)
1400 return;
1401
1402 fgraph_array[gops->idx] = &fgraph_stub;
1403
1404 ftrace_graph_active--;
1405
1406 if (!ftrace_graph_active)
1407 command = FTRACE_STOP_FUNC_RET;
1408
1409 ftrace_shutdown_subops(&graph_ops, &gops->ops, command);
1410
1411 if (ftrace_graph_active == 1)
1412 ftrace_graph_enable_direct(true, NULL);
1413 else if (!ftrace_graph_active)
1414 ftrace_graph_disable_direct(false);
1415
1416 if (!ftrace_graph_active) {
1417 ftrace_graph_return = ftrace_stub_graph;
1418 ftrace_graph_entry = ftrace_graph_entry_stub;
1419 unregister_pm_notifier(&ftrace_suspend_notifier);
1420 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
1421 }
1422 gops->saved_func = NULL;
1423}