/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KERNEL_FTRACE_INTERNAL_H
#define _LINUX_KERNEL_FTRACE_INTERNAL_H

#ifdef CONFIG_FUNCTION_TRACER

/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)				\
	op = rcu_dereference_raw_check(list);			\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)					\
	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
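
/*
 * Illustrative sketch (not part of this header): the two macros above are
 * meant to be used as a bracketed pair, roughly the way the ops-list walkers
 * in ftrace.c use them. "ip", "parent_ip", "regs" and "fregs" below are just
 * placeholders for whatever arguments the caller already has in hand:
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (ftrace_ops_test(op, ip, regs))
 *			op->func(ip, parent_ip, op, fregs);
 *	} while_for_each_ftrace_op(op);
 *
 * The do/while shape is what lets the pair wrap an ordinary brace block
 * while still terminating the walk at ftrace_list_end.
 */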

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;
extern struct mutex ftrace_lock;
extern struct ftrace_ops global_ops;

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_startup(struct ftrace_ops *ops, int command);
int ftrace_shutdown(struct ftrace_ops *ops, int command);
int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs);

#else /* !CONFIG_DYNAMIC_FTRACE */

int __register_ftrace_function(struct ftrace_ops *ops);
int __unregister_ftrace_function(struct ftrace_ops *ops);
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(ops, command)					\
	({								\
		int ___ret = __register_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})
# define ftrace_shutdown(ops, command)					\
	({								\
		int ___ret = __unregister_ftrace_function(ops);		\
		if (!___ret)						\
			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
		___ret;							\
	})
static inline int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
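
/*
 * Usage note (a minimal sketch, not a definitive calling convention): in
 * both configurations, callers are expected to invoke ftrace_startup() and
 * ftrace_shutdown() under ftrace_lock, as register_ftrace_function() and
 * unregister_ftrace_function() in ftrace.c do, along the lines of:
 *
 *	mutex_lock(&ftrace_lock);
 *	ret = ftrace_startup(ops, 0);
 *	mutex_unlock(&ftrace_lock);
 *
 * In the !CONFIG_DYNAMIC_FTRACE case above, the "command" argument is
 * ignored and only the FTRACE_OPS_FL_ENABLED bookkeeping is performed.
 */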

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern int ftrace_graph_active;
void update_function_graph_func(void);
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
# define ftrace_graph_active 0
static inline void update_function_graph_func(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#else /* !CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */

#endif