/* SPDX-License-Identifier: GPL-2.0 */

#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_PERF_EVENTS

#include "stages/stage6_event_callback.h"

#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))
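
/*
 * An event opts in via its TP_ARGS(): e.g. sched_stat_template uses
 * TP_ARGS(__perf_task(tsk), __perf_count(delay)) so that perf accounts
 * the delay as the sample period and attributes the sample to that task.
 */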

#undef __DECLARE_EVENT_CLASS
#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
do_perf_trace_##call(void *__data, proto) \
{ \
	struct trace_event_call *event_call = __data; \
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets; \
	struct trace_event_raw_##call *entry; \
	struct pt_regs *__regs; \
	u64 __count = 1; \
	struct task_struct *__task = NULL; \
	struct hlist_head *head; \
	int __entry_size; \
	int __data_size; \
	int rctx; \
 \
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
 \
	head = this_cpu_ptr(event_call->perf_events); \
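	/* \
	 * Bail out early when nothing can consume this event: no BPF \
	 * program is attached, no perf event is active on this CPU, and \
	 * __task is compile-time NULL (the event does not use \
	 * __perf_task()). \
	 */ \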
	if (!bpf_prog_array_valid(event_call) && \
	    __builtin_constant_p(!__task) && !__task && \
	    hlist_empty(head)) \
		return; \
 \
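	/* \
	 * Perf prefixes raw samples with a u32 size; add it, round the \
	 * total up to u64 alignment, then take the u32 back out so the \
	 * size-prefixed record stays 8-byte aligned. \
	 */ \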
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), \
			     sizeof(u64)); \
	__entry_size -= sizeof(u32); \
 \
	entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx); \
	if (!entry) \
		return; \
 \
	perf_fetch_caller_regs(__regs); \
 \
	tstruct \
 \
	{ assign; } \
 \
	perf_trace_run_bpf_submit(entry, __entry_size, rctx, \
				  event_call, __count, __regs, \
				  head, __task); \
}
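
/*
 * For a hypothetical TRACE_EVENT(foo, ...), the class above expands to
 * do_perf_trace_foo(): it sizes a struct trace_event_raw_foo record,
 * fills it on the per-CPU perf buffer (tstruct + assign), and submits
 * it to perf and any attached BPF programs.
 */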

/*
 * Define unused __count and __task variables to use @args to pass
 * arguments to do_perf_trace_##call. This is needed because the
 * __perf_count() and __perf_task() macros have the side effect of
 * storing copies into those local variables.
 */
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
		      PARAMS(assign), PARAMS(print)) \
static notrace void \
perf_trace_##call(void *__data, proto) \
{ \
	u64 __count __attribute__((unused)); \
	struct task_struct *__task __attribute__((unused)); \
 \
	do_perf_trace_##call(__data, args); \
}
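
/*
 * Example: with sched_stat_template's
 * TP_ARGS(__perf_task(tsk), __perf_count(delay)), the call above
 * expands to
 *   do_perf_trace_sched_stat_template(__data, (__task = (tsk)),
 *                                     (__count = (delay)));
 * The assignments land in the wrapper's dummy locals, which exist
 * only so that this expansion compiles.
 */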

#undef DECLARE_EVENT_SYSCALL_CLASS
#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \
__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
		      PARAMS(assign), PARAMS(print)) \
static notrace void \
perf_trace_##call(void *__data, proto) \
{ \
	u64 __count __attribute__((unused)); \
	struct task_struct *__task __attribute__((unused)); \
 \
	might_fault(); \
	preempt_disable_notrace(); \
	do_perf_trace_##call(__data, args); \
	preempt_enable_notrace(); \
}
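
/*
 * Syscall tracepoints run in faultable, preemptible context:
 * might_fault() documents that sleeping is still allowed at this
 * point, and preemption is then disabled because the common body
 * relies on per-CPU state (the perf buffer and event list).
 */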

/*
 * This part is compiled out; it is only here as a build-time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
static inline void perf_test_probe_##call(void) \
{ \
	check_trace_callback_type_##call(perf_trace_##template); \
}
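
/*
 * check_trace_callback_type_##call() is generated by the tracepoint
 * machinery; it only type-checks perf_trace_##template against the
 * tracepoint's callback signature and is never executed.
 */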

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

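/*
 * Re-include the trace header: with the redefinitions above in effect,
 * this pass emits a perf probe for every event declared in
 * TRACE_INCLUDE_FILE.
 */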
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __DECLARE_EVENT_CLASS

#endif /* CONFIG_PERF_EVENTS */