/* SPDX-License-Identifier: GPL-2.0 */

#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_PERF_EVENTS

#include "stages/stage6_event_callback.h"

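/*
 * Hooks that an event's TP_ARGS() or TP_fast_assign() may use:
 * __perf_count() overrides the sample weight (the default is 1) and
 * __perf_task() accounts the sample to a specific task instead of the
 * current one. Both expand to assignments into locals declared by
 * __DECLARE_EVENT_CLASS() below.
 */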
#undef __perf_count
#define __perf_count(c)	(__count = (c))

#undef __perf_task
#define __perf_task(t)	(__task = (t))

#undef __DECLARE_EVENT_CLASS
#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void							\
do_perf_trace_##call(void *__data, proto)				\
{									\
	struct trace_event_call *event_call = __data;			\
	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
	struct trace_event_raw_##call *entry;				\
	struct pt_regs *__regs;						\
	u64 __count = 1;						\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
									\
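	/*								\
	 * Bail out early when nothing can consume this event: no BPF	\
	 * program is attached, the class never sets __perf_task()	\
	 * (__task is compile-time NULL), and no perf event is active	\
	 * on this CPU.							\
	 */								\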
	head = this_cpu_ptr(event_call->perf_events);			\
	if (!bpf_prog_array_valid(event_call) &&			\
	    __builtin_constant_p(!__task) && !__task &&			\
	    hlist_empty(head))						\
		return;							\
									\
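	/*								\
	 * Size the record so that, together with the u32 size header	\
	 * perf prepends to raw samples, the payload stays u64-aligned	\
	 * in the ring buffer.						\
	 */								\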
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
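	/* Capture the register state at the call site for sampling. */\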
	perf_fetch_caller_regs(__regs);					\
									\
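	/*								\
	 * tstruct stores the __data_loc offsets of dynamic fields;	\
	 * the assign block (TP_fast_assign) then fills in the entry.	\
	 */								\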
	tstruct								\
									\
	{ assign; }							\
									\
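	/*								\
	 * Run any attached BPF programs; unless they drop the event,	\
	 * submit the record to perf, targeting __task if it was set.	\
	 */								\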
	perf_trace_run_bpf_submit(entry, __entry_size, rctx,		\
				  event_call, __count, __regs,		\
				  head, __task);			\
}

/*
 * Define unused __count and __task variables so that @args can be used
 * to pass arguments to do_perf_trace_##call. This is needed because the
 * __perf_count and __perf_task macros have the side effect of storing
 * copies into those local variables.
 */
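/*
 * For illustration, a hypothetical event (not defined in this file)
 * that routes its arguments through these hooks, following the same
 * pattern as the sched_stat_template events: perf then weights each
 * sample by @delay and accounts it to @tsk instead of counting 1
 * against the current task.
 *
 *	TRACE_EVENT(sample_wait,
 *		TP_PROTO(struct task_struct *tsk, u64 delay),
 *		TP_ARGS(__perf_task(tsk), __perf_count(delay)),
 *		TP_STRUCT__entry(
 *			__field(pid_t,	pid)
 *			__field(u64,	delay)
 *		),
 *		TP_fast_assign(
 *			__entry->pid	= tsk->pid;
 *			__entry->delay	= delay;
 *		),
 *		TP_printk("pid=%d delay=%llu", __entry->pid,
 *			  (unsigned long long)__entry->delay)
 *	);
 */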
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
		      PARAMS(assign), PARAMS(print))			\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	u64 __count __attribute__((unused));				\
	struct task_struct *__task __attribute__((unused));		\
									\
	do_perf_trace_##call(__data, args);				\
}

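/*
 * Same as above, but for syscall tracepoints, which are invoked from
 * preemptible (faultable) context: annotate that with might_fault() and
 * disable preemption around the per-CPU work in do_perf_trace_##call().
 */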
#undef DECLARE_EVENT_SYSCALL_CLASS
#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \
__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
		      PARAMS(assign), PARAMS(print))			\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	u64 __count __attribute__((unused));				\
	struct task_struct *__task __attribute__((unused));		\
									\
	might_fault();							\
	preempt_disable_notrace();					\
	do_perf_trace_##call(__data, args);				\
	preempt_enable_notrace();					\
}

/*
 * This part is compiled out; it is only here as a build-time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}

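/*
 * check_trace_callback_type_##call() is declared by DECLARE_TRACE() in
 * <linux/tracepoint.h>, roughly as:
 *
 *	static inline void
 *	check_trace_callback_type_##call(void (*cb)(void *__data, proto))
 *	{
 *	}
 *
 * so the call above compiles only if perf_trace_##template matches the
 * tracepoint's callback prototype.
 */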
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __DECLARE_EVENT_CLASS

#endif /* CONFIG_PERF_EVENTS */