Linux Audio

Check our new training course

Loading...
v6.13.7 — kernel version selected in the source browser; the listing below appears to be include/trace/perf.h (the text above is page-navigation residue, not source code)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2
  3#undef TRACE_SYSTEM_VAR
  4
  5#ifdef CONFIG_PERF_EVENTS
  6
  7#include "stages/stage6_event_callback.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  8
  9#undef __perf_count
 10#define __perf_count(c)	(__count = (c))
 11
 12#undef __perf_task
 13#define __perf_task(t)	(__task = (t))
 14
 15#undef __DECLARE_EVENT_CLASS
 16#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
 17static notrace void							\
 18do_perf_trace_##call(void *__data, proto)				\
 19{									\
 20	struct trace_event_call *event_call = __data;			\
 21	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
 22	struct trace_event_raw_##call *entry;				\
 23	struct pt_regs *__regs;						\
 24	u64 __count = 1;						\
 25	struct task_struct *__task = NULL;				\
 26	struct hlist_head *head;					\
 27	int __entry_size;						\
 28	int __data_size;						\
 29	int rctx;							\
 30									\
 31	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
 32									\
 33	head = this_cpu_ptr(event_call->perf_events);			\
 34	if (!bpf_prog_array_valid(event_call) &&			\
 35	    __builtin_constant_p(!__task) && !__task &&			\
 36	    hlist_empty(head))						\
 37		return;							\
 38									\
 39	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
 40			     sizeof(u64));				\
 41	__entry_size -= sizeof(u32);					\
 42									\
 43	entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx);	\
 44	if (!entry)							\
 45		return;							\
 46									\
 47	perf_fetch_caller_regs(__regs);					\
 48									\
 49	tstruct								\
 50									\
 51	{ assign; }							\
 52									\
 53	perf_trace_run_bpf_submit(entry, __entry_size, rctx,		\
 54				  event_call, __count, __regs,		\
 55				  head, __task);			\
 56}
 57
 58/*
 59 * Define unused __count and __task variables to use @args to pass
 60 * arguments to do_perf_trace_##call. This is needed because the
 61 * macros __perf_count and __perf_task introduce the side-effect to
 62 * store copies into those local variables.
 63 */
 64#undef DECLARE_EVENT_CLASS
 65#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 66__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
 67		      PARAMS(assign), PARAMS(print))			\
 68static notrace void							\
 69perf_trace_##call(void *__data, proto)					\
 70{									\
 71	u64 __count __attribute__((unused));				\
 72	struct task_struct *__task __attribute__((unused));		\
 73									\
 74	do_perf_trace_##call(__data, args);				\
 75}
 76
 77#undef DECLARE_EVENT_SYSCALL_CLASS
 78#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \
 79__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \
 80		      PARAMS(assign), PARAMS(print))			\
 81static notrace void							\
 82perf_trace_##call(void *__data, proto)					\
 83{									\
 84	u64 __count __attribute__((unused));				\
 85	struct task_struct *__task __attribute__((unused));		\
 86									\
 87	might_fault();							\
 88	preempt_disable_notrace();					\
 89	do_perf_trace_##call(__data, args);				\
 90	preempt_enable_notrace();					\
 91}
 92
 93/*
 94 * This part is compiled out, it is only here as a build time check
 95 * to make sure that if the tracepoint handling changes, the
 96 * perf probe will fail to compile unless it too is updated.
 97 */
 98#undef DEFINE_EVENT
 99#define DEFINE_EVENT(template, call, proto, args)			\
100static inline void perf_test_probe_##call(void)				\
101{									\
102	check_trace_callback_type_##call(perf_trace_##template);	\
103}
104
105
106#undef DEFINE_EVENT_PRINT
107#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
108	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
109
110#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
111
112#undef __DECLARE_EVENT_CLASS
113
114#endif /* CONFIG_PERF_EVENTS */
v6.2 — the same header as rendered by the source browser for the older v6.2 kernel (duplicate listing; differences from v6.13.7 include inline accessor macros here instead of the staged includes, and no DECLARE_EVENT_SYSCALL_CLASS)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2
  3#undef TRACE_SYSTEM_VAR
  4
  5#ifdef CONFIG_PERF_EVENTS
  6
  7#undef __entry
  8#define __entry entry
  9
 10#undef __get_dynamic_array
 11#define __get_dynamic_array(field)	\
 12		((void *)__entry + (__entry->__data_loc_##field & 0xffff))
 13
 14#undef __get_dynamic_array_len
 15#define __get_dynamic_array_len(field)	\
 16		((__entry->__data_loc_##field >> 16) & 0xffff)
 17
 18#undef __get_str
 19#define __get_str(field) ((char *)__get_dynamic_array(field))
 20
 21#undef __get_bitmask
 22#define __get_bitmask(field) (char *)__get_dynamic_array(field)
 23
 24#undef __get_cpumask
 25#define __get_cpumask(field) (char *)__get_dynamic_array(field)
 26
 27#undef __get_sockaddr
 28#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field))
 29
 30#undef __get_rel_dynamic_array
 31#define __get_rel_dynamic_array(field)	\
 32		((void *)__entry +					\
 33		 offsetof(typeof(*__entry), __rel_loc_##field) +	\
 34		 sizeof(__entry->__rel_loc_##field) +			\
 35		 (__entry->__rel_loc_##field & 0xffff))
 36
 37#undef __get_rel_dynamic_array_len
 38#define __get_rel_dynamic_array_len(field)	\
 39		((__entry->__rel_loc_##field >> 16) & 0xffff)
 40
 41#undef __get_rel_str
 42#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
 43
 44#undef __get_rel_bitmask
 45#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
 46
 47#undef __get_rel_cpumask
 48#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field)
 49
 50#undef __get_rel_sockaddr
 51#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
 52
 53#undef __perf_count
 54#define __perf_count(c)	(__count = (c))
 55
 56#undef __perf_task
 57#define __perf_task(t)	(__task = (t))
 58
 59#undef DECLARE_EVENT_CLASS
 60#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
 61static notrace void							\
 62perf_trace_##call(void *__data, proto)					\
 63{									\
 64	struct trace_event_call *event_call = __data;			\
 65	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
 66	struct trace_event_raw_##call *entry;				\
 67	struct pt_regs *__regs;						\
 68	u64 __count = 1;						\
 69	struct task_struct *__task = NULL;				\
 70	struct hlist_head *head;					\
 71	int __entry_size;						\
 72	int __data_size;						\
 73	int rctx;							\
 74									\
 75	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
 76									\
 77	head = this_cpu_ptr(event_call->perf_events);			\
 78	if (!bpf_prog_array_valid(event_call) &&			\
 79	    __builtin_constant_p(!__task) && !__task &&			\
 80	    hlist_empty(head))						\
 81		return;							\
 82									\
 83	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
 84			     sizeof(u64));				\
 85	__entry_size -= sizeof(u32);					\
 86									\
 87	entry = perf_trace_buf_alloc(__entry_size, &__regs, &rctx);	\
 88	if (!entry)							\
 89		return;							\
 90									\
 91	perf_fetch_caller_regs(__regs);					\
 92									\
 93	tstruct								\
 94									\
 95	{ assign; }							\
 96									\
 97	perf_trace_run_bpf_submit(entry, __entry_size, rctx,		\
 98				  event_call, __count, __regs,		\
 99				  head, __task);			\
100}
101
102/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103 * This part is compiled out, it is only here as a build time check
104 * to make sure that if the tracepoint handling changes, the
105 * perf probe will fail to compile unless it too is updated.
106 */
107#undef DEFINE_EVENT
108#define DEFINE_EVENT(template, call, proto, args)			\
109static inline void perf_test_probe_##call(void)				\
110{									\
111	check_trace_callback_type_##call(perf_trace_##template);	\
112}
113
114
115#undef DEFINE_EVENT_PRINT
116#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
117	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
118
119#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
 
 
 
120#endif /* CONFIG_PERF_EVENTS */